// SPDX-License-Identifier: GPL-2.0
/*
* Physical device callbacks for vfio_ccw
*
* Copyright IBM Corp. 2017
* Copyright Red Hat, Inc. 2019
*
* Author(s): Dong Jia Shi <[email protected]>
* Xiao Feng Ren <[email protected]>
* Cornelia Huck <[email protected]>
*/
#include <linux/vfio.h>
#include <linux/nospec.h>
#include <linux/slab.h>
#include "vfio_ccw_private.h"
static const struct vfio_device_ops vfio_ccw_dev_ops;
static int vfio_ccw_mdev_reset(struct vfio_ccw_private *private)
{
/*
* If the FSM state is seen as Not Operational after closing
* and re-opening the mdev, return an error.
*/
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
if (private->state == VFIO_CCW_STATE_NOT_OPER)
return -EINVAL;
return 0;
}
static void vfio_ccw_dma_unmap(struct vfio_device *vdev, u64 iova, u64 length)
{
struct vfio_ccw_private *private =
container_of(vdev, struct vfio_ccw_private, vdev);
/* Drivers MUST unpin pages in response to an invalidation. */
if (!cp_iova_pinned(&private->cp, iova, length))
return;
vfio_ccw_mdev_reset(private);
}
static int vfio_ccw_mdev_init_dev(struct vfio_device *vdev)
{
struct vfio_ccw_private *private =
container_of(vdev, struct vfio_ccw_private, vdev);
mutex_init(&private->io_mutex);
private->state = VFIO_CCW_STATE_STANDBY;
INIT_LIST_HEAD(&private->crw);
INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
GFP_KERNEL);
if (!private->cp.guest_cp)
goto out_free_private;
private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
GFP_KERNEL | GFP_DMA);
if (!private->io_region)
goto out_free_cp;
private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
GFP_KERNEL | GFP_DMA);
if (!private->cmd_region)
goto out_free_io;
private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
GFP_KERNEL | GFP_DMA);
if (!private->schib_region)
goto out_free_cmd;
private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
GFP_KERNEL | GFP_DMA);
if (!private->crw_region)
goto out_free_schib;
return 0;
out_free_schib:
kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
out_free_cmd:
kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
out_free_io:
kmem_cache_free(vfio_ccw_io_region, private->io_region);
out_free_cp:
kfree(private->cp.guest_cp);
out_free_private:
mutex_destroy(&private->io_mutex);
return -ENOMEM;
}
static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
{
struct subchannel *sch = to_subchannel(mdev->dev.parent);
struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
struct vfio_ccw_private *private;
int ret;
private = vfio_alloc_device(vfio_ccw_private, vdev, &mdev->dev,
&vfio_ccw_dev_ops);
if (IS_ERR(private))
return PTR_ERR(private);
dev_set_drvdata(&parent->dev, private);
VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: create\n",
sch->schid.cssid,
sch->schid.ssid,
sch->schid.sch_no);
ret = vfio_register_emulated_iommu_dev(&private->vdev);
if (ret)
goto err_put_vdev;
dev_set_drvdata(&mdev->dev, private);
return 0;
err_put_vdev:
dev_set_drvdata(&parent->dev, NULL);
vfio_put_device(&private->vdev);
return ret;
}
static void vfio_ccw_mdev_release_dev(struct vfio_device *vdev)
{
struct vfio_ccw_private *private =
container_of(vdev, struct vfio_ccw_private, vdev);
struct vfio_ccw_crw *crw, *temp;
list_for_each_entry_safe(crw, temp, &private->crw, next) {
list_del(&crw->next);
kfree(crw);
}
kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
kmem_cache_free(vfio_ccw_io_region, private->io_region);
kfree(private->cp.guest_cp);
mutex_destroy(&private->io_mutex);
}
static void vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
struct subchannel *sch = to_subchannel(mdev->dev.parent);
struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);
VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: remove\n",
sch->schid.cssid,
sch->schid.ssid,
sch->schid.sch_no);
vfio_unregister_group_dev(&private->vdev);
dev_set_drvdata(&parent->dev, NULL);
vfio_put_device(&private->vdev);
}
static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
{
struct vfio_ccw_private *private =
container_of(vdev, struct vfio_ccw_private, vdev);
int ret;
/* Device cannot simply be opened again from this state */
if (private->state == VFIO_CCW_STATE_NOT_OPER)
return -EINVAL;
ret = vfio_ccw_register_async_dev_regions(private);
if (ret)
return ret;
ret = vfio_ccw_register_schib_dev_regions(private);
if (ret)
goto out_unregister;
ret = vfio_ccw_register_crw_dev_regions(private);
if (ret)
goto out_unregister;
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_OPEN);
if (private->state == VFIO_CCW_STATE_NOT_OPER) {
ret = -EINVAL;
goto out_unregister;
}
return ret;
out_unregister:
vfio_ccw_unregister_dev_regions(private);
return ret;
}
static void vfio_ccw_mdev_close_device(struct vfio_device *vdev)
{
struct vfio_ccw_private *private =
container_of(vdev, struct vfio_ccw_private, vdev);
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
vfio_ccw_unregister_dev_regions(private);
}
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
char __user *buf, size_t count,
loff_t *ppos)
{
loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
struct ccw_io_region *region;
int ret;
if (pos + count > sizeof(*region))
return -EINVAL;
mutex_lock(&private->io_mutex);
region = private->io_region;
if (copy_to_user(buf, (void *)region + pos, count))
ret = -EFAULT;
else
ret = count;
mutex_unlock(&private->io_mutex);
return ret;
}
static ssize_t vfio_ccw_mdev_read(struct vfio_device *vdev,
char __user *buf,
size_t count,
loff_t *ppos)
{
struct vfio_ccw_private *private =
container_of(vdev, struct vfio_ccw_private, vdev);
unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
return -EINVAL;
switch (index) {
case VFIO_CCW_CONFIG_REGION_INDEX:
return vfio_ccw_mdev_read_io_region(private, buf, count, ppos);
default:
index -= VFIO_CCW_NUM_REGIONS;
return private->region[index].ops->read(private, buf, count,
ppos);
}
return -EINVAL;
}
static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private,
const char __user *buf,
size_t count, loff_t *ppos)
{
loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
struct ccw_io_region *region;
int ret;
if (pos + count > sizeof(*region))
return -EINVAL;
if (!mutex_trylock(&private->io_mutex))
return -EAGAIN;
region = private->io_region;
if (copy_from_user((void *)region + pos, buf, count)) {
ret = -EFAULT;
goto out_unlock;
}
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ);
ret = (region->ret_code != 0) ? region->ret_code : count;
out_unlock:
mutex_unlock(&private->io_mutex);
return ret;
}
static ssize_t vfio_ccw_mdev_write(struct vfio_device *vdev,
const char __user *buf,
size_t count,
loff_t *ppos)
{
struct vfio_ccw_private *private =
container_of(vdev, struct vfio_ccw_private, vdev);
unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
return -EINVAL;
switch (index) {
case VFIO_CCW_CONFIG_REGION_INDEX:
return vfio_ccw_mdev_write_io_region(private, buf, count, ppos);
default:
index -= VFIO_CCW_NUM_REGIONS;
return private->region[index].ops->write(private, buf, count,
ppos);
}
return -EINVAL;
}
static int vfio_ccw_mdev_get_device_info(struct vfio_ccw_private *private,
struct vfio_device_info *info)
{
info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions;
info->num_irqs = VFIO_CCW_NUM_IRQS;
return 0;
}
static int vfio_ccw_mdev_get_region_info(struct vfio_ccw_private *private,
struct vfio_region_info *info,
unsigned long arg)
{
int i;
switch (info->index) {
case VFIO_CCW_CONFIG_REGION_INDEX:
info->offset = 0;
info->size = sizeof(struct ccw_io_region);
info->flags = VFIO_REGION_INFO_FLAG_READ
| VFIO_REGION_INFO_FLAG_WRITE;
return 0;
default: /* all other regions are handled via capability chain */
{
struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
struct vfio_region_info_cap_type cap_type = {
.header.id = VFIO_REGION_INFO_CAP_TYPE,
.header.version = 1 };
int ret;
if (info->index >=
VFIO_CCW_NUM_REGIONS + private->num_regions)
return -EINVAL;
info->index = array_index_nospec(info->index,
VFIO_CCW_NUM_REGIONS +
private->num_regions);
i = info->index - VFIO_CCW_NUM_REGIONS;
info->offset = VFIO_CCW_INDEX_TO_OFFSET(info->index);
info->size = private->region[i].size;
info->flags = private->region[i].flags;
cap_type.type = private->region[i].type;
cap_type.subtype = private->region[i].subtype;
ret = vfio_info_add_capability(&caps, &cap_type.header,
sizeof(cap_type));
if (ret)
return ret;
info->flags |= VFIO_REGION_INFO_FLAG_CAPS;
if (info->argsz < sizeof(*info) + caps.size) {
info->argsz = sizeof(*info) + caps.size;
info->cap_offset = 0;
} else {
vfio_info_cap_shift(&caps, sizeof(*info));
if (copy_to_user((void __user *)arg + sizeof(*info),
caps.buf, caps.size)) {
kfree(caps.buf);
return -EFAULT;
}
info->cap_offset = sizeof(*info);
}
kfree(caps.buf);
}
}
return 0;
}
static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
{
switch (info->index) {
case VFIO_CCW_IO_IRQ_INDEX:
case VFIO_CCW_CRW_IRQ_INDEX:
case VFIO_CCW_REQ_IRQ_INDEX:
info->count = 1;
info->flags = VFIO_IRQ_INFO_EVENTFD;
break;
default:
return -EINVAL;
}
return 0;
}
static int vfio_ccw_mdev_set_irqs(struct vfio_ccw_private *private,
uint32_t flags,
uint32_t index,
void __user *data)
{
struct eventfd_ctx **ctx;
if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
return -EINVAL;
switch (index) {
case VFIO_CCW_IO_IRQ_INDEX:
ctx = &private->io_trigger;
break;
case VFIO_CCW_CRW_IRQ_INDEX:
ctx = &private->crw_trigger;
break;
case VFIO_CCW_REQ_IRQ_INDEX:
ctx = &private->req_trigger;
break;
default:
return -EINVAL;
}
switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
case VFIO_IRQ_SET_DATA_NONE:
{
if (*ctx)
eventfd_signal(*ctx, 1);
return 0;
}
case VFIO_IRQ_SET_DATA_BOOL:
{
uint8_t trigger;
if (get_user(trigger, (uint8_t __user *)data))
return -EFAULT;
if (trigger && *ctx)
eventfd_signal(*ctx, 1);
return 0;
}
case VFIO_IRQ_SET_DATA_EVENTFD:
{
int32_t fd;
if (get_user(fd, (int32_t __user *)data))
return -EFAULT;
if (fd == -1) {
if (*ctx)
eventfd_ctx_put(*ctx);
*ctx = NULL;
} else if (fd >= 0) {
struct eventfd_ctx *efdctx;
efdctx = eventfd_ctx_fdget(fd);
if (IS_ERR(efdctx))
return PTR_ERR(efdctx);
if (*ctx)
eventfd_ctx_put(*ctx);
*ctx = efdctx;
} else
return -EINVAL;
return 0;
}
default:
return -EINVAL;
}
}
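/*
 * Illustrative userspace sketch (an assumption, not part of this driver):
 * wiring an eventfd to the I/O IRQ serviced by vfio_ccw_mdev_set_irqs()
 * above. "device_fd" is assumed to be an open VFIO device fd; the helper
 * name is hypothetical.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int wire_io_eventfd(int device_fd)
{
	char buf[sizeof(struct vfio_irq_set) + sizeof(int32_t)];
	struct vfio_irq_set *set = (struct vfio_irq_set *)buf;
	int efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		return -1;
	memset(buf, 0, sizeof(buf));
	set->argsz = sizeof(buf);
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = VFIO_CCW_IO_IRQ_INDEX;
	set->start = 0;
	set->count = 1;
	/* The handler above fetches this fd via get_user() on the data area: */
	memcpy(set->data, &efd, sizeof(int32_t));
	return ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
}
#endif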
int vfio_ccw_register_dev_region(struct vfio_ccw_private *private,
unsigned int subtype,
const struct vfio_ccw_regops *ops,
size_t size, u32 flags, void *data)
{
struct vfio_ccw_region *region;
region = krealloc(private->region,
(private->num_regions + 1) * sizeof(*region),
GFP_KERNEL);
if (!region)
return -ENOMEM;
private->region = region;
private->region[private->num_regions].type = VFIO_REGION_TYPE_CCW;
private->region[private->num_regions].subtype = subtype;
private->region[private->num_regions].ops = ops;
private->region[private->num_regions].size = size;
private->region[private->num_regions].flags = flags;
private->region[private->num_regions].data = data;
private->num_regions++;
return 0;
}
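/*
 * Illustrative sketch (an assumption, not in this file): how a capability
 * provider such as the async-region code might register itself through
 * vfio_ccw_register_dev_region(). The "my_region_ops" table and its
 * callbacks are hypothetical stand-ins.
 */
#if 0
static const struct vfio_ccw_regops my_region_ops = {
	.read	 = my_region_read,	/* hypothetical */
	.write	 = my_region_write,	/* hypothetical */
	.release = my_region_release,	/* hypothetical */
};

static int my_register(struct vfio_ccw_private *private, void *region_data)
{
	/* Appends one region and bumps private->num_regions on success: */
	return vfio_ccw_register_dev_region(private,
				VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD,
				&my_region_ops, sizeof(struct ccw_cmd_region),
				VFIO_REGION_INFO_FLAG_READ |
				VFIO_REGION_INFO_FLAG_WRITE, region_data);
}
#endif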
void vfio_ccw_unregister_dev_regions(struct vfio_ccw_private *private)
{
int i;
for (i = 0; i < private->num_regions; i++)
private->region[i].ops->release(private, &private->region[i]);
private->num_regions = 0;
kfree(private->region);
private->region = NULL;
}
static ssize_t vfio_ccw_mdev_ioctl(struct vfio_device *vdev,
unsigned int cmd,
unsigned long arg)
{
struct vfio_ccw_private *private =
container_of(vdev, struct vfio_ccw_private, vdev);
int ret = 0;
unsigned long minsz;
switch (cmd) {
case VFIO_DEVICE_GET_INFO:
{
struct vfio_device_info info;
minsz = offsetofend(struct vfio_device_info, num_irqs);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz)
return -EINVAL;
ret = vfio_ccw_mdev_get_device_info(private, &info);
if (ret)
return ret;
return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}
case VFIO_DEVICE_GET_REGION_INFO:
{
struct vfio_region_info info;
minsz = offsetofend(struct vfio_region_info, offset);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz)
return -EINVAL;
ret = vfio_ccw_mdev_get_region_info(private, &info, arg);
if (ret)
return ret;
return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}
case VFIO_DEVICE_GET_IRQ_INFO:
{
struct vfio_irq_info info;
minsz = offsetofend(struct vfio_irq_info, count);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz || info.index >= VFIO_CCW_NUM_IRQS)
return -EINVAL;
ret = vfio_ccw_mdev_get_irq_info(&info);
if (ret)
return ret;
if (info.count == -1)
return -EINVAL;
return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}
case VFIO_DEVICE_SET_IRQS:
{
struct vfio_irq_set hdr;
size_t data_size;
void __user *data;
minsz = offsetofend(struct vfio_irq_set, count);
if (copy_from_user(&hdr, (void __user *)arg, minsz))
return -EFAULT;
ret = vfio_set_irqs_validate_and_prepare(&hdr, 1,
VFIO_CCW_NUM_IRQS,
&data_size);
if (ret)
return ret;
data = (void __user *)(arg + minsz);
return vfio_ccw_mdev_set_irqs(private, hdr.flags, hdr.index,
data);
}
case VFIO_DEVICE_RESET:
return vfio_ccw_mdev_reset(private);
default:
return -ENOTTY;
}
}
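/*
 * Illustrative userspace sketch (assumption): querying a region beyond
 * VFIO_CCW_NUM_REGIONS. When the capability chain does not fit, the
 * handler above reports the needed argsz and sets cap_offset to 0, so
 * userspace retries with a larger buffer.
 */
#if 0
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static struct vfio_region_info *get_region_info(int device_fd, __u32 index)
{
	struct vfio_region_info *info = calloc(1, sizeof(*info));

	if (!info)
		return NULL;
	info->argsz = sizeof(*info);
	info->index = index;
	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, info))
		goto err;
	if (info->argsz > sizeof(*info)) {	/* caps did not fit, retry */
		__u32 argsz = info->argsz;
		struct vfio_region_info *bigger = realloc(info, argsz);

		if (!bigger)
			goto err;
		info = bigger;
		memset(info, 0, argsz);
		info->argsz = argsz;
		info->index = index;
		if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, info))
			goto err;
	}
	return info;
err:
	free(info);
	return NULL;
}
#endif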
/* Request removal of the device */
static void vfio_ccw_mdev_request(struct vfio_device *vdev, unsigned int count)
{
struct vfio_ccw_private *private =
container_of(vdev, struct vfio_ccw_private, vdev);
struct device *dev = vdev->dev;
if (private->req_trigger) {
if (!(count % 10))
dev_notice_ratelimited(dev,
"Relaying device request to user (#%u)\n",
count);
eventfd_signal(private->req_trigger, 1);
} else if (count == 0) {
dev_notice(dev,
"No device request channel registered, blocked until released by user\n");
}
}
static const struct vfio_device_ops vfio_ccw_dev_ops = {
.init = vfio_ccw_mdev_init_dev,
.release = vfio_ccw_mdev_release_dev,
.open_device = vfio_ccw_mdev_open_device,
.close_device = vfio_ccw_mdev_close_device,
.read = vfio_ccw_mdev_read,
.write = vfio_ccw_mdev_write,
.ioctl = vfio_ccw_mdev_ioctl,
.request = vfio_ccw_mdev_request,
.dma_unmap = vfio_ccw_dma_unmap,
.bind_iommufd = vfio_iommufd_emulated_bind,
.unbind_iommufd = vfio_iommufd_emulated_unbind,
.attach_ioas = vfio_iommufd_emulated_attach_ioas,
.detach_ioas = vfio_iommufd_emulated_detach_ioas,
};
struct mdev_driver vfio_ccw_mdev_driver = {
.device_api = VFIO_DEVICE_API_CCW_STRING,
.max_instances = 1,
.driver = {
.name = "vfio_ccw_mdev",
.owner = THIS_MODULE,
.mod_name = KBUILD_MODNAME,
},
.probe = vfio_ccw_mdev_probe,
.remove = vfio_ccw_mdev_remove,
};
/* source: linux-master, drivers/s390/cio/vfio_ccw_ops.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Tracepoint definitions for s390_cio
*
* Copyright IBM Corp. 2015
* Author(s): Peter Oberparleiter <[email protected]>
*/
#include <asm/crw.h>
#include "cio.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
EXPORT_TRACEPOINT_SYMBOL(s390_cio_stsch);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_msch);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_tsch);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_tpi);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_ssch);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_csch);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_hsch);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_xsch);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_rsch);
EXPORT_TRACEPOINT_SYMBOL(s390_cio_chsc);
/* source: linux-master, drivers/s390/cio/trace.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Linux for s390 qdio support, buffer handling, qdio API and module support.
*
* Copyright IBM Corp. 2000, 2008
* Author(s): Utz Bacher <[email protected]>
* Jan Glauber <[email protected]>
* 2.6 cio integration by Cornelia Huck <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/ipl.h>
#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"
MODULE_AUTHOR("Utz Bacher <[email protected]>,"\
"Jan Glauber <[email protected]>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
static inline int do_siga_sync(unsigned long schid,
unsigned long out_mask, unsigned long in_mask,
unsigned int fc)
{
int cc;
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[schid]\n"
" lgr 2,%[out]\n"
" lgr 3,%[in]\n"
" siga 0\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (cc)
: [fc] "d" (fc), [schid] "d" (schid),
[out] "d" (out_mask), [in] "d" (in_mask)
: "cc", "0", "1", "2", "3");
return cc;
}
static inline int do_siga_input(unsigned long schid, unsigned long mask,
unsigned long fc)
{
int cc;
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[schid]\n"
" lgr 2,%[mask]\n"
" siga 0\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (cc)
: [fc] "d" (fc), [schid] "d" (schid), [mask] "d" (mask)
: "cc", "0", "1", "2");
return cc;
}
/**
* do_siga_output - perform SIGA-w/wt function
* @schid: subchannel id or in case of QEBSM the subchannel token
* @mask: which output queues to process
* @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
* @fc: function code to perform
* @aob: asynchronous operation block
*
* Returns condition code.
* Note: For IQDC unicast queues only the highest priority queue is processed.
*/
static inline int do_siga_output(unsigned long schid, unsigned long mask,
unsigned int *bb, unsigned long fc,
unsigned long aob)
{
int cc;
asm volatile(
" lgr 0,%[fc]\n"
" lgr 1,%[schid]\n"
" lgr 2,%[mask]\n"
" lgr 3,%[aob]\n"
" siga 0\n"
" lgr %[fc],0\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (cc), [fc] "+&d" (fc)
: [schid] "d" (schid), [mask] "d" (mask), [aob] "d" (aob)
: "cc", "0", "1", "2", "3");
*bb = fc >> 31;
return cc;
}
/**
* qdio_do_eqbs - extract buffer states for QEBSM
* @q: queue to manipulate
* @state: state of the extracted buffers
* @start: buffer number to start at
* @count: count of buffers to examine
* @auto_ack: automatically acknowledge buffers
*
* Returns the number of successfully extracted equal buffer states.
* Stops processing if a state is different from the last buffers state.
*/
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
int start, int count, int auto_ack)
{
int tmp_count = count, tmp_start = start, nr = q->nr;
unsigned int ccq = 0;
qperf_inc(q, eqbs);
if (!q->is_input_q)
nr += q->irq_ptr->nr_input_qs;
again:
ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
auto_ack);
switch (ccq) {
case 0:
case 32:
/* all done, or next buffer state different */
return count - tmp_count;
case 96:
/* not all buffers processed */
qperf_inc(q, eqbs_partial);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
tmp_count);
return count - tmp_count;
case 97:
/* no buffer processed */
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
goto again;
default:
DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
q->first_to_check, count, q->irq_ptr->int_parm);
return 0;
}
}
/**
* qdio_do_sqbs - set buffer states for QEBSM
* @q: queue to manipulate
* @state: new state of the buffers
* @start: first buffer number to change
* @count: how many buffers to change
*
* Returns the number of successfully changed buffers.
* Does retrying until the specified count of buffer states is set or an
* error occurs.
*/
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
int count)
{
unsigned int ccq = 0;
int tmp_count = count, tmp_start = start;
int nr = q->nr;
qperf_inc(q, sqbs);
if (!q->is_input_q)
nr += q->irq_ptr->nr_input_qs;
again:
ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
switch (ccq) {
case 0:
case 32:
/* all done, or active buffer adapter-owned */
WARN_ON_ONCE(tmp_count);
return count - tmp_count;
case 96:
/* not all buffers processed */
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
qperf_inc(q, sqbs_partial);
goto again;
default:
DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
q->first_to_check, count, q->irq_ptr->int_parm);
return 0;
}
}
/*
* Returns number of examined buffers and their common state in *state.
* Requested number of buffers-to-examine must be > 0.
*/
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
unsigned char *state, unsigned int count,
int auto_ack)
{
unsigned char __state = 0;
int i = 1;
if (is_qebsm(q))
return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
/* get initial state: */
__state = q->slsb.val[bufnr];
/* Bail out early if there is no work on the queue: */
if (__state & SLSB_OWNER_CU)
goto out;
for (; i < count; i++) {
bufnr = next_buf(bufnr);
/* stop if next state differs from initial state: */
if (q->slsb.val[bufnr] != __state)
break;
}
out:
*state = __state;
return i;
}
static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
unsigned char *state, int auto_ack)
{
return get_buf_states(q, bufnr, state, 1, auto_ack);
}
/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
unsigned char state, int count)
{
int i;
if (is_qebsm(q))
return qdio_do_sqbs(q, state, bufnr, count);
/* Ensure that all preceding changes to the SBALs are visible: */
mb();
for (i = 0; i < count; i++) {
WRITE_ONCE(q->slsb.val[bufnr], state);
bufnr = next_buf(bufnr);
}
/* Make our SLSB changes visible: */
mb();
return count;
}
static inline int set_buf_state(struct qdio_q *q, int bufnr,
unsigned char state)
{
return set_buf_states(q, bufnr, state, 1);
}
/* set slsb states to initial state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
int i;
for_each_input_queue(irq_ptr, q, i)
set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
QDIO_MAX_BUFFERS_PER_Q);
for_each_output_queue(irq_ptr, q, i)
set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
QDIO_MAX_BUFFERS_PER_Q);
}
static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
unsigned int input)
{
unsigned long schid = *((u32 *) &q->irq_ptr->schid);
unsigned int fc = QDIO_SIGA_SYNC;
int cc;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
qperf_inc(q, siga_sync);
if (is_qebsm(q)) {
schid = q->irq_ptr->sch_token;
fc |= QDIO_SIGA_QEBSM_FLAG;
}
cc = do_siga_sync(schid, output, input, fc);
if (unlikely(cc))
DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
return (cc) ? -EIO : 0;
}
static inline int qdio_sync_input_queue(struct qdio_q *q)
{
return qdio_siga_sync(q, 0, q->mask);
}
static inline int qdio_sync_output_queue(struct qdio_q *q)
{
return qdio_siga_sync(q, q->mask, 0);
}
static inline int qdio_siga_sync_q(struct qdio_q *q)
{
if (q->is_input_q)
return qdio_sync_input_queue(q);
else
return qdio_sync_output_queue(q);
}
static int qdio_siga_output(struct qdio_q *q, unsigned int count,
unsigned int *busy_bit, unsigned long aob)
{
unsigned long schid = *((u32 *) &q->irq_ptr->schid);
unsigned int fc = QDIO_SIGA_WRITE;
u64 start_time = 0;
int retries = 0, cc;
if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
if (count > 1)
fc = QDIO_SIGA_WRITEM;
else if (aob)
fc = QDIO_SIGA_WRITEQ;
}
if (is_qebsm(q)) {
schid = q->irq_ptr->sch_token;
fc |= QDIO_SIGA_QEBSM_FLAG;
}
again:
cc = do_siga_output(schid, q->mask, busy_bit, fc, aob);
/* hipersocket busy condition */
if (unlikely(*busy_bit)) {
retries++;
if (!start_time) {
start_time = get_tod_clock_fast();
goto again;
}
if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
goto again;
}
if (retries) {
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
"%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
}
return cc;
}
static inline int qdio_siga_input(struct qdio_q *q)
{
unsigned long schid = *((u32 *) &q->irq_ptr->schid);
unsigned int fc = QDIO_SIGA_READ;
int cc;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
qperf_inc(q, siga_read);
if (is_qebsm(q)) {
schid = q->irq_ptr->sch_token;
fc |= QDIO_SIGA_QEBSM_FLAG;
}
cc = do_siga_input(schid, q->mask, fc);
if (unlikely(cc))
DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
return (cc) ? -EIO : 0;
}
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
unsigned char *state)
{
if (qdio_need_siga_sync(q->irq_ptr))
qdio_siga_sync_q(q);
return get_buf_state(q, bufnr, state, 0);
}
static inline void qdio_stop_polling(struct qdio_q *q)
{
if (!q->u.in.batch_count)
return;
qperf_inc(q, stop_polling);
/* show the card that we are not polling anymore */
set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT,
q->u.in.batch_count);
q->u.in.batch_count = 0;
}
static inline void account_sbals(struct qdio_q *q, unsigned int count)
{
q->q_stats.nr_sbal_total += count;
q->q_stats.nr_sbals[ilog2(count)]++;
}
static void process_buffer_error(struct qdio_q *q, unsigned int start,
int count)
{
/* special handling for no target buffer empty */
if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
q->sbal[start]->element[15].sflags == 0x10) {
qperf_inc(q, target_full);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
return;
}
DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
DBF_ERROR("FTC:%3d C:%3d", start, count);
DBF_ERROR("F14:%2x F15:%2x",
q->sbal[start]->element[14].sflags,
q->sbal[start]->element[15].sflags);
}
static inline void inbound_handle_work(struct qdio_q *q, unsigned int start,
int count, bool auto_ack)
{
/* ACK the newest SBAL: */
if (!auto_ack)
set_buf_state(q, add_buf(start, count - 1), SLSB_P_INPUT_ACK);
if (!q->u.in.batch_count)
q->u.in.batch_start = start;
q->u.in.batch_count += count;
}
static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start,
unsigned int *error)
{
unsigned char state = 0;
int count;
q->timestamp = get_tod_clock_fast();
count = atomic_read(&q->nr_buf_used);
if (!count)
return 0;
if (qdio_need_siga_sync(q->irq_ptr))
qdio_sync_input_queue(q);
count = get_buf_states(q, start, &state, count, 1);
if (!count)
return 0;
switch (state) {
case SLSB_P_INPUT_PRIMED:
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr,
count);
inbound_handle_work(q, start, count, is_qebsm(q));
if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
return count;
case SLSB_P_INPUT_ERROR:
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr,
count);
*error = QDIO_ERROR_SLSB_STATE;
process_buffer_error(q, start, count);
inbound_handle_work(q, start, count, false);
if (atomic_sub_return(count, &q->nr_buf_used) == 0)
qperf_inc(q, inbound_queue_full);
if (q->irq_ptr->perf_stat_enabled)
account_sbals_error(q, count);
return count;
case SLSB_CU_INPUT_EMPTY:
if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
q->nr, start);
return 0;
case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_INPUT_ACK:
/* We should never see this state, throw a WARN: */
default:
dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
"found state %#x at index %u on queue %u\n",
state, start, q->nr);
return 0;
}
}
int qdio_inspect_input_queue(struct ccw_device *cdev, unsigned int nr,
unsigned int *bufnr, unsigned int *error)
{
struct qdio_irq *irq = cdev->private->qdio_data;
unsigned int start;
struct qdio_q *q;
int count;
if (!irq)
return -ENODEV;
q = irq->input_qs[nr];
start = q->first_to_check;
*error = 0;
count = get_inbound_buffer_frontier(q, start, error);
if (count == 0)
return 0;
*bufnr = start;
q->first_to_check = add_buf(start, count);
return count;
}
EXPORT_SYMBOL_GPL(qdio_inspect_input_queue);
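/*
 * Illustrative sketch (assumption, modelled on how qeth/zfcp-style drivers
 * consume this API): drain one Input Queue and hand the emptied buffers
 * back to the device. The my_*() helpers are hypothetical.
 */
#if 0
static void my_drain_input(struct ccw_device *cdev, unsigned int nr)
{
	unsigned int start, error;
	int count;

	while ((count = qdio_inspect_input_queue(cdev, nr, &start, &error)) > 0) {
		if (error)
			my_handle_error(start, count, error);	/* hypothetical */
		else
			my_process_sbals(start, count);		/* hypothetical */
		/* Return the processed buffers to the device: */
		qdio_add_bufs_to_input_queue(cdev, nr, start, count);
	}
}
#endif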
static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
{
unsigned char state = 0;
if (!atomic_read(&q->nr_buf_used))
return 1;
if (qdio_need_siga_sync(q->irq_ptr))
qdio_sync_input_queue(q);
get_buf_state(q, start, &state, 0);
if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
/* more work coming */
return 0;
return 1;
}
static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
unsigned int *error)
{
unsigned char state = 0;
int count;
q->timestamp = get_tod_clock_fast();
count = atomic_read(&q->nr_buf_used);
if (!count)
return 0;
if (qdio_need_siga_sync(q->irq_ptr))
qdio_sync_output_queue(q);
count = get_buf_states(q, start, &state, count, 0);
if (!count)
return 0;
switch (state) {
case SLSB_P_OUTPUT_PENDING:
*error = QDIO_ERROR_SLSB_PENDING;
fallthrough;
case SLSB_P_OUTPUT_EMPTY:
/* the adapter got it */
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
"out empty:%1d %02x", q->nr, count);
atomic_sub(count, &q->nr_buf_used);
if (q->irq_ptr->perf_stat_enabled)
account_sbals(q, count);
return count;
case SLSB_P_OUTPUT_ERROR:
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out error:%1d %02x",
q->nr, count);
*error = QDIO_ERROR_SLSB_STATE;
process_buffer_error(q, start, count);
atomic_sub(count, &q->nr_buf_used);
if (q->irq_ptr->perf_stat_enabled)
account_sbals_error(q, count);
return count;
case SLSB_CU_OUTPUT_PRIMED:
/* the adapter has not fetched the output yet */
if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
q->nr);
return 0;
case SLSB_P_OUTPUT_HALTED:
return 0;
case SLSB_P_OUTPUT_NOT_INIT:
/* We should never see this state, throw a WARN: */
default:
dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
"found state %#x at index %u on queue %u\n",
state, start, q->nr);
return 0;
}
}
int qdio_inspect_output_queue(struct ccw_device *cdev, unsigned int nr,
unsigned int *bufnr, unsigned int *error)
{
struct qdio_irq *irq = cdev->private->qdio_data;
unsigned int start;
struct qdio_q *q;
int count;
if (!irq)
return -ENODEV;
q = irq->output_qs[nr];
start = q->first_to_check;
*error = 0;
count = get_outbound_buffer_frontier(q, start, error);
if (count == 0)
return 0;
*bufnr = start;
q->first_to_check = add_buf(start, count);
return count;
}
EXPORT_SYMBOL_GPL(qdio_inspect_output_queue);
static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
unsigned long aob)
{
int retries = 0, cc;
unsigned int busy_bit;
if (!qdio_need_siga_out(q->irq_ptr))
return 0;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
qperf_inc(q, siga_write);
cc = qdio_siga_output(q, count, &busy_bit, aob);
switch (cc) {
case 0:
break;
case 2:
if (busy_bit) {
while (++retries < QDIO_BUSY_BIT_RETRIES) {
mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
goto retry;
}
DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
cc = -EBUSY;
} else {
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
cc = -ENOBUFS;
}
break;
case 1:
case 3:
DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
cc = -EIO;
break;
}
if (retries) {
DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
DBF_ERROR("count:%u", retries);
}
return cc;
}
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
enum qdio_irq_states state)
{
DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);
irq_ptr->state = state;
mb();
}
static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
if (irb->esw.esw0.erw.cons) {
DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
DBF_ERROR_HEX(irb, 64);
DBF_ERROR_HEX(irb->ecw, 64);
}
}
/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
return;
qdio_deliver_irq(irq_ptr);
irq_ptr->last_data_irq_time = S390_lowcore.int_clock;
}
static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
unsigned long intparm, int cstat,
int dstat)
{
unsigned int first_to_check = 0;
DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
DBF_ERROR("intp :%lx", intparm);
DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
/* zfcp wants this: */
if (irq_ptr->nr_input_qs)
first_to_check = irq_ptr->input_qs[0]->first_to_check;
irq_ptr->error_handler(irq_ptr->cdev, QDIO_ERROR_ACTIVATE, 0,
first_to_check, 0, irq_ptr->int_parm);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
/*
* In case of z/VM LGR (Live Guest Relocation) QDIO recovery will happen.
* Therefore we call the LGR detection function here.
*/
lgr_info_log();
}
static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
int dstat)
{
DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
if (cstat)
goto error;
if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
goto error;
if (!(dstat & DEV_STAT_DEV_END))
goto error;
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
return;
error:
DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}
/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct irb *irb)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
int cstat, dstat;
if (!intparm || !irq_ptr) {
ccw_device_get_schid(cdev, &schid);
DBF_ERROR("qint:%4x", schid.sch_no);
return;
}
if (irq_ptr->perf_stat_enabled)
irq_ptr->perf_stat.qdio_int++;
if (IS_ERR(irb)) {
DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
wake_up(&cdev->private->wait_q);
return;
}
qdio_irq_check_sense(irq_ptr, irb);
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
switch (irq_ptr->state) {
case QDIO_IRQ_STATE_INACTIVE:
qdio_establish_handle_irq(irq_ptr, cstat, dstat);
break;
case QDIO_IRQ_STATE_CLEANUP:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
break;
case QDIO_IRQ_STATE_ESTABLISHED:
case QDIO_IRQ_STATE_ACTIVE:
if (cstat & SCHN_STAT_PCI) {
qdio_int_handler_pci(irq_ptr);
return;
}
if (cstat || dstat)
qdio_handle_activate_check(irq_ptr, intparm, cstat,
dstat);
break;
case QDIO_IRQ_STATE_STOPPED:
break;
default:
WARN_ON_ONCE(1);
}
wake_up(&cdev->private->wait_q);
}
/**
* qdio_get_ssqd_desc - get qdio subchannel description
* @cdev: ccw device to get description for
* @data: where to store the ssqd
*
* Returns 0 or an error code. The results of the chsc are stored in the
* specified structure.
*/
int qdio_get_ssqd_desc(struct ccw_device *cdev,
struct qdio_ssqd_desc *data)
{
struct subchannel_id schid;
if (!cdev || !cdev->private)
return -EINVAL;
ccw_device_get_schid(cdev, &schid);
DBF_EVENT("get ssqd:%4x", schid.sch_no);
return qdio_setup_get_ssqd(NULL, &schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
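/*
 * Illustrative sketch (assumption): probing the subchannel's queue format
 * from the ssqd before queue setup. The helper name is hypothetical.
 */
#if 0
static bool my_is_hipersockets(struct ccw_device *cdev)
{
	struct qdio_ssqd_desc ssqd;

	if (qdio_get_ssqd_desc(cdev, &ssqd))
		return false;
	return ssqd.qfmt == QDIO_IQDIO_QFMT;
}
#endif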
static int qdio_cancel_ccw(struct qdio_irq *irq, int how)
{
struct ccw_device *cdev = irq->cdev;
long timeout;
int rc;
spin_lock_irq(get_ccwdev_lock(cdev));
qdio_set_state(irq, QDIO_IRQ_STATE_CLEANUP);
if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
else
/* default behaviour is halt */
rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
spin_unlock_irq(get_ccwdev_lock(cdev));
if (rc) {
DBF_ERROR("%4x SHUTD ERR", irq->schid.sch_no);
DBF_ERROR("rc:%4d", rc);
return rc;
}
timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
irq->state == QDIO_IRQ_STATE_INACTIVE ||
irq->state == QDIO_IRQ_STATE_ERR,
10 * HZ);
if (timeout <= 0)
rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
return rc;
}
/**
* qdio_shutdown - shut down a qdio subchannel
* @cdev: associated ccw device
* @how: use halt or clear to shutdown
*/
int qdio_shutdown(struct ccw_device *cdev, int how)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
int rc;
if (!irq_ptr)
return -ENODEV;
WARN_ON_ONCE(irqs_disabled());
ccw_device_get_schid(cdev, &schid);
DBF_EVENT("qshutdown:%4x", schid.sch_no);
mutex_lock(&irq_ptr->setup_mutex);
/*
* Subchannel was already shot down. We cannot prevent being called
* twice since cio may trigger a shutdown asynchronously.
*/
if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
mutex_unlock(&irq_ptr->setup_mutex);
return 0;
}
/*
* Indicate that the device is going down.
*/
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
qdio_shutdown_debug_entries(irq_ptr);
rc = qdio_cancel_ccw(irq_ptr, how);
qdio_shutdown_thinint(irq_ptr);
qdio_shutdown_irq(irq_ptr);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
mutex_unlock(&irq_ptr->setup_mutex);
if (rc)
return rc;
return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);
/**
* qdio_free - free data structures for a qdio subchannel
* @cdev: associated ccw device
*/
int qdio_free(struct ccw_device *cdev)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
if (!irq_ptr)
return -ENODEV;
ccw_device_get_schid(cdev, &schid);
DBF_EVENT("qfree:%4x", schid.sch_no);
DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
mutex_lock(&irq_ptr->setup_mutex);
irq_ptr->debug_area = NULL;
cdev->private->qdio_data = NULL;
mutex_unlock(&irq_ptr->setup_mutex);
qdio_free_queues(irq_ptr);
free_page((unsigned long) irq_ptr->qdr);
free_page(irq_ptr->chsc_page);
kfree(irq_ptr->ccw);
free_page((unsigned long) irq_ptr);
return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);
/**
* qdio_allocate - allocate qdio queues and associated data
* @cdev: associated ccw device
* @no_input_qs: allocate this number of Input Queues
* @no_output_qs: allocate this number of Output Queues
*/
int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
unsigned int no_output_qs)
{
struct subchannel_id schid;
struct qdio_irq *irq_ptr;
int rc = -ENOMEM;
ccw_device_get_schid(cdev, &schid);
DBF_EVENT("qallocate:%4x", schid.sch_no);
if (no_input_qs > QDIO_MAX_QUEUES_PER_IRQ ||
no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)
return -EINVAL;
irq_ptr = (void *) get_zeroed_page(GFP_KERNEL);
if (!irq_ptr)
return -ENOMEM;
irq_ptr->ccw = kmalloc(sizeof(*irq_ptr->ccw), GFP_KERNEL | GFP_DMA);
if (!irq_ptr->ccw)
goto err_ccw;
/* kmemleak doesn't scan the page-allocated irq_ptr: */
kmemleak_not_leak(irq_ptr->ccw);
irq_ptr->cdev = cdev;
mutex_init(&irq_ptr->setup_mutex);
if (qdio_allocate_dbf(irq_ptr))
goto err_dbf;
DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs,
no_output_qs);
/*
* Allocate a page for the chsc calls in qdio_establish.
* Must be pre-allocated since a zfcp recovery will call
* qdio_establish. In case of low memory and swap on a zfcp disk
* we may not be able to allocate memory otherwise.
*/
irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
if (!irq_ptr->chsc_page)
goto err_chsc;
/* qdr is used in ccw1.cda which is u32 */
irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!irq_ptr->qdr)
goto err_qdr;
rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs);
if (rc)
goto err_queues;
cdev->private->qdio_data = irq_ptr;
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
return 0;
err_queues:
free_page((unsigned long) irq_ptr->qdr);
err_qdr:
free_page(irq_ptr->chsc_page);
err_chsc:
err_dbf:
kfree(irq_ptr->ccw);
err_ccw:
free_page((unsigned long) irq_ptr);
return rc;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
static void qdio_trace_init_data(struct qdio_irq *irq,
struct qdio_initialize *data)
{
DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs,
data->no_output_qs);
DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR);
DBF_DEV_HEX(irq, &data->output_handler, sizeof(void *), DBF_ERR);
DBF_DEV_HEX(irq, &data->int_parm, sizeof(long), DBF_ERR);
DBF_DEV_HEX(irq, &data->input_sbal_addr_array, sizeof(void *), DBF_ERR);
DBF_DEV_HEX(irq, &data->output_sbal_addr_array, sizeof(void *),
DBF_ERR);
}
/**
* qdio_establish - establish queues on a qdio subchannel
* @cdev: associated ccw device
* @init_data: initialization data
*/
int qdio_establish(struct ccw_device *cdev,
struct qdio_initialize *init_data)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
struct ciw *ciw;
long timeout;
int rc;
ccw_device_get_schid(cdev, &schid);
DBF_EVENT("qestablish:%4x", schid.sch_no);
if (!irq_ptr)
return -ENODEV;
if (init_data->no_input_qs > irq_ptr->max_input_qs ||
init_data->no_output_qs > irq_ptr->max_output_qs)
return -EINVAL;
/* Needed as error_handler: */
if (!init_data->input_handler)
return -EINVAL;
if (init_data->no_output_qs && !init_data->output_handler)
return -EINVAL;
if (!init_data->input_sbal_addr_array ||
!init_data->output_sbal_addr_array)
return -EINVAL;
if (!init_data->irq_poll)
return -EINVAL;
ciw = ccw_device_get_ciw(cdev, CIW_TYPE_EQUEUE);
if (!ciw) {
DBF_ERROR("%4x NO EQ", schid.sch_no);
return -EIO;
}
mutex_lock(&irq_ptr->setup_mutex);
qdio_trace_init_data(irq_ptr, init_data);
qdio_setup_irq(irq_ptr, init_data);
rc = qdio_establish_thinint(irq_ptr);
if (rc)
goto err_thinint;
/* establish q */
irq_ptr->ccw->cmd_code = ciw->cmd;
irq_ptr->ccw->flags = CCW_FLAG_SLI;
irq_ptr->ccw->count = ciw->count;
irq_ptr->ccw->cda = (u32) virt_to_phys(irq_ptr->qdr);
spin_lock_irq(get_ccwdev_lock(cdev));
ccw_device_set_options_mask(cdev, 0);
rc = ccw_device_start(cdev, irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
spin_unlock_irq(get_ccwdev_lock(cdev));
if (rc) {
DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
DBF_ERROR("rc:%4x", rc);
goto err_ccw_start;
}
timeout = wait_event_interruptible_timeout(cdev->private->wait_q,
irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
if (timeout <= 0) {
rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
goto err_ccw_timeout;
}
if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
rc = -EIO;
goto err_ccw_error;
}
qdio_setup_ssqd_info(irq_ptr);
/* qebsm is now setup if available, initialize buffer states */
qdio_init_buf_states(irq_ptr);
mutex_unlock(&irq_ptr->setup_mutex);
qdio_print_subchannel_info(irq_ptr);
qdio_setup_debug_entries(irq_ptr);
return 0;
err_ccw_timeout:
qdio_cancel_ccw(irq_ptr, QDIO_FLAG_CLEANUP_USING_CLEAR);
err_ccw_error:
err_ccw_start:
qdio_shutdown_thinint(irq_ptr);
err_thinint:
qdio_shutdown_irq(irq_ptr);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
mutex_unlock(&irq_ptr->setup_mutex);
return rc;
}
EXPORT_SYMBOL_GPL(qdio_establish);
/**
* qdio_activate - activate queues on a qdio subchannel
* @cdev: associated cdev
*/
int qdio_activate(struct ccw_device *cdev)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
struct ciw *ciw;
int rc;
ccw_device_get_schid(cdev, &schid);
DBF_EVENT("qactivate:%4x", schid.sch_no);
if (!irq_ptr)
return -ENODEV;
ciw = ccw_device_get_ciw(cdev, CIW_TYPE_AQUEUE);
if (!ciw) {
DBF_ERROR("%4x NO AQ", schid.sch_no);
return -EIO;
}
mutex_lock(&irq_ptr->setup_mutex);
if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
rc = -EBUSY;
goto out;
}
irq_ptr->ccw->cmd_code = ciw->cmd;
irq_ptr->ccw->flags = CCW_FLAG_SLI;
irq_ptr->ccw->count = ciw->count;
irq_ptr->ccw->cda = 0;
spin_lock_irq(get_ccwdev_lock(cdev));
ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
rc = ccw_device_start(cdev, irq_ptr->ccw, QDIO_DOING_ACTIVATE,
0, DOIO_DENY_PREFETCH);
spin_unlock_irq(get_ccwdev_lock(cdev));
if (rc) {
DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
DBF_ERROR("rc:%4x", rc);
goto out;
}
/* wait for subchannel to become active */
msleep(5);
switch (irq_ptr->state) {
case QDIO_IRQ_STATE_STOPPED:
case QDIO_IRQ_STATE_ERR:
rc = -EIO;
break;
default:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
rc = 0;
}
out:
mutex_unlock(&irq_ptr->setup_mutex);
return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
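/*
 * Illustrative sketch (assumption): the setup/teardown sequence a qdio
 * driver follows with the entry points above. Error unwinding is
 * abbreviated; "init_data" must be filled in by the caller.
 */
#if 0
static int my_qdio_bringup(struct ccw_device *cdev,
			   struct qdio_initialize *init_data)
{
	int rc;

	rc = qdio_allocate(cdev, init_data->no_input_qs,
			   init_data->no_output_qs);
	if (rc)
		return rc;
	rc = qdio_establish(cdev, init_data);
	if (rc)
		goto out_free;
	rc = qdio_activate(cdev);
	if (rc)
		goto out_shutdown;
	return 0;
out_shutdown:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
out_free:
	qdio_free(cdev);
	return rc;
}
#endif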
/**
* handle_inbound - reset processed input buffers
* @q: queue containing the buffers
* @bufnr: first buffer to process
* @count: how many buffers are emptied
*/
static int handle_inbound(struct qdio_q *q, int bufnr, int count)
{
int overlap;
qperf_inc(q, inbound_call);
/* If any processed SBALs are returned to HW, adjust our tracking: */
overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr),
q->u.in.batch_count);
if (overlap > 0) {
q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap);
q->u.in.batch_count -= overlap;
}
count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
atomic_add(count, &q->nr_buf_used);
if (qdio_need_siga_in(q->irq_ptr))
return qdio_siga_input(q);
return 0;
}
/**
* qdio_add_bufs_to_input_queue - process buffers on an Input Queue
* @cdev: associated ccw_device for the qdio subchannel
* @q_nr: queue number
* @bufnr: buffer number
* @count: how many buffers to process
*/
int qdio_add_bufs_to_input_queue(struct ccw_device *cdev, unsigned int q_nr,
unsigned int bufnr, unsigned int count)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
return -EINVAL;
if (!irq_ptr)
return -ENODEV;
DBF_DEV_EVENT(DBF_INFO, irq_ptr, "addi b:%02x c:%02x", bufnr, count);
if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
return -EIO;
if (!count)
return 0;
return handle_inbound(irq_ptr->input_qs[q_nr], bufnr, count);
}
EXPORT_SYMBOL_GPL(qdio_add_bufs_to_input_queue);
/**
* handle_outbound - process filled outbound buffers
* @q: queue containing the buffers
* @bufnr: first buffer to process
* @count: how many buffers are filled
* @aob: asynchronous operation block
*/
static int handle_outbound(struct qdio_q *q, unsigned int bufnr, unsigned int count,
struct qaob *aob)
{
unsigned char state = 0;
int used, rc = 0;
qperf_inc(q, outbound_call);
count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
used = atomic_add_return(count, &q->nr_buf_used);
if (used == QDIO_MAX_BUFFERS_PER_Q)
qperf_inc(q, outbound_queue_full);
if (queue_type(q) == QDIO_IQDIO_QFMT) {
unsigned long phys_aob = aob ? virt_to_phys(aob) : 0;
WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256));
rc = qdio_kick_outbound_q(q, count, phys_aob);
} else if (qdio_need_siga_sync(q->irq_ptr)) {
rc = qdio_sync_output_queue(q);
} else if (count < QDIO_MAX_BUFFERS_PER_Q &&
get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
state == SLSB_CU_OUTPUT_PRIMED) {
/* The previous buffer is not processed yet, tack on. */
qperf_inc(q, fast_requeue);
} else {
rc = qdio_kick_outbound_q(q, count, 0);
}
return rc;
}
/**
* qdio_add_bufs_to_output_queue - process buffers on an Output Queue
* @cdev: associated ccw_device for the qdio subchannel
* @q_nr: queue number
* @bufnr: buffer number
* @count: how many buffers to process
* @aob: asynchronous operation block
*/
int qdio_add_bufs_to_output_queue(struct ccw_device *cdev, unsigned int q_nr,
unsigned int bufnr, unsigned int count,
struct qaob *aob)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
return -EINVAL;
if (!irq_ptr)
return -ENODEV;
DBF_DEV_EVENT(DBF_INFO, irq_ptr, "addo b:%02x c:%02x", bufnr, count);
if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
return -EIO;
if (!count)
return 0;
return handle_outbound(irq_ptr->output_qs[q_nr], bufnr, count, aob);
}
EXPORT_SYMBOL_GPL(qdio_add_bufs_to_output_queue);
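/*
 * Illustrative sketch (assumption): submit filled SBALs and later reap
 * their completion state, as a networking driver would. my_tx_done() is
 * hypothetical.
 */
#if 0
static int my_send(struct ccw_device *cdev, unsigned int nr,
		   unsigned int bufnr, unsigned int count)
{
	/* SBALs bufnr..bufnr+count-1 were filled by the caller: */
	return qdio_add_bufs_to_output_queue(cdev, nr, bufnr, count, NULL);
}

static void my_reap_completions(struct ccw_device *cdev, unsigned int nr)
{
	unsigned int start, error;
	int count;

	while ((count = qdio_inspect_output_queue(cdev, nr, &start, &error)) > 0)
		my_tx_done(start, count, error);	/* hypothetical */
}
#endif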
/**
* qdio_start_irq - enable interrupt processing for the device
* @cdev: associated ccw_device for the qdio subchannel
*
* Return codes
* 0 - success
* 1 - irqs not started since new data is available
*/
int qdio_start_irq(struct ccw_device *cdev)
{
struct qdio_q *q;
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
unsigned int i;
if (!irq_ptr)
return -ENODEV;
for_each_input_queue(irq_ptr, q, i)
qdio_stop_polling(q);
clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
/*
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
if (test_nonshared_ind(irq_ptr))
goto rescan;
for_each_input_queue(irq_ptr, q, i) {
if (!qdio_inbound_q_done(q, q->first_to_check))
goto rescan;
}
return 0;
rescan:
if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
return 0;
else
return 1;
}
EXPORT_SYMBOL(qdio_start_irq);
/**
* qdio_stop_irq - disable interrupt processing for the device
* @cdev: associated ccw_device for the qdio subchannel
*
* Return codes
* 0 - interrupts were already disabled
* 1 - interrupts successfully disabled
*/
int qdio_stop_irq(struct ccw_device *cdev)
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
return 0;
else
return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);
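/*
 * Illustrative sketch (assumption): the polling discipline around
 * qdio_start_irq()/qdio_stop_irq(). The irq_poll callback (see struct
 * qdio_initialize) disables further interrupts and schedules a poller;
 * the poller re-enables interrupts and rescans whenever qdio_start_irq()
 * reports fresh work. The my_*() helpers are hypothetical.
 */
#if 0
static void my_poll_worker(struct ccw_device *cdev)
{
	do {
		my_drain_all_queues(cdev);	/* hypothetical */
	} while (qdio_start_irq(cdev) == 1);	/* 1: new data, keep going */
}

static void my_irq_poll(struct ccw_device *cdev, unsigned long data)
{
	if (qdio_stop_irq(cdev) == 1)		/* we disabled irqs, so poll */
		my_schedule_poller(cdev);	/* hypothetical */
}
#endif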
static int __init init_QDIO(void)
{
int rc;
rc = qdio_debug_init();
if (rc)
return rc;
rc = qdio_setup_init();
if (rc)
goto out_debug;
rc = qdio_thinint_init();
if (rc)
goto out_cache;
return 0;
out_cache:
qdio_setup_exit();
out_debug:
qdio_debug_exit();
return rc;
}
static void __exit exit_QDIO(void)
{
qdio_thinint_exit();
qdio_setup_exit();
qdio_debug_exit();
}
module_init(init_QDIO);
module_exit(exit_QDIO);
/* source: linux-master, drivers/s390/cio/qdio_main.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Functions for assembling fcx enabled I/O control blocks.
*
* Copyright IBM Corp. 2008
* Author(s): Peter Oberparleiter <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <asm/fcx.h>
#include "cio.h"
/**
* tcw_get_intrg - return pointer to associated interrogate tcw
* @tcw: pointer to the original tcw
*
* Return a pointer to the interrogate tcw associated with the specified tcw
* or %NULL if there is no associated interrogate tcw.
*/
struct tcw *tcw_get_intrg(struct tcw *tcw)
{
return phys_to_virt(tcw->intrg);
}
EXPORT_SYMBOL(tcw_get_intrg);
/**
* tcw_get_data - return pointer to input/output data associated with tcw
* @tcw: pointer to the tcw
*
* Return the input or output data address specified in the tcw depending
* on whether the r-bit or the w-bit is set. If neither bit is set, return
* %NULL.
*/
void *tcw_get_data(struct tcw *tcw)
{
if (tcw->r)
return phys_to_virt(tcw->input);
if (tcw->w)
return phys_to_virt(tcw->output);
return NULL;
}
EXPORT_SYMBOL(tcw_get_data);
/**
* tcw_get_tccb - return pointer to tccb associated with tcw
* @tcw: pointer to the tcw
*
* Return pointer to the tccb associated with this tcw.
*/
struct tccb *tcw_get_tccb(struct tcw *tcw)
{
return phys_to_virt(tcw->tccb);
}
EXPORT_SYMBOL(tcw_get_tccb);
/**
* tcw_get_tsb - return pointer to tsb associated with tcw
* @tcw: pointer to the tcw
*
* Return pointer to the tsb associated with this tcw.
*/
struct tsb *tcw_get_tsb(struct tcw *tcw)
{
return phys_to_virt(tcw->tsb);
}
EXPORT_SYMBOL(tcw_get_tsb);
/**
* tcw_init - initialize tcw data structure
* @tcw: pointer to the tcw to be initialized
* @r: initial value of the r-bit
* @w: initial value of the w-bit
*
* Initialize all fields of the specified tcw data structure with zero and
* fill in the format, flags, r and w fields.
*/
void tcw_init(struct tcw *tcw, int r, int w)
{
memset(tcw, 0, sizeof(struct tcw));
tcw->format = TCW_FORMAT_DEFAULT;
tcw->flags = TCW_FLAGS_TIDAW_FORMAT(TCW_TIDAW_FORMAT_DEFAULT);
if (r)
tcw->r = 1;
if (w)
tcw->w = 1;
}
EXPORT_SYMBOL(tcw_init);
static inline size_t tca_size(struct tccb *tccb)
{
return tccb->tcah.tcal - 12;
}
static u32 calc_dcw_count(struct tccb *tccb)
{
int offset;
struct dcw *dcw;
u32 count = 0;
size_t size;
size = tca_size(tccb);
for (offset = 0; offset < size;) {
dcw = (struct dcw *) &tccb->tca[offset];
count += dcw->count;
if (!(dcw->flags & DCW_FLAGS_CC))
break;
offset += sizeof(struct dcw) + ALIGN((int) dcw->cd_count, 4);
}
return count;
}
static u32 calc_cbc_size(struct tidaw *tidaw, int num)
{
int i;
u32 cbc_data;
u32 cbc_count = 0;
u64 data_count = 0;
for (i = 0; i < num; i++) {
if (tidaw[i].flags & TIDAW_FLAGS_LAST)
break;
/* TODO: find out if padding applies to total of data
* transferred or data transferred by this tidaw. Assumption:
* applies to total. */
data_count += tidaw[i].count;
if (tidaw[i].flags & TIDAW_FLAGS_INSERT_CBC) {
cbc_data = 4 + ALIGN(data_count, 4) - data_count;
cbc_count += cbc_data;
data_count += cbc_data;
}
}
return cbc_count;
}
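/*
 * Worked example for the CBC arithmetic above (illustrative): with
 * data_count == 10 and TIDAW_FLAGS_INSERT_CBC set,
 * cbc_data = 4 + ALIGN(10, 4) - 10 = 4 + 12 - 10 = 6, i.e. six bytes
 * (padding to the next word plus the 4-byte CBC) are added to both
 * cbc_count and data_count.
 */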
/**
* tcw_finalize - finalize tcw length fields and tidaw list
* @tcw: pointer to the tcw
* @num_tidaws: the number of tidaws used to address input/output data or zero
* if no tida is used
*
* Calculate the input-/output-count and tccbl field in the tcw, add a
* tcat to the tccb and terminate the data tidaw list if used.
*
* Note: in case input- or output-tida is used, the tidaw-list must be stored
* in contiguous storage (no ttic). The tcal field in the tccb must be
* up-to-date.
*/
void tcw_finalize(struct tcw *tcw, int num_tidaws)
{
struct tidaw *tidaw;
struct tccb *tccb;
struct tccb_tcat *tcat;
u32 count;
/* Terminate tidaw list. */
tidaw = tcw_get_data(tcw);
if (num_tidaws > 0)
tidaw[num_tidaws - 1].flags |= TIDAW_FLAGS_LAST;
/* Add tcat to tccb. */
tccb = tcw_get_tccb(tcw);
tcat = (struct tccb_tcat *) &tccb->tca[tca_size(tccb)];
memset(tcat, 0, sizeof(*tcat));
/* Calculate tcw input/output count and tcat transport count. */
count = calc_dcw_count(tccb);
if (tcw->w && (tcw->flags & TCW_FLAGS_OUTPUT_TIDA))
count += calc_cbc_size(tidaw, num_tidaws);
if (tcw->r)
tcw->input_count = count;
else if (tcw->w)
tcw->output_count = count;
tcat->count = ALIGN(count, 4) + 4;
/* Calculate tccbl. */
tcw->tccbl = (sizeof(struct tccb) + tca_size(tccb) +
sizeof(struct tccb_tcat) - 20) >> 2;
}
EXPORT_SYMBOL(tcw_finalize);
/**
* tcw_set_intrg - set the interrogate tcw address of a tcw
* @tcw: the tcw address
* @intrg_tcw: the address of the interrogate tcw
*
* Set the address of the interrogate tcw in the specified tcw.
*/
void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw)
{
tcw->intrg = (u32)virt_to_phys(intrg_tcw);
}
EXPORT_SYMBOL(tcw_set_intrg);
/**
* tcw_set_data - set data address and tida flag of a tcw
* @tcw: the tcw address
* @data: the data address
* @use_tidal: zero if the data address specifies a contiguous block of data,
* non-zero if it specifies a list of tidaws.
*
* Set the input/output data address of a tcw (depending on the value of the
* r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
* is set as well.
*/
void tcw_set_data(struct tcw *tcw, void *data, int use_tidal)
{
if (tcw->r) {
tcw->input = virt_to_phys(data);
if (use_tidal)
tcw->flags |= TCW_FLAGS_INPUT_TIDA;
} else if (tcw->w) {
tcw->output = virt_to_phys(data);
if (use_tidal)
tcw->flags |= TCW_FLAGS_OUTPUT_TIDA;
}
}
EXPORT_SYMBOL(tcw_set_data);
/**
* tcw_set_tccb - set tccb address of a tcw
* @tcw: the tcw address
* @tccb: the tccb address
*
* Set the address of the tccb in the specified tcw.
*/
void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb)
{
tcw->tccb = virt_to_phys(tccb);
}
EXPORT_SYMBOL(tcw_set_tccb);
/**
* tcw_set_tsb - set tsb address of a tcw
* @tcw: the tcw address
* @tsb: the tsb address
*
* Set the address of the tsb in the specified tcw.
*/
void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb)
{
tcw->tsb = virt_to_phys(tsb);
}
EXPORT_SYMBOL(tcw_set_tsb);
/**
* tccb_init - initialize tccb
* @tccb: the tccb address
* @size: the maximum size of the tccb
* @sac: the service-action-code to be used
*
* Initialize the header of the specified tccb by resetting all values to zero
* and filling in defaults for format, sac and initial tcal fields.
*/
void tccb_init(struct tccb *tccb, size_t size, u32 sac)
{
memset(tccb, 0, size);
tccb->tcah.format = TCCB_FORMAT_DEFAULT;
tccb->tcah.sac = sac;
tccb->tcah.tcal = 12;
}
EXPORT_SYMBOL(tccb_init);
/**
* tsb_init - initialize tsb
* @tsb: the tsb address
*
* Initialize the specified tsb by resetting all values to zero.
*/
void tsb_init(struct tsb *tsb)
{
memset(tsb, 0, sizeof(*tsb));
}
EXPORT_SYMBOL(tsb_init);
/**
* tccb_add_dcw - add a dcw to the tccb
* @tccb: the tccb address
* @tccb_size: the maximum tccb size
* @cmd: the dcw command
* @flags: flags for the dcw
* @cd: pointer to control data for this dcw or NULL if none is required
* @cd_count: number of control data bytes for this dcw
* @count: number of data bytes for this dcw
*
* Add a new dcw to the specified tccb by writing the dcw information specified
* by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
* a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
* would exceed the available space as defined by @tccb_size.
*
* Note: the tcal field of the tccb header will be updated to reflect added
* content.
*/
struct dcw *tccb_add_dcw(struct tccb *tccb, size_t tccb_size, u8 cmd, u8 flags,
void *cd, u8 cd_count, u32 count)
{
struct dcw *dcw;
int size;
int tca_offset;
/* Check for space. */
tca_offset = tca_size(tccb);
size = ALIGN(sizeof(struct dcw) + cd_count, 4);
if (sizeof(struct tccb_tcah) + tca_offset + size +
sizeof(struct tccb_tcat) > tccb_size)
return ERR_PTR(-ENOSPC);
/* Add dcw to tca. */
dcw = (struct dcw *) &tccb->tca[tca_offset];
memset(dcw, 0, size);
dcw->cmd = cmd;
dcw->flags = flags;
dcw->count = count;
dcw->cd_count = cd_count;
if (cd)
memcpy(&dcw->cd[0], cd, cd_count);
tccb->tcah.tcal += size;
return dcw;
}
EXPORT_SYMBOL(tccb_add_dcw);
/**
* tcw_add_tidaw - add a tidaw to a tcw
* @tcw: the tcw address
* @num_tidaws: the current number of tidaws
* @flags: flags for the new tidaw
* @addr: address value for the new tidaw
* @count: count value for the new tidaw
*
* Add a new tidaw to the input/output data tidaw-list of the specified tcw
* (depending on the value of the r-flag and w-flag) and return a pointer to
* the new tidaw.
*
* Note: the tidaw-list is assumed to be contiguous with no ttics. The caller
* must ensure that there is enough space for the new tidaw. The last-tidaw
* flag for the last tidaw in the list will be set by tcw_finalize.
*/
struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags,
void *addr, u32 count)
{
struct tidaw *tidaw;
/* Add tidaw to tidaw-list. */
tidaw = ((struct tidaw *) tcw_get_data(tcw)) + num_tidaws;
memset(tidaw, 0, sizeof(struct tidaw));
tidaw->flags = flags;
tidaw->count = count;
tidaw->addr = virt_to_phys(addr);
return tidaw;
}
EXPORT_SYMBOL(tcw_add_tidaw);
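/*
 * Usage sketch (illustrative, not part of the original file): combining the
 * helpers above to describe a transport command with a single dcw and a
 * contiguous data buffer. The command code 0x01 is a hypothetical example;
 * TCCB_MAX_SIZE and TCCB_SAC_DEFAULT are assumed to come from asm/fcx.h, and
 * the tcw is assumed to have been initialized for output elsewhere (the fcx
 * API provides tcw_init() for that).
 */
static int __maybe_unused fcx_example_build(struct tcw *tcw, struct tccb *tccb,
					    void *buf, u32 buf_len)
{
	struct dcw *dcw;

	tccb_init(tccb, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
	dcw = tccb_add_dcw(tccb, TCCB_MAX_SIZE, 0x01, 0, NULL, 0, buf_len);
	if (IS_ERR(dcw))
		return PTR_ERR(dcw);
	tcw_set_tccb(tcw, tccb);
	tcw_set_data(tcw, buf, 0);	/* contiguous buffer, no tidaw list */
	return 0;
}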
| linux-master | drivers/s390/cio/fcx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for s390 eadm subchannels
*
* Copyright IBM Corp. 2012
* Author(s): Sebastian Ott <[email protected]>
*/
#include <linux/kernel_stat.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/io.h>
#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/isc.h>
#include <asm/cio.h>
#include <asm/scsw.h>
#include <asm/eadm.h>
#include "eadm_sch.h"
#include "ioasm.h"
#include "cio.h"
#include "css.h"
#include "orb.h"
MODULE_DESCRIPTION("driver for s390 eadm subchannels");
MODULE_LICENSE("GPL");
#define EADM_TIMEOUT (7 * HZ)
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(eadm_list);
static debug_info_t *eadm_debug;
#define EADM_LOG(imp, txt) do { \
debug_text_event(eadm_debug, imp, txt); \
} while (0)
static void EADM_LOG_HEX(int level, void *data, int length)
{
debug_event(eadm_debug, level, data, length);
}
static void orb_init(union orb *orb)
{
memset(orb, 0, sizeof(union orb));
orb->eadm.compat1 = 1;
orb->eadm.compat2 = 1;
orb->eadm.fmt = 1;
orb->eadm.x = 1;
}
static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
{
union orb *orb = &get_eadm_private(sch)->orb;
int cc;
orb_init(orb);
orb->eadm.aob = (u32)virt_to_phys(aob);
orb->eadm.intparm = (u32)virt_to_phys(sch);
orb->eadm.key = PAGE_DEFAULT_KEY >> 4;
EADM_LOG(6, "start");
EADM_LOG_HEX(6, &sch->schid, sizeof(sch->schid));
cc = ssch(sch->schid, orb);
switch (cc) {
case 0:
sch->schib.scsw.eadm.actl |= SCSW_ACTL_START_PEND;
break;
case 1: /* status pending */
case 2: /* busy */
return -EBUSY;
case 3: /* not operational */
return -ENODEV;
}
return 0;
}
static int eadm_subchannel_clear(struct subchannel *sch)
{
int cc;
cc = csch(sch->schid);
if (cc)
return -ENODEV;
sch->schib.scsw.eadm.actl |= SCSW_ACTL_CLEAR_PEND;
return 0;
}
static void eadm_subchannel_timeout(struct timer_list *t)
{
struct eadm_private *private = from_timer(private, t, timer);
struct subchannel *sch = private->sch;
spin_lock_irq(sch->lock);
EADM_LOG(1, "timeout");
EADM_LOG_HEX(1, &sch->schid, sizeof(sch->schid));
if (eadm_subchannel_clear(sch))
EADM_LOG(0, "clear failed");
spin_unlock_irq(sch->lock);
}
static void eadm_subchannel_set_timeout(struct subchannel *sch, int expires)
{
struct eadm_private *private = get_eadm_private(sch);
if (expires == 0)
del_timer(&private->timer);
else
mod_timer(&private->timer, jiffies + expires);
}
static void eadm_subchannel_irq(struct subchannel *sch)
{
struct eadm_private *private = get_eadm_private(sch);
struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
struct irb *irb = this_cpu_ptr(&cio_irb);
blk_status_t error = BLK_STS_OK;
EADM_LOG(6, "irq");
EADM_LOG_HEX(6, irb, sizeof(*irb));
inc_irq_stat(IRQIO_ADM);
if ((scsw->stctl & (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))
&& scsw->eswf == 1 && irb->esw.eadm.erw.r)
error = BLK_STS_IOERR;
if (scsw->fctl & SCSW_FCTL_CLEAR_FUNC)
error = BLK_STS_TIMEOUT;
eadm_subchannel_set_timeout(sch, 0);
if (private->state != EADM_BUSY) {
EADM_LOG(1, "irq unsol");
EADM_LOG_HEX(1, irb, sizeof(*irb));
private->state = EADM_NOT_OPER;
css_sched_sch_todo(sch, SCH_TODO_EVAL);
return;
}
scm_irq_handler(phys_to_virt(scsw->aob), error);
private->state = EADM_IDLE;
if (private->completion)
complete(private->completion);
}
static struct subchannel *eadm_get_idle_sch(void)
{
struct eadm_private *private;
struct subchannel *sch;
unsigned long flags;
spin_lock_irqsave(&list_lock, flags);
list_for_each_entry(private, &eadm_list, head) {
sch = private->sch;
spin_lock(sch->lock);
if (private->state == EADM_IDLE) {
private->state = EADM_BUSY;
list_move_tail(&private->head, &eadm_list);
spin_unlock(sch->lock);
spin_unlock_irqrestore(&list_lock, flags);
return sch;
}
spin_unlock(sch->lock);
}
spin_unlock_irqrestore(&list_lock, flags);
return NULL;
}
int eadm_start_aob(struct aob *aob)
{
struct eadm_private *private;
struct subchannel *sch;
unsigned long flags;
int ret;
sch = eadm_get_idle_sch();
if (!sch)
return -EBUSY;
spin_lock_irqsave(sch->lock, flags);
eadm_subchannel_set_timeout(sch, EADM_TIMEOUT);
ret = eadm_subchannel_start(sch, aob);
if (!ret)
goto out_unlock;
/* Handle start subchannel failure. */
eadm_subchannel_set_timeout(sch, 0);
private = get_eadm_private(sch);
private->state = EADM_NOT_OPER;
css_sched_sch_todo(sch, SCH_TODO_EVAL);
out_unlock:
spin_unlock_irqrestore(sch->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(eadm_start_aob);
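/*
 * Usage sketch (illustrative, not part of the original file): how a caller
 * might submit an aob. The aob_rq_header linkage mirrors what
 * scm_irq_handler() in scm.c expects; the field names follow asm/eadm.h and
 * are stated here as assumptions, not as a definitive interface description.
 */
static int __maybe_unused eadm_example_submit(struct scm_device *scmdev,
					      struct aob *aob,
					      struct aob_rq_header *aobrq)
{
	int ret;

	aobrq->scmdev = scmdev;
	aob->request.data = (u64)(unsigned long) aobrq;
	ret = eadm_start_aob(aob);
	if (ret == -EBUSY) {
		/* No idle eadm subchannel; a real caller would requeue. */
	}
	return ret;
}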
static int eadm_subchannel_probe(struct subchannel *sch)
{
struct eadm_private *private;
int ret;
private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
if (!private)
return -ENOMEM;
INIT_LIST_HEAD(&private->head);
timer_setup(&private->timer, eadm_subchannel_timeout, 0);
spin_lock_irq(sch->lock);
set_eadm_private(sch, private);
private->state = EADM_IDLE;
private->sch = sch;
sch->isc = EADM_SCH_ISC;
ret = cio_enable_subchannel(sch, (u32)virt_to_phys(sch));
if (ret) {
set_eadm_private(sch, NULL);
spin_unlock_irq(sch->lock);
kfree(private);
goto out;
}
spin_unlock_irq(sch->lock);
spin_lock_irq(&list_lock);
list_add(&private->head, &eadm_list);
spin_unlock_irq(&list_lock);
out:
return ret;
}
static void eadm_quiesce(struct subchannel *sch)
{
struct eadm_private *private = get_eadm_private(sch);
DECLARE_COMPLETION_ONSTACK(completion);
int ret;
spin_lock_irq(sch->lock);
if (private->state != EADM_BUSY)
goto disable;
if (eadm_subchannel_clear(sch))
goto disable;
private->completion = &completion;
spin_unlock_irq(sch->lock);
wait_for_completion_io(&completion);
spin_lock_irq(sch->lock);
private->completion = NULL;
disable:
eadm_subchannel_set_timeout(sch, 0);
do {
ret = cio_disable_subchannel(sch);
} while (ret == -EBUSY);
spin_unlock_irq(sch->lock);
}
static void eadm_subchannel_remove(struct subchannel *sch)
{
struct eadm_private *private = get_eadm_private(sch);
spin_lock_irq(&list_lock);
list_del(&private->head);
spin_unlock_irq(&list_lock);
eadm_quiesce(sch);
spin_lock_irq(sch->lock);
set_eadm_private(sch, NULL);
spin_unlock_irq(sch->lock);
kfree(private);
}
static void eadm_subchannel_shutdown(struct subchannel *sch)
{
eadm_quiesce(sch);
}
/**
* eadm_subchannel_sch_event - process subchannel event
* @sch: subchannel
* @process: non-zero if function is called in process context
*
* An unspecified event occurred for this subchannel. Adjust data according
* to the current operational state of the subchannel. Return zero when the
* event has been handled sufficiently or -EAGAIN when this function should
* be called again in process context.
*/
static int eadm_subchannel_sch_event(struct subchannel *sch, int process)
{
struct eadm_private *private;
unsigned long flags;
spin_lock_irqsave(sch->lock, flags);
if (!device_is_registered(&sch->dev))
goto out_unlock;
if (work_pending(&sch->todo_work))
goto out_unlock;
if (cio_update_schib(sch)) {
css_sched_sch_todo(sch, SCH_TODO_UNREG);
goto out_unlock;
}
private = get_eadm_private(sch);
if (private->state == EADM_NOT_OPER)
private->state = EADM_IDLE;
out_unlock:
spin_unlock_irqrestore(sch->lock, flags);
return 0;
}
static struct css_device_id eadm_subchannel_ids[] = {
{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_ADM, },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, eadm_subchannel_ids);
static struct css_driver eadm_subchannel_driver = {
.drv = {
.name = "eadm_subchannel",
.owner = THIS_MODULE,
},
.subchannel_type = eadm_subchannel_ids,
.irq = eadm_subchannel_irq,
.probe = eadm_subchannel_probe,
.remove = eadm_subchannel_remove,
.shutdown = eadm_subchannel_shutdown,
.sch_event = eadm_subchannel_sch_event,
};
static int __init eadm_sch_init(void)
{
int ret;
if (!css_general_characteristics.eadm)
return -ENXIO;
eadm_debug = debug_register("eadm_log", 16, 1, 16);
if (!eadm_debug)
return -ENOMEM;
debug_register_view(eadm_debug, &debug_hex_ascii_view);
debug_set_level(eadm_debug, 2);
isc_register(EADM_SCH_ISC);
ret = css_driver_register(&eadm_subchannel_driver);
if (ret)
goto cleanup;
return ret;
cleanup:
isc_unregister(EADM_SCH_ISC);
debug_unregister(eadm_debug);
return ret;
}
static void __exit eadm_sch_exit(void)
{
css_driver_unregister(&eadm_subchannel_driver);
isc_unregister(EADM_SCH_ISC);
debug_unregister(eadm_debug);
}
module_init(eadm_sch_init);
module_exit(eadm_sch_exit);
| linux-master | drivers/s390/cio/eadm_sch.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Functions for registration of I/O interruption subclasses on s390.
*
* Copyright IBM Corp. 2008
* Authors: Sebastian Ott <[email protected]>
*/
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/isc.h>
static unsigned int isc_refs[MAX_ISC + 1];
static DEFINE_SPINLOCK(isc_ref_lock);
/**
* isc_register - register an I/O interruption subclass.
* @isc: I/O interruption subclass to register
*
* The number of users for @isc is increased. If this is the first user to
* register @isc, the corresponding I/O interruption subclass mask is enabled.
*
* Context:
* This function must not be called in interrupt context.
*/
void isc_register(unsigned int isc)
{
if (isc > MAX_ISC) {
WARN_ON(1);
return;
}
spin_lock(&isc_ref_lock);
if (isc_refs[isc] == 0)
ctl_set_bit(6, 31 - isc);
isc_refs[isc]++;
spin_unlock(&isc_ref_lock);
}
EXPORT_SYMBOL_GPL(isc_register);
/**
* isc_unregister - unregister an I/O interruption subclass.
* @isc: I/O interruption subclass to unregister
*
* The number of users for @isc is decreased. If this is the last user to
* unregister @isc, the corresponding I/O interruption subclass mask is
* disabled.
* Note: This function must not be called if isc_register() hasn't been called
* before by the driver for @isc.
*
* Context:
* This function must not be called in interrupt context.
*/
void isc_unregister(unsigned int isc)
{
spin_lock(&isc_ref_lock);
/* check for misuse */
if (isc > MAX_ISC || isc_refs[isc] == 0) {
WARN_ON(1);
goto out_unlock;
}
if (isc_refs[isc] == 1)
ctl_clear_bit(6, 31 - isc);
isc_refs[isc]--;
out_unlock:
spin_unlock(&isc_ref_lock);
}
EXPORT_SYMBOL_GPL(isc_unregister);
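/*
 * Usage sketch (illustrative, not part of the original file): registration
 * and deregistration must be balanced and done from process context, as in
 * eadm_sch.c with EADM_SCH_ISC. IO_SCH_ISC is used here purely as an
 * example value from asm/isc.h.
 */
static void __maybe_unused isc_example(void)
{
	isc_register(IO_SCH_ISC);	/* first user enables the isc mask */
	/* ... handle interrupts on this isc ... */
	isc_unregister(IO_SCH_ISC);	/* last user disables it again */
}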
| linux-master | drivers/s390/cio/isc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* driver for channel subsystem
*
* Copyright IBM Corp. 2002, 2010
*
* Author(s): Arnd Bergmann ([email protected])
* Cornelia Huck ([email protected])
*/
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <asm/isc.h>
#include <asm/crw.h>
#include "css.h"
#include "cio.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"
int css_init_done = 0;
int max_ssid;
#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;
int
for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
struct subchannel_id schid;
int ret;
init_subchannel_id(&schid);
do {
do {
ret = fn(schid, data);
if (ret)
break;
} while (schid.sch_no++ < __MAX_SUBCHANNEL);
schid.sch_no = 0;
} while (schid.ssid++ < max_ssid);
return ret;
}
struct cb_data {
void *data;
struct idset *set;
int (*fn_known_sch)(struct subchannel *, void *);
int (*fn_unknown_sch)(struct subchannel_id, void *);
};
static int call_fn_known_sch(struct device *dev, void *data)
{
struct subchannel *sch = to_subchannel(dev);
struct cb_data *cb = data;
int rc = 0;
if (cb->set)
idset_sch_del(cb->set, sch->schid);
if (cb->fn_known_sch)
rc = cb->fn_known_sch(sch, cb->data);
return rc;
}
static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
struct cb_data *cb = data;
int rc = 0;
if (idset_sch_contains(cb->set, schid))
rc = cb->fn_unknown_sch(schid, cb->data);
return rc;
}
static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
struct cb_data *cb = data;
struct subchannel *sch;
int rc = 0;
sch = get_subchannel_by_schid(schid);
if (sch) {
if (cb->fn_known_sch)
rc = cb->fn_known_sch(sch, cb->data);
put_device(&sch->dev);
} else {
if (cb->fn_unknown_sch)
rc = cb->fn_unknown_sch(schid, cb->data);
}
return rc;
}
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
int (*fn_unknown)(struct subchannel_id,
void *), void *data)
{
struct cb_data cb;
int rc;
cb.data = data;
cb.fn_known_sch = fn_known;
cb.fn_unknown_sch = fn_unknown;
if (fn_known && !fn_unknown) {
/* Skip idset allocation in case of known-only loop. */
cb.set = NULL;
return bus_for_each_dev(&css_bus_type, NULL, &cb,
call_fn_known_sch);
}
cb.set = idset_sch_new();
if (!cb.set)
/* fall back to brute force scanning in case of oom */
return for_each_subchannel(call_fn_all_sch, &cb);
idset_fill(cb.set);
/* Process registered subchannels. */
rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
if (rc)
goto out;
/* Process unregistered subchannels. */
if (fn_unknown)
rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
idset_free(cb.set);
return rc;
}
static void css_sch_todo(struct work_struct *work);
static int css_sch_create_locks(struct subchannel *sch)
{
sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
if (!sch->lock)
return -ENOMEM;
spin_lock_init(sch->lock);
mutex_init(&sch->reg_mutex);
return 0;
}
static void css_subchannel_release(struct device *dev)
{
struct subchannel *sch = to_subchannel(dev);
sch->config.intparm = 0;
cio_commit_config(sch);
kfree(sch->driver_override);
kfree(sch->lock);
kfree(sch);
}
static int css_validate_subchannel(struct subchannel_id schid,
struct schib *schib)
{
int err;
switch (schib->pmcw.st) {
case SUBCHANNEL_TYPE_IO:
case SUBCHANNEL_TYPE_MSG:
if (!css_sch_is_valid(schib))
err = -ENODEV;
else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
CIO_MSG_EVENT(6, "Blacklisted device detected "
"at devno %04X, subchannel set %x\n",
schib->pmcw.dev, schid.ssid);
err = -ENODEV;
} else
err = 0;
break;
default:
err = 0;
}
if (err)
goto out;
CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
schid.ssid, schid.sch_no, schib->pmcw.st);
out:
return err;
}
struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
struct schib *schib)
{
struct subchannel *sch;
int ret;
ret = css_validate_subchannel(schid, schib);
if (ret < 0)
return ERR_PTR(ret);
sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
if (!sch)
return ERR_PTR(-ENOMEM);
sch->schid = schid;
sch->schib = *schib;
sch->st = schib->pmcw.st;
ret = css_sch_create_locks(sch);
if (ret)
goto err;
INIT_WORK(&sch->todo_work, css_sch_todo);
sch->dev.release = &css_subchannel_release;
sch->dev.dma_mask = &sch->dma_mask;
device_initialize(&sch->dev);
/*
* The physical addresses for some of the dma structures that can
* belong to a subchannel need to fit 31 bit width (e.g. ccw).
*/
ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
if (ret)
goto err;
/*
* But we don't have such restrictions imposed on the stuff that
* is handled by the streaming API.
*/
ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
if (ret)
goto err;
return sch;
err:
kfree(sch);
return ERR_PTR(ret);
}
static int css_sch_device_register(struct subchannel *sch)
{
int ret;
mutex_lock(&sch->reg_mutex);
dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
sch->schid.sch_no);
ret = device_add(&sch->dev);
mutex_unlock(&sch->reg_mutex);
return ret;
}
/**
* css_sch_device_unregister - unregister a subchannel
* @sch: subchannel to be unregistered
*/
void css_sch_device_unregister(struct subchannel *sch)
{
mutex_lock(&sch->reg_mutex);
if (device_is_registered(&sch->dev))
device_unregister(&sch->dev);
mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
int i;
int mask;
memset(ssd, 0, sizeof(struct chsc_ssd_info));
ssd->path_mask = pmcw->pim;
for (i = 0; i < 8; i++) {
mask = 0x80 >> i;
if (pmcw->pim & mask) {
chp_id_init(&ssd->chpid[i]);
ssd->chpid[i].id = pmcw->chpid[i];
}
}
}
static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
int i;
int mask;
for (i = 0; i < 8; i++) {
mask = 0x80 >> i;
if (ssd->path_mask & mask)
chp_new(ssd->chpid[i]);
}
}
void css_update_ssd_info(struct subchannel *sch)
{
int ret;
ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
if (ret)
ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
ssd_register_chpids(&sch->ssd_info);
}
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct subchannel *sch = to_subchannel(dev);
return sprintf(buf, "%01x\n", sch->st);
}
static DEVICE_ATTR_RO(type);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct subchannel *sch = to_subchannel(dev);
return sprintf(buf, "css:t%01X\n", sch->st);
}
static DEVICE_ATTR_RO(modalias);
static ssize_t driver_override_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct subchannel *sch = to_subchannel(dev);
int ret;
ret = driver_set_override(dev, &sch->driver_override, buf, count);
if (ret)
return ret;
return count;
}
static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct subchannel *sch = to_subchannel(dev);
ssize_t len;
device_lock(dev);
len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
device_unlock(dev);
return len;
}
static DEVICE_ATTR_RW(driver_override);
static struct attribute *subch_attrs[] = {
&dev_attr_type.attr,
&dev_attr_modalias.attr,
&dev_attr_driver_override.attr,
NULL,
};
static struct attribute_group subch_attr_group = {
.attrs = subch_attrs,
};
static const struct attribute_group *default_subch_attr_groups[] = {
&subch_attr_group,
NULL,
};
static ssize_t chpids_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct subchannel *sch = to_subchannel(dev);
struct chsc_ssd_info *ssd = &sch->ssd_info;
ssize_t ret = 0;
int mask;
int chp;
for (chp = 0; chp < 8; chp++) {
mask = 0x80 >> chp;
if (ssd->path_mask & mask)
ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
else
ret += sprintf(buf + ret, "00 ");
}
ret += sprintf(buf + ret, "\n");
return ret;
}
static DEVICE_ATTR_RO(chpids);
static ssize_t pimpampom_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct subchannel *sch = to_subchannel(dev);
struct pmcw *pmcw = &sch->schib.pmcw;
return sprintf(buf, "%02x %02x %02x\n",
pmcw->pim, pmcw->pam, pmcw->pom);
}
static DEVICE_ATTR_RO(pimpampom);
static ssize_t dev_busid_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct subchannel *sch = to_subchannel(dev);
struct pmcw *pmcw = &sch->schib.pmcw;
if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) ||
(pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w))
return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
pmcw->dev);
else
return sysfs_emit(buf, "none\n");
}
static DEVICE_ATTR_RO(dev_busid);
static struct attribute *io_subchannel_type_attrs[] = {
&dev_attr_chpids.attr,
&dev_attr_pimpampom.attr,
&dev_attr_dev_busid.attr,
NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);
static const struct device_type io_subchannel_type = {
.groups = io_subchannel_type_groups,
};
int css_register_subchannel(struct subchannel *sch)
{
int ret;
/* Initialize the subchannel structure */
sch->dev.parent = &channel_subsystems[0]->device;
sch->dev.bus = &css_bus_type;
sch->dev.groups = default_subch_attr_groups;
if (sch->st == SUBCHANNEL_TYPE_IO)
sch->dev.type = &io_subchannel_type;
css_update_ssd_info(sch);
/* make it known to the system */
ret = css_sch_device_register(sch);
if (ret) {
CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
sch->schid.ssid, sch->schid.sch_no, ret);
return ret;
}
return ret;
}
static int css_probe_device(struct subchannel_id schid, struct schib *schib)
{
struct subchannel *sch;
int ret;
sch = css_alloc_subchannel(schid, schib);
if (IS_ERR(sch))
return PTR_ERR(sch);
ret = css_register_subchannel(sch);
if (ret)
put_device(&sch->dev);
return ret;
}
static int
check_subchannel(struct device *dev, const void *data)
{
struct subchannel *sch;
struct subchannel_id *schid = (void *)data;
sch = to_subchannel(dev);
return schid_equal(&sch->schid, schid);
}
struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
struct device *dev;
dev = bus_find_device(&css_bus_type, NULL,
&schid, check_subchannel);
return dev ? to_subchannel(dev) : NULL;
}
/**
* css_sch_is_valid() - check if a subchannel is valid
* @schib: subchannel information block for the subchannel
*/
int css_sch_is_valid(struct schib *schib)
{
if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
return 0;
if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
return 0;
return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
struct schib schib;
int ccode;
if (!slow) {
/* Will be done on the slow path. */
return -EAGAIN;
}
/*
* The first subchannel that is not-operational (ccode==3)
* indicates that there aren't any more devices available.
* If stsch gets an exception, it means the current subchannel set
* is not valid.
*/
ccode = stsch(schid, &schib);
if (ccode)
return (ccode == 3) ? -ENXIO : ccode;
return css_probe_device(schid, &schib);
}
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
int ret = 0;
if (sch->driver) {
if (sch->driver->sch_event)
ret = sch->driver->sch_event(sch, slow);
else
dev_dbg(&sch->dev,
"Got subchannel machine check but "
"no sch_event handler provided.\n");
}
if (ret != 0 && ret != -EAGAIN) {
CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
sch->schid.ssid, sch->schid.sch_no, ret);
}
return ret;
}
static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
struct subchannel *sch;
int ret;
sch = get_subchannel_by_schid(schid);
if (sch) {
ret = css_evaluate_known_subchannel(sch, slow);
put_device(&sch->dev);
} else
ret = css_evaluate_new_subchannel(schid, slow);
if (ret == -EAGAIN)
css_schedule_eval(schid);
}
/**
* css_sched_sch_todo - schedule a subchannel operation
* @sch: subchannel
* @todo: todo
*
* Schedule the operation identified by @todo to be performed on the slow path
* workqueue. Do nothing if another operation with higher priority is already
* scheduled. Needs to be called with subchannel lock held.
*/
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
sch->schid.ssid, sch->schid.sch_no, todo);
if (sch->todo >= todo)
return;
/* Get workqueue ref. */
if (!get_device(&sch->dev))
return;
sch->todo = todo;
if (!queue_work(cio_work_q, &sch->todo_work)) {
/* Already queued, release workqueue ref. */
put_device(&sch->dev);
}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);
static void css_sch_todo(struct work_struct *work)
{
struct subchannel *sch;
enum sch_todo todo;
int ret;
sch = container_of(work, struct subchannel, todo_work);
/* Find out todo. */
spin_lock_irq(sch->lock);
todo = sch->todo;
CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
sch->schid.sch_no, todo);
sch->todo = SCH_TODO_NOTHING;
spin_unlock_irq(sch->lock);
/* Perform todo. */
switch (todo) {
case SCH_TODO_NOTHING:
break;
case SCH_TODO_EVAL:
ret = css_evaluate_known_subchannel(sch, 1);
if (ret == -EAGAIN) {
spin_lock_irq(sch->lock);
css_sched_sch_todo(sch, todo);
spin_unlock_irq(sch->lock);
}
break;
case SCH_TODO_UNREG:
css_sch_device_unregister(sch);
break;
}
/* Release workqueue ref. */
put_device(&sch->dev);
}
static struct idset *slow_subchannel_set;
static DEFINE_SPINLOCK(slow_subchannel_lock);
static DECLARE_WAIT_QUEUE_HEAD(css_eval_wq);
static atomic_t css_eval_scheduled;
static int __init slow_subchannel_init(void)
{
atomic_set(&css_eval_scheduled, 0);
slow_subchannel_set = idset_sch_new();
if (!slow_subchannel_set) {
CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
return -ENOMEM;
}
return 0;
}
static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
int eval;
int rc;
spin_lock_irq(&slow_subchannel_lock);
eval = idset_sch_contains(slow_subchannel_set, sch->schid);
idset_sch_del(slow_subchannel_set, sch->schid);
spin_unlock_irq(&slow_subchannel_lock);
if (eval) {
rc = css_evaluate_known_subchannel(sch, 1);
if (rc == -EAGAIN)
css_schedule_eval(sch->schid);
/*
 * The loop might take a long time on platforms with lots of
 * known devices. Allow scheduling here.
*/
cond_resched();
}
return 0;
}
static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
int eval;
int rc = 0;
spin_lock_irq(&slow_subchannel_lock);
eval = idset_sch_contains(slow_subchannel_set, schid);
idset_sch_del(slow_subchannel_set, schid);
spin_unlock_irq(&slow_subchannel_lock);
if (eval) {
rc = css_evaluate_new_subchannel(schid, 1);
switch (rc) {
case -EAGAIN:
css_schedule_eval(schid);
rc = 0;
break;
case -ENXIO:
case -ENOMEM:
case -EIO:
/* These should abort looping */
spin_lock_irq(&slow_subchannel_lock);
idset_sch_del_subseq(slow_subchannel_set, schid);
spin_unlock_irq(&slow_subchannel_lock);
break;
default:
rc = 0;
}
/* Allow scheduling here since the containing loop might
* take a while. */
cond_resched();
}
return rc;
}
static void css_slow_path_func(struct work_struct *unused)
{
unsigned long flags;
CIO_TRACE_EVENT(4, "slowpath");
for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
NULL);
spin_lock_irqsave(&slow_subchannel_lock, flags);
if (idset_is_empty(slow_subchannel_set)) {
atomic_set(&css_eval_scheduled, 0);
wake_up(&css_eval_wq);
}
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;
void css_schedule_eval(struct subchannel_id schid)
{
unsigned long flags;
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_sch_add(slow_subchannel_set, schid);
atomic_set(&css_eval_scheduled, 1);
queue_delayed_work(cio_work_q, &slow_path_work, 0);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
void css_schedule_eval_all(void)
{
unsigned long flags;
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_fill(slow_subchannel_set);
atomic_set(&css_eval_scheduled, 1);
queue_delayed_work(cio_work_q, &slow_path_work, 0);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
static int __unset_validpath(struct device *dev, void *data)
{
struct idset *set = data;
struct subchannel *sch = to_subchannel(dev);
struct pmcw *pmcw = &sch->schib.pmcw;
/*
 * Here we want to make sure that we consider only those subchannels
 * which do not have an operational device attached. This can be
 * determined from the PAM and POM values of the pmcw. The OPM
 * additionally excludes any path that is currently varied off and
 * should therefore not be considered.
 */
if (sch->st == SUBCHANNEL_TYPE_IO &&
(sch->opm & pmcw->pam & pmcw->pom))
idset_sch_del(set, sch->schid);
return 0;
}
static int __unset_online(struct device *dev, void *data)
{
struct idset *set = data;
struct subchannel *sch = to_subchannel(dev);
if (sch->st == SUBCHANNEL_TYPE_IO && sch->config.ena)
idset_sch_del(set, sch->schid);
return 0;
}
void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
{
unsigned long flags;
struct idset *set;
/* Find unregistered subchannels. */
set = idset_sch_new();
if (!set) {
/* Fallback. */
css_schedule_eval_all();
return;
}
idset_fill(set);
switch (cond) {
case CSS_EVAL_NO_PATH:
bus_for_each_dev(&css_bus_type, NULL, set, __unset_validpath);
break;
case CSS_EVAL_NOT_ONLINE:
bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
break;
default:
break;
}
/* Apply to slow_subchannel_set. */
spin_lock_irqsave(&slow_subchannel_lock, flags);
idset_add_set(slow_subchannel_set, set);
atomic_set(&css_eval_scheduled, 1);
queue_delayed_work(cio_work_q, &slow_path_work, delay);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
idset_free(set);
}
void css_wait_for_slow_path(void)
{
flush_workqueue(cio_work_q);
}
/* Schedule reprobing of all subchannels with no valid operational path. */
void css_schedule_reprobe(void)
{
/* Schedule with a delay to allow merging of subsequent calls. */
css_schedule_eval_cond(CSS_EVAL_NO_PATH, 1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
/*
* Called from the machine check handler for subchannel report words.
*/
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
struct subchannel_id mchk_schid;
struct subchannel *sch;
if (overflow) {
css_schedule_eval_all();
return;
}
CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
crw0->erc, crw0->rsid);
if (crw1)
CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
crw1->anc, crw1->erc, crw1->rsid);
init_subchannel_id(&mchk_schid);
mchk_schid.sch_no = crw0->rsid;
if (crw1)
mchk_schid.ssid = (crw1->rsid >> 4) & 3;
if (crw0->erc == CRW_ERC_PMOD) {
sch = get_subchannel_by_schid(mchk_schid);
if (sch) {
css_update_ssd_info(sch);
put_device(&sch->dev);
}
}
/*
* Since we are always presented with IPI in the CRW, we have to
* use stsch() to find out if the subchannel in question has come
* or gone.
*/
css_evaluate_subchannel(mchk_schid, 0);
}
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
struct cpuid cpu_id;
if (css_general_characteristics.mcss) {
css->global_pgid.pgid_high.ext_cssid.version = 0x80;
css->global_pgid.pgid_high.ext_cssid.cssid =
css->id_valid ? css->cssid : 0;
} else {
css->global_pgid.pgid_high.cpu_addr = stap();
}
get_cpu_id(&cpu_id);
css->global_pgid.cpu_id = cpu_id.ident;
css->global_pgid.cpu_model = cpu_id.machine;
css->global_pgid.tod_high = tod_high;
}
static void channel_subsystem_release(struct device *dev)
{
struct channel_subsystem *css = to_css(dev);
mutex_destroy(&css->mutex);
kfree(css);
}
static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
char *buf)
{
struct channel_subsystem *css = to_css(dev);
if (!css->id_valid)
return -EINVAL;
return sprintf(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);
static ssize_t rescan_store(struct device *dev, struct device_attribute *a,
const char *buf, size_t count)
{
CIO_TRACE_EVENT(4, "usr-rescan");
css_schedule_eval_all();
css_complete_work();
return count;
}
static DEVICE_ATTR_WO(rescan);
static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
char *buf)
{
struct channel_subsystem *css = to_css(dev);
int ret;
mutex_lock(&css->mutex);
ret = sprintf(buf, "%x\n", css->cm_enabled);
mutex_unlock(&css->mutex);
return ret;
}
static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
const char *buf, size_t count)
{
struct channel_subsystem *css = to_css(dev);
unsigned long val;
int ret;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
mutex_lock(&css->mutex);
switch (val) {
case 0:
ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
break;
case 1:
ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
break;
default:
ret = -EINVAL;
}
mutex_unlock(&css->mutex);
return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);
static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
int index)
{
return css_chsc_characteristics.secm ? attr->mode : 0;
}
static struct attribute *cssdev_attrs[] = {
&dev_attr_real_cssid.attr,
&dev_attr_rescan.attr,
NULL,
};
static struct attribute_group cssdev_attr_group = {
.attrs = cssdev_attrs,
};
static struct attribute *cssdev_cm_attrs[] = {
&dev_attr_cm_enable.attr,
NULL,
};
static struct attribute_group cssdev_cm_attr_group = {
.attrs = cssdev_cm_attrs,
.is_visible = cm_enable_mode,
};
static const struct attribute_group *cssdev_attr_groups[] = {
&cssdev_attr_group,
&cssdev_cm_attr_group,
NULL,
};
static int __init setup_css(int nr)
{
struct channel_subsystem *css;
int ret;
css = kzalloc(sizeof(*css), GFP_KERNEL);
if (!css)
return -ENOMEM;
channel_subsystems[nr] = css;
dev_set_name(&css->device, "css%x", nr);
css->device.groups = cssdev_attr_groups;
css->device.release = channel_subsystem_release;
/*
* We currently allocate notifier bits with this (using
* css->device as the device argument with the DMA API)
* and are fine with 64 bit addresses.
*/
ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
if (ret) {
kfree(css);
goto out_err;
}
mutex_init(&css->mutex);
ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
if (!ret) {
css->id_valid = true;
pr_info("Partition identifier %01x.%01x\n", css->cssid,
css->iid);
}
css_generate_pgid(css, (u32) (get_tod_clock() >> 32));
ret = device_register(&css->device);
if (ret) {
put_device(&css->device);
goto out_err;
}
css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
GFP_KERNEL);
if (!css->pseudo_subchannel) {
device_unregister(&css->device);
ret = -ENOMEM;
goto out_err;
}
css->pseudo_subchannel->dev.parent = &css->device;
css->pseudo_subchannel->dev.release = css_subchannel_release;
mutex_init(&css->pseudo_subchannel->reg_mutex);
ret = css_sch_create_locks(css->pseudo_subchannel);
if (ret) {
kfree(css->pseudo_subchannel);
device_unregister(&css->device);
goto out_err;
}
dev_set_name(&css->pseudo_subchannel->dev, "defunct");
ret = device_register(&css->pseudo_subchannel->dev);
if (ret) {
put_device(&css->pseudo_subchannel->dev);
device_unregister(&css->device);
goto out_err;
}
return ret;
out_err:
channel_subsystems[nr] = NULL;
return ret;
}
static int css_reboot_event(struct notifier_block *this,
unsigned long event,
void *ptr)
{
struct channel_subsystem *css;
int ret;
ret = NOTIFY_DONE;
for_each_css(css) {
mutex_lock(&css->mutex);
if (css->cm_enabled)
if (chsc_secm(css, 0))
ret = NOTIFY_BAD;
mutex_unlock(&css->mutex);
}
return ret;
}
static struct notifier_block css_reboot_notifier = {
.notifier_call = css_reboot_event,
};
#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
static struct gen_pool *cio_dma_pool;
/* Currently cio supports only a single css */
struct device *cio_get_dma_css_dev(void)
{
return &channel_subsystems[0]->device;
}
struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
{
struct gen_pool *gp_dma;
void *cpu_addr;
dma_addr_t dma_addr;
int i;
gp_dma = gen_pool_create(3, -1);
if (!gp_dma)
return NULL;
for (i = 0; i < nr_pages; ++i) {
cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
CIO_DMA_GFP);
if (!cpu_addr)
return gp_dma;
gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
dma_addr, PAGE_SIZE, -1);
}
return gp_dma;
}
static void __gp_dma_free_dma(struct gen_pool *pool,
struct gen_pool_chunk *chunk, void *data)
{
size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
dma_free_coherent((struct device *) data, chunk_size,
(void *) chunk->start_addr,
(dma_addr_t) chunk->phys_addr);
}
void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
{
if (!gp_dma)
return;
/* this is quite ugly but there is no better way */
gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
gen_pool_destroy(gp_dma);
}
static int cio_dma_pool_init(void)
{
/* No need to free up the resources: compiled in */
cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
if (!cio_dma_pool)
return -ENOMEM;
return 0;
}
void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
size_t size)
{
dma_addr_t dma_addr;
unsigned long addr;
size_t chunk_size;
if (!gp_dma)
return NULL;
addr = gen_pool_alloc(gp_dma, size);
while (!addr) {
chunk_size = round_up(size, PAGE_SIZE);
addr = (unsigned long) dma_alloc_coherent(dma_dev,
chunk_size, &dma_addr, CIO_DMA_GFP);
if (!addr)
return NULL;
gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
addr = gen_pool_alloc(gp_dma, size);
}
return (void *) addr;
}
void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
{
if (!cpu_addr)
return;
memset(cpu_addr, 0, size);
gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
}
/*
* Allocate dma memory from the css global pool. Intended for memory not
* specific to any single device within the css. The allocated memory
* is not guaranteed to be 31-bit addressable.
*
* Caution: Not suitable for early stuff like console.
*/
void *cio_dma_zalloc(size_t size)
{
return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
}
void cio_dma_free(void *cpu_addr, size_t size)
{
cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
}
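/*
 * Usage sketch (illustrative, not part of the original file): a short-lived
 * buffer from the css-global dma pool. The size is a hypothetical example.
 */
static void __maybe_unused cio_dma_example(void)
{
	void *buf;

	buf = cio_dma_zalloc(64);
	if (!buf)
		return;
	/* ... share buf with the channel subsystem ... */
	cio_dma_free(buf, 64);	/* cleared before returning to the pool */
}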
/*
* Now that the driver core is running, we can setup our channel subsystem.
* The struct subchannel's are created during probing.
*/
static int __init css_bus_init(void)
{
int ret, i;
ret = chsc_init();
if (ret)
return ret;
chsc_determine_css_characteristics();
/* Try to enable MSS. */
ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
if (ret)
max_ssid = 0;
else /* Success. */
max_ssid = __MAX_SSID;
ret = slow_subchannel_init();
if (ret)
goto out;
ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
if (ret)
goto out;
ret = bus_register(&css_bus_type);
if (ret)
goto out;
/* Setup css structure. */
for (i = 0; i <= MAX_CSS_IDX; i++) {
ret = setup_css(i);
if (ret)
goto out_unregister;
}
ret = register_reboot_notifier(&css_reboot_notifier);
if (ret)
goto out_unregister;
ret = cio_dma_pool_init();
if (ret)
goto out_unregister_rn;
airq_init();
css_init_done = 1;
/* Enable default isc for I/O subchannels. */
isc_register(IO_SCH_ISC);
return 0;
out_unregister_rn:
unregister_reboot_notifier(&css_reboot_notifier);
out_unregister:
while (i-- > 0) {
struct channel_subsystem *css = channel_subsystems[i];
device_unregister(&css->pseudo_subchannel->dev);
device_unregister(&css->device);
}
bus_unregister(&css_bus_type);
out:
crw_unregister_handler(CRW_RSC_SCH);
idset_free(slow_subchannel_set);
chsc_init_cleanup();
pr_alert("The CSS device driver initialization failed with "
"errno=%d\n", ret);
return ret;
}
static void __init css_bus_cleanup(void)
{
struct channel_subsystem *css;
for_each_css(css) {
device_unregister(&css->pseudo_subchannel->dev);
device_unregister(&css->device);
}
bus_unregister(&css_bus_type);
crw_unregister_handler(CRW_RSC_SCH);
idset_free(slow_subchannel_set);
chsc_init_cleanup();
isc_unregister(IO_SCH_ISC);
}
static int __init channel_subsystem_init(void)
{
int ret;
ret = css_bus_init();
if (ret)
return ret;
cio_work_q = create_singlethread_workqueue("cio");
if (!cio_work_q) {
ret = -ENOMEM;
goto out_bus;
}
ret = io_subchannel_init();
if (ret)
goto out_wq;
/* Register subchannels which are already in use. */
cio_register_early_subchannels();
/* Start initial subchannel evaluation. */
css_schedule_eval_all();
return ret;
out_wq:
destroy_workqueue(cio_work_q);
out_bus:
css_bus_cleanup();
return ret;
}
subsys_initcall(channel_subsystem_init);
static int css_settle(struct device_driver *drv, void *unused)
{
struct css_driver *cssdrv = to_cssdriver(drv);
if (cssdrv->settle)
return cssdrv->settle();
return 0;
}
int css_complete_work(void)
{
int ret;
/* Wait for the evaluation of subchannels to finish. */
ret = wait_event_interruptible(css_eval_wq,
atomic_read(&css_eval_scheduled) == 0);
if (ret)
return -EINTR;
flush_workqueue(cio_work_q);
/* Wait for the subchannel type specific initialization to finish */
return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}
/*
* Wait for the initialization of devices to finish, to make sure we are
 * done with our setup before the search for the root device starts.
*/
static int __init channel_subsystem_init_sync(void)
{
css_complete_work();
return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);
#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
int ret;
/* Handle pending CRW's. */
crw_wait_for_channel_report();
ret = css_complete_work();
return ret ? ret : count;
}
static const struct proc_ops cio_settle_proc_ops = {
.proc_open = nonseekable_open,
.proc_write = cio_settle_write,
.proc_lseek = no_llseek,
};
static int __init cio_settle_init(void)
{
struct proc_dir_entry *entry;
entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
if (!entry)
return -ENOMEM;
return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/
int sch_is_pseudo_sch(struct subchannel *sch)
{
if (!sch->dev.parent)
return 0;
return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}
static int css_bus_match(struct device *dev, struct device_driver *drv)
{
struct subchannel *sch = to_subchannel(dev);
struct css_driver *driver = to_cssdriver(drv);
struct css_device_id *id;
/* When driver_override is set, only bind to the matching driver */
if (sch->driver_override && strcmp(sch->driver_override, drv->name))
return 0;
for (id = driver->subchannel_type; id->match_flags; id++) {
if (sch->st == id->type)
return 1;
}
return 0;
}
static int css_probe(struct device *dev)
{
struct subchannel *sch;
int ret;
sch = to_subchannel(dev);
sch->driver = to_cssdriver(dev->driver);
ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
if (ret)
sch->driver = NULL;
return ret;
}
static void css_remove(struct device *dev)
{
struct subchannel *sch;
sch = to_subchannel(dev);
if (sch->driver->remove)
sch->driver->remove(sch);
sch->driver = NULL;
}
static void css_shutdown(struct device *dev)
{
struct subchannel *sch;
sch = to_subchannel(dev);
if (sch->driver && sch->driver->shutdown)
sch->driver->shutdown(sch);
}
static int css_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct subchannel *sch = to_subchannel(dev);
int ret;
ret = add_uevent_var(env, "ST=%01X", sch->st);
if (ret)
return ret;
ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
return ret;
}
static struct bus_type css_bus_type = {
.name = "css",
.match = css_bus_match,
.probe = css_probe,
.remove = css_remove,
.shutdown = css_shutdown,
.uevent = css_uevent,
};
/**
* css_driver_register - register a css driver
* @cdrv: css driver to register
*
* This is mainly a wrapper around driver_register that sets name
* and bus_type in the embedded struct device_driver correctly.
*/
int css_driver_register(struct css_driver *cdrv)
{
cdrv->drv.bus = &css_bus_type;
return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
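/*
 * Usage sketch (illustrative, not part of the original file): the shape of
 * a css driver registration. For a complete, real example see
 * eadm_subchannel_driver in eadm_sch.c; the id table and name below are
 * placeholders.
 */
static struct css_device_id example_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};

static struct css_driver example_css_driver = {
	.drv = {
		.name = "example_subchannel",
	},
	.subchannel_type = example_subchannel_ids,
};

static int __maybe_unused example_css_init(void)
{
	return css_driver_register(&example_css_driver);
}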
/**
* css_driver_unregister - unregister a css driver
* @cdrv: css driver to unregister
*
* This is a wrapper around driver_unregister.
*/
void css_driver_unregister(struct css_driver *cdrv)
{
driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);
| linux-master | drivers/s390/cio/css.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2012
* Author(s): Peter Oberparleiter <[email protected]>
*/
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include "idset.h"
#include "css.h"
struct idset {
int num_ssid;
int num_id;
unsigned long bitmap[];
};
static inline unsigned long bitmap_size(int num_ssid, int num_id)
{
return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long);
}
static struct idset *idset_new(int num_ssid, int num_id)
{
struct idset *set;
set = vmalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id));
if (set) {
set->num_ssid = num_ssid;
set->num_id = num_id;
memset(set->bitmap, 0, bitmap_size(num_ssid, num_id));
}
return set;
}
void idset_free(struct idset *set)
{
vfree(set);
}
void idset_fill(struct idset *set)
{
memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id));
}
static inline void idset_add(struct idset *set, int ssid, int id)
{
set_bit(ssid * set->num_id + id, set->bitmap);
}
static inline void idset_del(struct idset *set, int ssid, int id)
{
clear_bit(ssid * set->num_id + id, set->bitmap);
}
static inline int idset_contains(struct idset *set, int ssid, int id)
{
return test_bit(ssid * set->num_id + id, set->bitmap);
}
struct idset *idset_sch_new(void)
{
return idset_new(max_ssid + 1, __MAX_SUBCHANNEL + 1);
}
void idset_sch_add(struct idset *set, struct subchannel_id schid)
{
idset_add(set, schid.ssid, schid.sch_no);
}
void idset_sch_del(struct idset *set, struct subchannel_id schid)
{
idset_del(set, schid.ssid, schid.sch_no);
}
/* Clear ids starting from @schid up to end of subchannel set. */
void idset_sch_del_subseq(struct idset *set, struct subchannel_id schid)
{
int pos = schid.ssid * set->num_id + schid.sch_no;
bitmap_clear(set->bitmap, pos, set->num_id - schid.sch_no);
}
int idset_sch_contains(struct idset *set, struct subchannel_id schid)
{
return idset_contains(set, schid.ssid, schid.sch_no);
}
int idset_is_empty(struct idset *set)
{
return bitmap_empty(set->bitmap, set->num_ssid * set->num_id);
}
void idset_add_set(struct idset *to, struct idset *from)
{
int len = min(to->num_ssid * to->num_id, from->num_ssid * from->num_id);
bitmap_or(to->bitmap, to->bitmap, from->bitmap, len);
}
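/*
 * Usage sketch (illustrative, not part of the original file): typical idset
 * lifetime, mirroring how css.c maintains slow_subchannel_set.
 */
static int __maybe_unused idset_example(struct subchannel_id schid)
{
	struct idset *set;

	set = idset_sch_new();
	if (!set)
		return -ENOMEM;
	idset_fill(set);		/* mark every possible subchannel */
	idset_sch_del(set, schid);	/* ...minus the one handled already */
	if (!idset_is_empty(set)) {
		/* evaluate the remaining ids here */
	}
	idset_free(set);
	return 0;
}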
| linux-master | drivers/s390/cio/idset.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Recognize and maintain s390 storage class memory.
*
* Copyright IBM Corp. 2012
* Author(s): Sebastian Ott <[email protected]>
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
#include <asm/eadm.h>
#include "chsc.h"
static struct device *scm_root;
#define to_scm_dev(n) container_of(n, struct scm_device, dev)
#define to_scm_drv(d) container_of(d, struct scm_driver, drv)
static int scmdev_probe(struct device *dev)
{
struct scm_device *scmdev = to_scm_dev(dev);
struct scm_driver *scmdrv = to_scm_drv(dev->driver);
return scmdrv->probe ? scmdrv->probe(scmdev) : -ENODEV;
}
static void scmdev_remove(struct device *dev)
{
struct scm_device *scmdev = to_scm_dev(dev);
struct scm_driver *scmdrv = to_scm_drv(dev->driver);
if (scmdrv->remove)
scmdrv->remove(scmdev);
}
static int scmdev_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
return add_uevent_var(env, "MODALIAS=scm:scmdev");
}
static struct bus_type scm_bus_type = {
.name = "scm",
.probe = scmdev_probe,
.remove = scmdev_remove,
.uevent = scmdev_uevent,
};
/**
* scm_driver_register() - register a scm driver
* @scmdrv: driver to be registered
*/
int scm_driver_register(struct scm_driver *scmdrv)
{
struct device_driver *drv = &scmdrv->drv;
drv->bus = &scm_bus_type;
return driver_register(drv);
}
EXPORT_SYMBOL_GPL(scm_driver_register);
/**
* scm_driver_unregister() - deregister a scm driver
* @scmdrv: driver to be deregistered
*/
void scm_driver_unregister(struct scm_driver *scmdrv)
{
driver_unregister(&scmdrv->drv);
}
EXPORT_SYMBOL_GPL(scm_driver_unregister);
void scm_irq_handler(struct aob *aob, blk_status_t error)
{
struct aob_rq_header *aobrq = (void *) aob->request.data;
struct scm_device *scmdev = aobrq->scmdev;
struct scm_driver *scmdrv = to_scm_drv(scmdev->dev.driver);
scmdrv->handler(scmdev, aobrq->data, error);
}
EXPORT_SYMBOL_GPL(scm_irq_handler);
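/*
 * Usage sketch (illustrative, not part of the original file): a minimal scm
 * driver. The callback signatures follow struct scm_driver in asm/eadm.h;
 * the bodies are placeholders.
 */
static int example_scm_probe(struct scm_device *scmdev)
{
	return 0;	/* accept the device */
}

static void example_scm_handler(struct scm_device *scmdev, void *data,
				blk_status_t error)
{
	/* invoked via scm_irq_handler() when an aob completes */
}

static struct scm_driver example_scm_driver = {
	.drv = {
		.name = "example_scm",
		.owner = THIS_MODULE,
	},
	.probe = example_scm_probe,
	.handler = example_scm_handler,
};

static int __maybe_unused example_scm_init(void)
{
	return scm_driver_register(&example_scm_driver);
}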
#define scm_attr(name) \
static ssize_t show_##name(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct scm_device *scmdev = to_scm_dev(dev); \
int ret; \
\
device_lock(dev); \
ret = sprintf(buf, "%u\n", scmdev->attrs.name); \
device_unlock(dev); \
\
return ret; \
} \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
scm_attr(persistence);
scm_attr(oper_state);
scm_attr(data_state);
scm_attr(rank);
scm_attr(release);
scm_attr(res_id);
static struct attribute *scmdev_attrs[] = {
&dev_attr_persistence.attr,
&dev_attr_oper_state.attr,
&dev_attr_data_state.attr,
&dev_attr_rank.attr,
&dev_attr_release.attr,
&dev_attr_res_id.attr,
NULL,
};
static struct attribute_group scmdev_attr_group = {
.attrs = scmdev_attrs,
};
static const struct attribute_group *scmdev_attr_groups[] = {
&scmdev_attr_group,
NULL,
};
static void scmdev_release(struct device *dev)
{
struct scm_device *scmdev = to_scm_dev(dev);
kfree(scmdev);
}
static void scmdev_setup(struct scm_device *scmdev, struct sale *sale,
unsigned int size, unsigned int max_blk_count)
{
dev_set_name(&scmdev->dev, "%016llx", (unsigned long long) sale->sa);
scmdev->nr_max_block = max_blk_count;
scmdev->address = sale->sa;
scmdev->size = 1UL << size;
scmdev->attrs.rank = sale->rank;
scmdev->attrs.persistence = sale->p;
scmdev->attrs.oper_state = sale->op_state;
scmdev->attrs.data_state = sale->data_state;
scmdev->attrs.release = sale->r;
scmdev->attrs.res_id = sale->rid;
scmdev->dev.parent = scm_root;
scmdev->dev.bus = &scm_bus_type;
scmdev->dev.release = scmdev_release;
scmdev->dev.groups = scmdev_attr_groups;
}
/*
* Check for state-changes, notify the driver and userspace.
*/
static void scmdev_update(struct scm_device *scmdev, struct sale *sale)
{
struct scm_driver *scmdrv;
bool changed;
device_lock(&scmdev->dev);
changed = scmdev->attrs.rank != sale->rank ||
scmdev->attrs.oper_state != sale->op_state;
scmdev->attrs.rank = sale->rank;
scmdev->attrs.oper_state = sale->op_state;
if (!scmdev->dev.driver)
goto out;
scmdrv = to_scm_drv(scmdev->dev.driver);
if (changed && scmdrv->notify)
scmdrv->notify(scmdev, SCM_CHANGE);
out:
device_unlock(&scmdev->dev);
if (changed)
kobject_uevent(&scmdev->dev.kobj, KOBJ_CHANGE);
}
static int check_address(struct device *dev, const void *data)
{
struct scm_device *scmdev = to_scm_dev(dev);
const struct sale *sale = data;
return scmdev->address == sale->sa;
}
static struct scm_device *scmdev_find(struct sale *sale)
{
struct device *dev;
dev = bus_find_device(&scm_bus_type, NULL, sale, check_address);
return dev ? to_scm_dev(dev) : NULL;
}
static int scm_add(struct chsc_scm_info *scm_info, size_t num)
{
struct sale *sale, *scmal = scm_info->scmal;
struct scm_device *scmdev;
int ret;
for (sale = scmal; sale < scmal + num; sale++) {
scmdev = scmdev_find(sale);
if (scmdev) {
scmdev_update(scmdev, sale);
/* Release reference from scmdev_find(). */
put_device(&scmdev->dev);
continue;
}
scmdev = kzalloc(sizeof(*scmdev), GFP_KERNEL);
if (!scmdev)
return -ENOMEM;
scmdev_setup(scmdev, sale, scm_info->is, scm_info->mbc);
ret = device_register(&scmdev->dev);
if (ret) {
/* Release reference from device_initialize(). */
put_device(&scmdev->dev);
return ret;
}
}
return 0;
}
int scm_update_information(void)
{
struct chsc_scm_info *scm_info;
u64 token = 0;
size_t num;
int ret;
scm_info = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
if (!scm_info)
return -ENOMEM;
do {
ret = chsc_scm_info(scm_info, token);
if (ret)
break;
num = (scm_info->response.length -
(offsetof(struct chsc_scm_info, scmal) -
offsetof(struct chsc_scm_info, response))
) / sizeof(struct sale);
ret = scm_add(scm_info, num);
if (ret)
break;
token = scm_info->restok;
} while (token);
free_page((unsigned long)scm_info);
return ret;
}
static int scm_dev_avail(struct device *dev, void *unused)
{
struct scm_driver *scmdrv = to_scm_drv(dev->driver);
struct scm_device *scmdev = to_scm_dev(dev);
if (dev->driver && scmdrv->notify)
scmdrv->notify(scmdev, SCM_AVAIL);
return 0;
}
int scm_process_availability_information(void)
{
return bus_for_each_dev(&scm_bus_type, NULL, NULL, scm_dev_avail);
}
static int __init scm_init(void)
{
int ret;
ret = bus_register(&scm_bus_type);
if (ret)
return ret;
scm_root = root_device_register("scm");
if (IS_ERR(scm_root)) {
bus_unregister(&scm_bus_type);
return PTR_ERR(scm_root);
}
scm_update_information();
return 0;
}
subsys_initcall_sync(scm_init);
| linux-master | drivers/s390/cio/scm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* bus driver for ccwgroup
*
* Copyright IBM Corp. 2002, 2012
*
* Author(s): Arnd Bergmann ([email protected])
* Cornelia Huck ([email protected])
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/dcache.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include "device.h"
#define CCW_BUS_ID_SIZE 10
/* In Linux 2.4, we had a channel device layer called "chandev"
* that did all sorts of obscure stuff for networking devices.
* This is another driver that serves as a replacement for just
* one of its functions, namely the translation of single subchannels
* to devices that use multiple subchannels.
*/
static struct bus_type ccwgroup_bus_type;
static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
{
int i;
char str[16];
for (i = 0; i < gdev->count; i++) {
sprintf(str, "cdev%d", i);
sysfs_remove_link(&gdev->dev.kobj, str);
sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device");
}
}
/**
* ccwgroup_set_online() - enable a ccwgroup device
* @gdev: target ccwgroup device
*
* This function attempts to put the ccwgroup device into the online state.
* Returns:
* %0 on success and a negative error value on failure.
*/
int ccwgroup_set_online(struct ccwgroup_device *gdev)
{
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
int ret = -EINVAL;
if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
return -EAGAIN;
if (gdev->state == CCWGROUP_ONLINE)
goto out;
if (gdrv->set_online)
ret = gdrv->set_online(gdev);
if (ret)
goto out;
gdev->state = CCWGROUP_ONLINE;
out:
atomic_set(&gdev->onoff, 0);
return ret;
}
EXPORT_SYMBOL(ccwgroup_set_online);
/**
* ccwgroup_set_offline() - disable a ccwgroup device
* @gdev: target ccwgroup device
* @call_gdrv: Call the registered gdrv set_offline function
*
* This function attempts to put the ccwgroup device into the offline state.
* Returns:
* %0 on success and a negative error value on failure.
*/
int ccwgroup_set_offline(struct ccwgroup_device *gdev, bool call_gdrv)
{
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
int ret = -EINVAL;
if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
return -EAGAIN;
if (gdev->state == CCWGROUP_OFFLINE)
goto out;
if (!call_gdrv) {
ret = 0;
goto offline;
}
if (gdrv->set_offline)
ret = gdrv->set_offline(gdev);
if (ret)
goto out;
offline:
gdev->state = CCWGROUP_OFFLINE;
out:
atomic_set(&gdev->onoff, 0);
return ret;
}
EXPORT_SYMBOL(ccwgroup_set_offline);
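/*
 * Usage sketch (illustrative, not part of the original file): restarting a
 * group device by cycling it offline and online, similar to what the sysfs
 * online attribute below triggers.
 */
static int __maybe_unused ccwgroup_example_restart(struct ccwgroup_device *gdev)
{
	int ret;

	ret = ccwgroup_set_offline(gdev, true);
	if (ret)
		return ret;
	return ccwgroup_set_online(gdev);
}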
static ssize_t ccwgroup_online_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
unsigned long value;
int ret;
device_lock(dev);
if (!dev->driver) {
ret = -EINVAL;
goto out;
}
ret = kstrtoul(buf, 0, &value);
if (ret)
goto out;
if (value == 1)
ret = ccwgroup_set_online(gdev);
else if (value == 0)
ret = ccwgroup_set_offline(gdev, true);
else
ret = -EINVAL;
out:
device_unlock(dev);
return (ret == 0) ? count : ret;
}
static ssize_t ccwgroup_online_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
int online;
online = (gdev->state == CCWGROUP_ONLINE) ? 1 : 0;
return scnprintf(buf, PAGE_SIZE, "%d\n", online);
}
/*
* Provide an 'ungroup' attribute so the user can remove group devices no
* longer needed or accidentally created. Saves memory :)
*/
static void ccwgroup_ungroup(struct ccwgroup_device *gdev)
{
mutex_lock(&gdev->reg_mutex);
if (device_is_registered(&gdev->dev)) {
__ccwgroup_remove_symlinks(gdev);
device_unregister(&gdev->dev);
}
mutex_unlock(&gdev->reg_mutex);
}
static ssize_t ccwgroup_ungroup_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
int rc = 0;
/* Prevent concurrent online/offline processing and ungrouping. */
if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
return -EAGAIN;
if (gdev->state != CCWGROUP_OFFLINE) {
rc = -EINVAL;
goto out;
}
if (device_remove_file_self(dev, attr))
ccwgroup_ungroup(gdev);
else
rc = -ENODEV;
out:
if (rc) {
/* Release onoff "lock" when ungrouping failed. */
atomic_set(&gdev->onoff, 0);
return rc;
}
return count;
}
static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
static struct attribute *ccwgroup_dev_attrs[] = {
&dev_attr_online.attr,
&dev_attr_ungroup.attr,
NULL,
};
ATTRIBUTE_GROUPS(ccwgroup_dev);
static void ccwgroup_ungroup_workfn(struct work_struct *work)
{
struct ccwgroup_device *gdev =
container_of(work, struct ccwgroup_device, ungroup_work);
ccwgroup_ungroup(gdev);
put_device(&gdev->dev);
}
static void ccwgroup_release(struct device *dev)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
unsigned int i;
for (i = 0; i < gdev->count; i++) {
struct ccw_device *cdev = gdev->cdev[i];
unsigned long flags;
if (cdev) {
spin_lock_irqsave(cdev->ccwlock, flags);
if (dev_get_drvdata(&cdev->dev) == gdev)
dev_set_drvdata(&cdev->dev, NULL);
spin_unlock_irqrestore(cdev->ccwlock, flags);
put_device(&cdev->dev);
}
}
kfree(gdev);
}
static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
{
char str[16];
int i, rc;
for (i = 0; i < gdev->count; i++) {
rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj,
&gdev->dev.kobj, "group_device");
if (rc) {
for (--i; i >= 0; i--)
sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
"group_device");
return rc;
}
}
for (i = 0; i < gdev->count; i++) {
sprintf(str, "cdev%d", i);
rc = sysfs_create_link(&gdev->dev.kobj,
&gdev->cdev[i]->dev.kobj, str);
if (rc) {
for (--i; i >= 0; i--) {
sprintf(str, "cdev%d", i);
sysfs_remove_link(&gdev->dev.kobj, str);
}
for (i = 0; i < gdev->count; i++)
sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
"group_device");
return rc;
}
}
return 0;
}
static int __get_next_id(const char **buf, struct ccw_dev_id *id)
{
unsigned int cssid, ssid, devno;
int ret = 0, len;
char *start, *end;
start = (char *)*buf;
end = strchr(start, ',');
if (!end) {
/* Last entry. Strip trailing newline, if applicable. */
end = strchr(start, '\n');
if (end)
*end = '\0';
len = strlen(start) + 1;
} else {
len = end - start + 1;
end++;
}
if (len <= CCW_BUS_ID_SIZE) {
if (sscanf(start, "%2x.%1x.%04x", &cssid, &ssid, &devno) != 3)
ret = -EINVAL;
} else
ret = -EINVAL;
if (!ret) {
id->ssid = ssid;
id->devno = devno;
}
*buf = end;
return ret;
}
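/*
 * Example (illustrative, bus ids hypothetical): for the input string
 * "0.0.f5f0,0.0.f5f1\n", the first __get_next_id() call fills in
 * ssid 0 / devno 0xf5f0 and advances *buf past the comma; the second
 * call consumes "0.0.f5f1", strips the trailing newline and leaves
 * *buf pointing at the resulting '\0'. Note that the cssid is parsed
 * for validation but not stored in the ccw_dev_id.
 */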
/**
* ccwgroup_create_dev() - create and register a ccw group device
* @parent: parent device for the new device
* @gdrv: driver for the new group device
* @num_devices: number of slave devices
* @buf: buffer containing comma separated bus ids of slave devices
*
* Create and register a new ccw group device as a child of @parent. Slave
* devices are obtained from the list of bus ids given in @buf.
* Returns:
* %0 on success and an error code on failure.
* Context:
* non-atomic
*/
int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv,
int num_devices, const char *buf)
{
struct ccwgroup_device *gdev;
struct ccw_dev_id dev_id;
int rc, i;
if (num_devices < 1)
return -EINVAL;
gdev = kzalloc(struct_size(gdev, cdev, num_devices), GFP_KERNEL);
if (!gdev)
return -ENOMEM;
atomic_set(&gdev->onoff, 0);
mutex_init(&gdev->reg_mutex);
mutex_lock(&gdev->reg_mutex);
INIT_WORK(&gdev->ungroup_work, ccwgroup_ungroup_workfn);
gdev->count = num_devices;
gdev->dev.bus = &ccwgroup_bus_type;
gdev->dev.parent = parent;
gdev->dev.release = ccwgroup_release;
device_initialize(&gdev->dev);
for (i = 0; i < num_devices && buf; i++) {
rc = __get_next_id(&buf, &dev_id);
if (rc != 0)
goto error;
gdev->cdev[i] = get_ccwdev_by_dev_id(&dev_id);
/*
* All devices have to be of the same type in
* order to be grouped.
*/
if (!gdev->cdev[i] || !gdev->cdev[i]->drv ||
gdev->cdev[i]->drv != gdev->cdev[0]->drv ||
gdev->cdev[i]->id.driver_info !=
gdev->cdev[0]->id.driver_info) {
rc = -EINVAL;
goto error;
}
/* Don't allow a device to belong to more than one group. */
spin_lock_irq(gdev->cdev[i]->ccwlock);
if (dev_get_drvdata(&gdev->cdev[i]->dev)) {
spin_unlock_irq(gdev->cdev[i]->ccwlock);
rc = -EINVAL;
goto error;
}
dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
spin_unlock_irq(gdev->cdev[i]->ccwlock);
}
/* Check for sufficient number of bus ids. */
if (i < num_devices) {
rc = -EINVAL;
goto error;
}
/* Check for trailing stuff. */
if (i == num_devices && buf && strlen(buf) > 0) {
rc = -EINVAL;
goto error;
}
/* Check if the devices are bound to the required ccw driver. */
if (gdrv && gdrv->ccw_driver &&
gdev->cdev[0]->drv != gdrv->ccw_driver) {
rc = -EINVAL;
goto error;
}
dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
if (gdrv) {
gdev->dev.driver = &gdrv->driver;
rc = gdrv->setup ? gdrv->setup(gdev) : 0;
if (rc)
goto error;
}
rc = device_add(&gdev->dev);
if (rc)
goto error;
rc = __ccwgroup_create_symlinks(gdev);
if (rc) {
device_del(&gdev->dev);
goto error;
}
mutex_unlock(&gdev->reg_mutex);
return 0;
error:
mutex_unlock(&gdev->reg_mutex);
put_device(&gdev->dev);
return rc;
}
EXPORT_SYMBOL(ccwgroup_create_dev);
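/*
 * Illustrative usage (not part of this file): a ccwgroup driver such
 * as qeth typically calls ccwgroup_create_dev() from the store function
 * of its driver-level "group" attribute, so that
 *
 *	echo 0.0.f5f0,0.0.f5f1,0.0.f5f2 > /sys/bus/ccwgroup/drivers/qeth/group
 *
 * ends up as ccwgroup_create_dev(parent, gdrv, 3, buf). The bus ids
 * above are hypothetical.
 */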
static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
void *data)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(data);
if (action == BUS_NOTIFY_UNBOUND_DRIVER) {
get_device(&gdev->dev);
schedule_work(&gdev->ungroup_work);
}
return NOTIFY_OK;
}
static struct notifier_block ccwgroup_nb = {
.notifier_call = ccwgroup_notifier
};
static int __init init_ccwgroup(void)
{
int ret;
ret = bus_register(&ccwgroup_bus_type);
if (ret)
return ret;
ret = bus_register_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
if (ret)
bus_unregister(&ccwgroup_bus_type);
return ret;
}
static void __exit cleanup_ccwgroup(void)
{
bus_unregister_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
bus_unregister(&ccwgroup_bus_type);
}
module_init(init_ccwgroup);
module_exit(cleanup_ccwgroup);
/************************** driver stuff ******************************/
static void ccwgroup_remove(struct device *dev)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
if (gdrv->remove)
gdrv->remove(gdev);
}
static void ccwgroup_shutdown(struct device *dev)
{
struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
if (!dev->driver)
return;
if (gdrv->shutdown)
gdrv->shutdown(gdev);
}
static struct bus_type ccwgroup_bus_type = {
.name = "ccwgroup",
.dev_groups = ccwgroup_dev_groups,
.remove = ccwgroup_remove,
.shutdown = ccwgroup_shutdown,
};
bool dev_is_ccwgroup(struct device *dev)
{
return dev->bus == &ccwgroup_bus_type;
}
EXPORT_SYMBOL(dev_is_ccwgroup);
/**
* ccwgroup_driver_register() - register a ccw group driver
* @cdriver: driver to be registered
*
* This function is mainly a wrapper around driver_register().
*/
int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
{
/* register our new driver with the core */
cdriver->driver.bus = &ccwgroup_bus_type;
return driver_register(&cdriver->driver);
}
EXPORT_SYMBOL(ccwgroup_driver_register);
/**
* ccwgroup_driver_unregister() - deregister a ccw group driver
* @cdriver: driver to be deregistered
*
* This function is mainly a wrapper around driver_unregister().
*/
void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
{
driver_unregister(&cdriver->driver);
}
EXPORT_SYMBOL(ccwgroup_driver_unregister);
/**
* ccwgroup_probe_ccwdev() - probe function for slave devices
* @cdev: ccw device to be probed
*
* This is a dummy probe function for ccw devices that are slave devices in
* a ccw group device.
* Returns:
* always %0
*/
int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
{
return 0;
}
EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
/**
* ccwgroup_remove_ccwdev() - remove function for slave devices
* @cdev: ccw device to be removed
*
* This is a remove function for ccw devices that are slave devices in a ccw
* group device. It sets the ccw device offline and also deregisters the
* embedding ccw group device.
*/
void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
{
struct ccwgroup_device *gdev;
/* Ignore offlining errors, device is gone anyway. */
ccw_device_set_offline(cdev);
/* If one of its devices is gone, the whole group is done for. */
spin_lock_irq(cdev->ccwlock);
gdev = dev_get_drvdata(&cdev->dev);
if (!gdev) {
spin_unlock_irq(cdev->ccwlock);
return;
}
/* Get ccwgroup device reference for local processing. */
get_device(&gdev->dev);
spin_unlock_irq(cdev->ccwlock);
/* Unregister group device. */
ccwgroup_ungroup(gdev);
/* Release ccwgroup device reference for local processing. */
put_device(&gdev->dev);
}
EXPORT_SYMBOL(ccwgroup_remove_ccwdev);
MODULE_LICENSE("GPL");
| linux-master | drivers/s390/cio/ccwgroup.c |
// SPDX-License-Identifier: GPL-2.0
/*
* CCW device SENSE ID I/O handling.
*
* Copyright IBM Corp. 2002, 2009
* Author(s): Cornelia Huck <[email protected]>
* Martin Schwidefsky <[email protected]>
* Peter Oberparleiter <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/ccwdev.h>
#include <asm/setup.h>
#include <asm/cio.h>
#include <asm/diag.h>
#include "cio.h"
#include "cio_debug.h"
#include "device.h"
#include "io_sch.h"
#define SENSE_ID_RETRIES 256
#define SENSE_ID_TIMEOUT (10 * HZ)
#define SENSE_ID_MIN_LEN 4
#define SENSE_ID_BASIC_LEN 7
/**
 * diag210_to_senseid() - convert diag 0x210 data to sense id information
* @senseid: sense id
* @diag: diag 0x210 data
*
* Return 0 on success, non-zero otherwise.
*/
static int diag210_to_senseid(struct senseid *senseid, struct diag210 *diag)
{
static struct {
int class, type, cu_type;
} vm_devices[] = {
{ 0x08, 0x01, 0x3480 },
{ 0x08, 0x02, 0x3430 },
{ 0x08, 0x10, 0x3420 },
{ 0x08, 0x42, 0x3424 },
{ 0x08, 0x44, 0x9348 },
{ 0x08, 0x81, 0x3490 },
{ 0x08, 0x82, 0x3422 },
{ 0x10, 0x41, 0x1403 },
{ 0x10, 0x42, 0x3211 },
{ 0x10, 0x43, 0x3203 },
{ 0x10, 0x45, 0x3800 },
{ 0x10, 0x47, 0x3262 },
{ 0x10, 0x48, 0x3820 },
{ 0x10, 0x49, 0x3800 },
{ 0x10, 0x4a, 0x4245 },
{ 0x10, 0x4b, 0x4248 },
{ 0x10, 0x4d, 0x3800 },
{ 0x10, 0x4e, 0x3820 },
{ 0x10, 0x4f, 0x3820 },
{ 0x10, 0x82, 0x2540 },
{ 0x10, 0x84, 0x3525 },
{ 0x20, 0x81, 0x2501 },
{ 0x20, 0x82, 0x2540 },
{ 0x20, 0x84, 0x3505 },
{ 0x40, 0x01, 0x3278 },
{ 0x40, 0x04, 0x3277 },
{ 0x40, 0x80, 0x2250 },
{ 0x40, 0xc0, 0x5080 },
{ 0x80, 0x00, 0x3215 },
};
int i;
/* Special case for osa devices. */
if (diag->vrdcvcla == 0x02 && diag->vrdcvtyp == 0x20) {
senseid->cu_type = 0x3088;
senseid->cu_model = 0x60;
senseid->reserved = 0xff;
return 0;
}
for (i = 0; i < ARRAY_SIZE(vm_devices); i++) {
if (diag->vrdcvcla == vm_devices[i].class &&
diag->vrdcvtyp == vm_devices[i].type) {
senseid->cu_type = vm_devices[i].cu_type;
senseid->reserved = 0xff;
return 0;
}
}
return -ENODEV;
}
/**
 * diag210_get_dev_info() - retrieve device information via diag 0x210
* @cdev: ccw device
*
* Returns zero on success, non-zero otherwise.
*/
static int diag210_get_dev_info(struct ccw_device *cdev)
{
struct ccw_dev_id *dev_id = &cdev->private->dev_id;
struct senseid *senseid = &cdev->private->dma_area->senseid;
struct diag210 diag_data;
int rc;
if (dev_id->ssid != 0)
return -ENODEV;
memset(&diag_data, 0, sizeof(diag_data));
diag_data.vrdcdvno = dev_id->devno;
diag_data.vrdclen = sizeof(diag_data);
rc = diag210(&diag_data);
CIO_TRACE_EVENT(4, "diag210");
CIO_HEX_EVENT(4, &rc, sizeof(rc));
CIO_HEX_EVENT(4, &diag_data, sizeof(diag_data));
if (rc != 0 && rc != 2)
goto err_failed;
if (diag210_to_senseid(senseid, &diag_data))
goto err_unknown;
return 0;
err_unknown:
CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: unknown diag210 data\n",
dev_id->ssid, dev_id->devno);
return -ENODEV;
err_failed:
CIO_MSG_EVENT(0, "snsid: device 0.%x.%04x: diag210 failed (rc=%d)\n",
dev_id->ssid, dev_id->devno, rc);
return -ENODEV;
}
/*
* Initialize SENSE ID data.
*/
static void snsid_init(struct ccw_device *cdev)
{
cdev->private->flags.esid = 0;
memset(&cdev->private->dma_area->senseid, 0,
sizeof(cdev->private->dma_area->senseid));
cdev->private->dma_area->senseid.cu_type = 0xffff;
}
/*
* Check for complete SENSE ID data.
*/
static int snsid_check(struct ccw_device *cdev, void *data)
{
struct cmd_scsw *scsw = &cdev->private->dma_area->irb.scsw.cmd;
int len = sizeof(struct senseid) - scsw->count;
/* Check for incomplete SENSE ID data. */
if (len < SENSE_ID_MIN_LEN)
goto out_restart;
if (cdev->private->dma_area->senseid.cu_type == 0xffff)
goto out_restart;
/* Check for incompatible SENSE ID data. */
if (cdev->private->dma_area->senseid.reserved != 0xff)
return -EOPNOTSUPP;
/* Check for extended-identification information. */
if (len > SENSE_ID_BASIC_LEN)
cdev->private->flags.esid = 1;
return 0;
out_restart:
snsid_init(cdev);
return -EAGAIN;
}
/*
* Process SENSE ID request result.
*/
static void snsid_callback(struct ccw_device *cdev, void *data, int rc)
{
struct ccw_dev_id *id = &cdev->private->dev_id;
struct senseid *senseid = &cdev->private->dma_area->senseid;
int vm = 0;
if (rc && MACHINE_IS_VM) {
/* Try diag 0x210 fallback on z/VM. */
snsid_init(cdev);
if (diag210_get_dev_info(cdev) == 0) {
rc = 0;
vm = 1;
}
}
CIO_MSG_EVENT(2, "snsid: device 0.%x.%04x: rc=%d %04x/%02x "
"%04x/%02x%s\n", id->ssid, id->devno, rc,
senseid->cu_type, senseid->cu_model, senseid->dev_type,
senseid->dev_model, vm ? " (diag210)" : "");
ccw_device_sense_id_done(cdev, rc);
}
/**
* ccw_device_sense_id_start - perform SENSE ID
* @cdev: ccw device
*
* Execute a SENSE ID channel program on @cdev to update its sense id
* information. When finished, call ccw_device_sense_id_done with a
* return code specifying the result.
*/
void ccw_device_sense_id_start(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
struct ccw1 *cp = cdev->private->dma_area->iccws;
CIO_TRACE_EVENT(4, "snsid");
CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
/* Data setup. */
snsid_init(cdev);
/* Channel program setup. */
cp->cmd_code = CCW_CMD_SENSE_ID;
cp->cda = (u32)virt_to_phys(&cdev->private->dma_area->senseid);
cp->count = sizeof(struct senseid);
cp->flags = CCW_FLAG_SLI;
/* Request setup. */
memset(req, 0, sizeof(*req));
req->cp = cp;
req->timeout = SENSE_ID_TIMEOUT;
req->maxretries = SENSE_ID_RETRIES;
req->lpm = sch->schib.pmcw.pam & sch->opm;
req->check = snsid_check;
req->callback = snsid_callback;
ccw_request_start(cdev);
}
| linux-master | drivers/s390/cio/device_id.c |
// SPDX-License-Identifier: GPL-2.0
/*
* S/390 common I/O debugfs interface
*
* Copyright IBM Corp. 2021
* Author(s): Vineeth Vijayan <[email protected]>
*/
#include <linux/debugfs.h>
#include "cio_debug.h"
struct dentry *cio_debugfs_dir;
/* Create the debugfs directory for CIO under the arch_debugfs_dir
 * i.e. /sys/kernel/debug/s390/cio
*/
static int __init cio_debugfs_init(void)
{
cio_debugfs_dir = debugfs_create_dir("cio", arch_debugfs_dir);
return 0;
}
subsys_initcall(cio_debugfs_init);
| linux-master | drivers/s390/cio/cio_debugfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Tracepoint definitions for vfio_ccw
*
* Copyright IBM Corp. 2019
* Author(s): Eric Farman <[email protected]>
*/
#define CREATE_TRACE_POINTS
#include "vfio_ccw_trace.h"
EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_chp_event);
EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_async_request);
EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_event);
EXPORT_TRACEPOINT_SYMBOL(vfio_ccw_fsm_io_request);
| linux-master | drivers/s390/cio/vfio_ccw_trace.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Channel report handling code
*
* Copyright IBM Corp. 2000, 2009
* Author(s): Ingo Adlung <[email protected]>,
* Martin Schwidefsky <[email protected]>,
* Cornelia Huck <[email protected]>,
*/
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <asm/crw.h>
#include <asm/ctl_reg.h>
#include "ioasm.h"
static DEFINE_MUTEX(crw_handler_mutex);
static crw_handler_t crw_handlers[NR_RSCS];
static atomic_t crw_nr_req = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(crw_handler_wait_q);
/**
* crw_register_handler() - register a channel report word handler
* @rsc: reporting source code to handle
* @handler: handler to be registered
*
* Returns %0 on success and a negative error value otherwise.
*/
int crw_register_handler(int rsc, crw_handler_t handler)
{
int rc = 0;
if ((rsc < 0) || (rsc >= NR_RSCS))
return -EINVAL;
mutex_lock(&crw_handler_mutex);
if (crw_handlers[rsc])
rc = -EBUSY;
else
crw_handlers[rsc] = handler;
mutex_unlock(&crw_handler_mutex);
return rc;
}
/**
* crw_unregister_handler() - unregister a channel report word handler
* @rsc: reporting source code to handle
*/
void crw_unregister_handler(int rsc)
{
if ((rsc < 0) || (rsc >= NR_RSCS))
return;
mutex_lock(&crw_handler_mutex);
crw_handlers[rsc] = NULL;
mutex_unlock(&crw_handler_mutex);
}
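/*
 * Minimal sketch of handler registration (my_crw_handler is
 * hypothetical, shown for illustration only). A handler receives up to
 * two chained CRWs plus an overflow flag; on overflow it is called with
 * NULL CRWs and must assume that events were lost:
 *
 *	static void my_crw_handler(struct crw *crw0, struct crw *crw1,
 *				   int overflow)
 *	{
 *		if (overflow)
 *			return;		// all events lost, rescan everything
 *		// act on crw0->rsid (and crw1, if chained)
 *	}
 *
 *	rc = crw_register_handler(CRW_RSC_SCH, my_crw_handler);
 */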
/*
* Retrieve CRWs and call function to handle event.
*/
static int crw_collect_info(void *unused)
{
struct crw crw[2];
int ccode, signal;
unsigned int chain;
repeat:
signal = wait_event_interruptible(crw_handler_wait_q,
atomic_read(&crw_nr_req) > 0);
if (unlikely(signal))
atomic_inc(&crw_nr_req);
chain = 0;
while (1) {
crw_handler_t handler;
if (unlikely(chain > 1)) {
struct crw tmp_crw;
printk(KERN_WARNING"%s: Code does not support more "
"than two chained crws; please report to "
"[email protected]!\n", __func__);
ccode = stcrw(&tmp_crw);
printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
__func__, tmp_crw.slct, tmp_crw.oflw,
tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
tmp_crw.erc, tmp_crw.rsid);
printk(KERN_WARNING"%s: This was crw number %x in the "
"chain\n", __func__, chain);
if (ccode != 0)
break;
chain = tmp_crw.chn ? chain + 1 : 0;
continue;
}
ccode = stcrw(&crw[chain]);
if (ccode != 0)
break;
printk(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw[chain].slct, crw[chain].oflw, crw[chain].chn,
crw[chain].rsc, crw[chain].anc, crw[chain].erc,
crw[chain].rsid);
/* Check for overflows. */
if (crw[chain].oflw) {
int i;
pr_debug("%s: crw overflow detected!\n", __func__);
mutex_lock(&crw_handler_mutex);
for (i = 0; i < NR_RSCS; i++) {
if (crw_handlers[i])
crw_handlers[i](NULL, NULL, 1);
}
mutex_unlock(&crw_handler_mutex);
chain = 0;
continue;
}
if (crw[0].chn && !chain) {
chain++;
continue;
}
mutex_lock(&crw_handler_mutex);
handler = crw_handlers[crw[chain].rsc];
if (handler)
handler(&crw[0], chain ? &crw[1] : NULL, 0);
mutex_unlock(&crw_handler_mutex);
/* chain is always 0 or 1 here. */
chain = crw[chain].chn ? chain + 1 : 0;
}
if (atomic_dec_and_test(&crw_nr_req))
wake_up(&crw_handler_wait_q);
goto repeat;
return 0;
}
void crw_handle_channel_report(void)
{
atomic_inc(&crw_nr_req);
wake_up(&crw_handler_wait_q);
}
void crw_wait_for_channel_report(void)
{
crw_handle_channel_report();
wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0);
}
/*
* Machine checks for the channel subsystem must be enabled
* after the channel subsystem is initialized
*/
static int __init crw_machine_check_init(void)
{
struct task_struct *task;
task = kthread_run(crw_collect_info, NULL, "kmcheck");
if (IS_ERR(task))
return PTR_ERR(task);
ctl_set_bit(14, 28); /* enable channel report MCH */
return 0;
}
device_initcall(crw_machine_check_init);
| linux-master | drivers/s390/cio/crw.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Async I/O region for vfio_ccw
*
* Copyright Red Hat, Inc. 2019
*
* Author(s): Cornelia Huck <[email protected]>
*/
#include <linux/vfio.h>
#include "vfio_ccw_private.h"
static ssize_t vfio_ccw_async_region_read(struct vfio_ccw_private *private,
char __user *buf, size_t count,
loff_t *ppos)
{
unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
struct ccw_cmd_region *region;
int ret;
if (pos + count > sizeof(*region))
return -EINVAL;
mutex_lock(&private->io_mutex);
region = private->region[i].data;
if (copy_to_user(buf, (void *)region + pos, count))
ret = -EFAULT;
else
ret = count;
mutex_unlock(&private->io_mutex);
return ret;
}
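/*
 * Offset math, for illustration (assuming VFIO_CCW_OFFSET_SHIFT is 10,
 * as defined in vfio_ccw_private.h): a *ppos of
 * ((VFIO_CCW_NUM_REGIONS + 0) << 10) + 4 yields i == 0, i.e.
 * private->region[0], and reads the async command region starting at
 * byte offset 4.
 */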
static ssize_t vfio_ccw_async_region_write(struct vfio_ccw_private *private,
const char __user *buf, size_t count,
loff_t *ppos)
{
unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
struct ccw_cmd_region *region;
int ret;
if (pos + count > sizeof(*region))
return -EINVAL;
if (!mutex_trylock(&private->io_mutex))
return -EAGAIN;
region = private->region[i].data;
if (copy_from_user((void *)region + pos, buf, count)) {
ret = -EFAULT;
goto out_unlock;
}
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_ASYNC_REQ);
ret = region->ret_code ? region->ret_code : count;
out_unlock:
mutex_unlock(&private->io_mutex);
return ret;
}
static void vfio_ccw_async_region_release(struct vfio_ccw_private *private,
struct vfio_ccw_region *region)
{
}
static const struct vfio_ccw_regops vfio_ccw_async_region_ops = {
.read = vfio_ccw_async_region_read,
.write = vfio_ccw_async_region_write,
.release = vfio_ccw_async_region_release,
};
int vfio_ccw_register_async_dev_regions(struct vfio_ccw_private *private)
{
return vfio_ccw_register_dev_region(private,
VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD,
&vfio_ccw_async_region_ops,
sizeof(struct ccw_cmd_region),
VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE,
private->cmd_region);
}
| linux-master | drivers/s390/cio/vfio_ccw_async.c |
// SPDX-License-Identifier: GPL-2.0
/*
* CIO inject interface
*
* Copyright IBM Corp. 2021
* Author(s): Vineeth Vijayan <[email protected]>
*/
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <asm/chpid.h>
#include "cio_inject.h"
#include "cio_debug.h"
static DEFINE_SPINLOCK(crw_inject_lock);
DEFINE_STATIC_KEY_FALSE(cio_inject_enabled);
static struct crw *crw_inject_data;
/**
 * crw_inject() - initiate an artificial CRW injection
* @crw: The data which needs to be injected as new CRW.
*
* The CRW handler is called, which will use the provided artificial
* data instead of the CRW from the underlying hardware.
*
* Return: 0 on success
*/
static int crw_inject(struct crw *crw)
{
int rc = 0;
struct crw *copy;
unsigned long flags;
copy = kmemdup(crw, sizeof(*crw), GFP_KERNEL);
if (!copy)
return -ENOMEM;
spin_lock_irqsave(&crw_inject_lock, flags);
if (crw_inject_data) {
kfree(copy);
rc = -EBUSY;
} else {
crw_inject_data = copy;
}
spin_unlock_irqrestore(&crw_inject_lock, flags);
if (!rc)
crw_handle_channel_report();
return rc;
}
/**
 * stcrw_get_injected() - copy the injected CRW data to a CRW struct
 * @crw: The target CRW pointer.
 *
 * Retrieve injected CRW data. Return 0 on success, 1 if no injected
 * CRW is available. The function reproduces the return code of the
 * actual STCRW instruction.
*/
int stcrw_get_injected(struct crw *crw)
{
int rc = 1;
unsigned long flags;
spin_lock_irqsave(&crw_inject_lock, flags);
if (crw_inject_data) {
memcpy(crw, crw_inject_data, sizeof(*crw));
kfree(crw_inject_data);
crw_inject_data = NULL;
rc = 0;
}
spin_unlock_irqrestore(&crw_inject_lock, flags);
return rc;
}
/* The debugfs write handler for crw_inject nodes operation */
static ssize_t crw_inject_write(struct file *file, const char __user *buf,
size_t lbuf, loff_t *ppos)
{
u32 slct, oflw, chn, rsc, anc, erc, rsid;
struct crw crw;
char *buffer;
int rc;
if (!static_branch_likely(&cio_inject_enabled)) {
pr_warn("CIO inject is not enabled - ignoring CRW inject\n");
return -EINVAL;
}
buffer = vmemdup_user(buf, lbuf);
if (IS_ERR(buffer))
return -ENOMEM;
rc = sscanf(buffer, "%x %x %x %x %x %x %x", &slct, &oflw, &chn, &rsc, &anc,
&erc, &rsid);
kvfree(buffer);
if (rc != 7) {
pr_warn("crw_inject: Invalid format (need <solicited> <overflow> <chaining> <rsc> <ancillary> <erc> <rsid>)\n");
return -EINVAL;
}
memset(&crw, 0, sizeof(crw));
crw.slct = slct;
crw.oflw = oflw;
crw.chn = chn;
crw.rsc = rsc;
crw.anc = anc;
crw.erc = erc;
crw.rsid = rsid;
rc = crw_inject(&crw);
if (rc)
return rc;
return lbuf;
}
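/*
 * Illustrative usage (values hypothetical, parsed as hex):
 *
 *	echo 1 > /sys/kernel/debug/s390/cio/enable_inject
 *	echo "0 0 0 3 0 4 17" > /sys/kernel/debug/s390/cio/crw_inject
 *
 * injects a CRW with rsc 0x3 (subchannel), erc 0x4 and rsid 0x17, as if
 * the hardware had reported it via STCRW.
 */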
/* Debugfs write handler for inject_enable node*/
static ssize_t enable_inject_write(struct file *file, const char __user *buf,
size_t lbuf, loff_t *ppos)
{
unsigned long en = 0;
int rc;
rc = kstrtoul_from_user(buf, lbuf, 10, &en);
if (rc)
return rc;
switch (en) {
case 0:
static_branch_disable(&cio_inject_enabled);
break;
case 1:
static_branch_enable(&cio_inject_enabled);
break;
}
return lbuf;
}
static const struct file_operations crw_fops = {
.owner = THIS_MODULE,
.write = crw_inject_write,
};
static const struct file_operations cio_en_fops = {
.owner = THIS_MODULE,
.write = enable_inject_write,
};
static int __init cio_inject_init(void)
{
/* enable_inject node enables the static branching */
debugfs_create_file("enable_inject", 0200, cio_debugfs_dir,
NULL, &cio_en_fops);
debugfs_create_file("crw_inject", 0200, cio_debugfs_dir,
NULL, &crw_fops);
return 0;
}
device_initcall(cio_inject_init);
| linux-master | drivers/s390/cio/cio_inject.c |
// SPDX-License-Identifier: GPL-2.0
/*
* channel program interfaces
*
* Copyright IBM Corp. 2017
*
* Author(s): Dong Jia Shi <[email protected]>
* Xiao Feng Ren <[email protected]>
*/
#include <linux/ratelimit.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/vfio.h>
#include <asm/idals.h>
#include "vfio_ccw_cp.h"
#include "vfio_ccw_private.h"
struct page_array {
	/* Array of iova addresses for the pages that need to be pinned. */
dma_addr_t *pa_iova;
/* Array that receives the pinned pages. */
struct page **pa_page;
/* Number of pages pinned from @pa_iova. */
int pa_nr;
};
struct ccwchain {
struct list_head next;
struct ccw1 *ch_ccw;
/* Guest physical address of the current chain. */
u64 ch_iova;
/* Count of the valid ccws in chain. */
int ch_len;
	/* Pinned pages for the original data. */
struct page_array *ch_pa;
};
/*
* page_array_alloc() - alloc memory for page array
* @pa: page_array on which to perform the operation
 * @len: number of page entries to allocate (one per idaw)
 *
 * Attempt to allocate memory for the page array.
 *
 * Usage of page_array:
 * We expect (pa_nr == 0) and (pa_iova == NULL); all fields in
 * this structure will be filled in by this function.
*
* Returns:
* 0 if page array is allocated
* -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova is not NULL
* -ENOMEM if alloc failed
*/
static int page_array_alloc(struct page_array *pa, unsigned int len)
{
if (pa->pa_nr || pa->pa_iova)
return -EINVAL;
if (len == 0)
return -EINVAL;
pa->pa_nr = len;
pa->pa_iova = kcalloc(len, sizeof(*pa->pa_iova), GFP_KERNEL);
if (!pa->pa_iova)
return -ENOMEM;
pa->pa_page = kcalloc(len, sizeof(*pa->pa_page), GFP_KERNEL);
if (!pa->pa_page) {
kfree(pa->pa_iova);
return -ENOMEM;
}
return 0;
}
/*
* page_array_unpin() - Unpin user pages in memory
* @pa: page_array on which to perform the operation
* @vdev: the vfio device to perform the operation
* @pa_nr: number of user pages to unpin
* @unaligned: were pages unaligned on the pin request
*
* Only unpin if any pages were pinned to begin with, i.e. pa_nr > 0,
* otherwise only clear pa->pa_nr
*/
static void page_array_unpin(struct page_array *pa,
struct vfio_device *vdev, int pa_nr, bool unaligned)
{
int unpinned = 0, npage = 1;
while (unpinned < pa_nr) {
dma_addr_t *first = &pa->pa_iova[unpinned];
dma_addr_t *last = &first[npage];
if (unpinned + npage < pa_nr &&
*first + npage * PAGE_SIZE == *last &&
!unaligned) {
npage++;
continue;
}
vfio_unpin_pages(vdev, *first, npage);
unpinned += npage;
npage = 1;
}
pa->pa_nr = 0;
}
/*
* page_array_pin() - Pin user pages in memory
* @pa: page_array on which to perform the operation
* @vdev: the vfio device to perform pin operations
 * @unaligned: true if the pages may not be 4K-aligned (2K IDAWs)
*
* Returns number of pages pinned upon success.
* If the pin request partially succeeds, or fails completely,
* all pages are left unpinned and a negative error value is returned.
*
* Requests to pin "aligned" pages can be coalesced into a single
* vfio_pin_pages request for the sake of efficiency, based on the
* expectation of 4K page requests. Unaligned requests are probably
* dealing with 2K "pages", and cannot be coalesced without
* reworking this logic to incorporate that math.
*/
static int page_array_pin(struct page_array *pa, struct vfio_device *vdev, bool unaligned)
{
int pinned = 0, npage = 1;
int ret = 0;
while (pinned < pa->pa_nr) {
dma_addr_t *first = &pa->pa_iova[pinned];
dma_addr_t *last = &first[npage];
if (pinned + npage < pa->pa_nr &&
*first + npage * PAGE_SIZE == *last &&
!unaligned) {
npage++;
continue;
}
ret = vfio_pin_pages(vdev, *first, npage,
IOMMU_READ | IOMMU_WRITE,
&pa->pa_page[pinned]);
if (ret < 0) {
goto err_out;
} else if (ret > 0 && ret != npage) {
pinned += ret;
ret = -EINVAL;
goto err_out;
}
pinned += npage;
npage = 1;
}
return ret;
err_out:
page_array_unpin(pa, vdev, pinned, unaligned);
return ret;
}
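/*
 * Worked example of the coalescing above (addresses hypothetical): with
 * pa_iova[] = {0x10000, 0x11000, 0x12000, 0x20000} and !unaligned, the
 * first three iovas are contiguous, so they are pinned with a single
 * vfio_pin_pages(vdev, 0x10000, 3, ...) call, followed by a second call
 * for 0x20000 with npage == 1.
 */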
/* Unpin the pages before releasing the memory. */
static void page_array_unpin_free(struct page_array *pa, struct vfio_device *vdev, bool unaligned)
{
page_array_unpin(pa, vdev, pa->pa_nr, unaligned);
kfree(pa->pa_page);
kfree(pa->pa_iova);
}
static bool page_array_iova_pinned(struct page_array *pa, u64 iova, u64 length)
{
u64 iova_pfn_start = iova >> PAGE_SHIFT;
u64 iova_pfn_end = (iova + length - 1) >> PAGE_SHIFT;
u64 pfn;
int i;
for (i = 0; i < pa->pa_nr; i++) {
pfn = pa->pa_iova[i] >> PAGE_SHIFT;
if (pfn >= iova_pfn_start && pfn <= iova_pfn_end)
return true;
}
return false;
}
/* Create the list of IDAL words for a page_array. */
static inline void page_array_idal_create_words(struct page_array *pa,
unsigned long *idaws)
{
int i;
/*
	 * IDAL words (except the first one) rely on the memory being 4k
	 * aligned. If a user virtual address is 4K aligned, then its
	 * corresponding kernel physical address will also be 4K aligned.
	 * Thus it is safe to simply use the physical address to create
	 * an idaw.
*/
for (i = 0; i < pa->pa_nr; i++) {
idaws[i] = page_to_phys(pa->pa_page[i]);
/* Incorporate any offset from each starting address */
idaws[i] += pa->pa_iova[i] & (PAGE_SIZE - 1);
}
}
static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
{
struct ccw0 ccw0;
struct ccw1 *pccw1 = source;
int i;
for (i = 0; i < len; i++) {
ccw0 = *(struct ccw0 *)pccw1;
if ((pccw1->cmd_code & 0x0f) == CCW_CMD_TIC) {
pccw1->cmd_code = CCW_CMD_TIC;
pccw1->flags = 0;
pccw1->count = 0;
} else {
pccw1->cmd_code = ccw0.cmd_code;
pccw1->flags = ccw0.flags;
pccw1->count = ccw0.count;
}
pccw1->cda = ccw0.cda;
pccw1++;
}
}
#define idal_is_2k(_cp) (!(_cp)->orb.cmd.c64 || (_cp)->orb.cmd.i2k)
/*
* Helpers to operate ccwchain.
*/
#define ccw_is_read(_ccw) (((_ccw)->cmd_code & 0x03) == 0x02)
#define ccw_is_read_backward(_ccw) (((_ccw)->cmd_code & 0x0F) == 0x0C)
#define ccw_is_sense(_ccw) (((_ccw)->cmd_code & 0x0F) == CCW_CMD_BASIC_SENSE)
#define ccw_is_noop(_ccw) ((_ccw)->cmd_code == CCW_CMD_NOOP)
#define ccw_is_tic(_ccw) ((_ccw)->cmd_code == CCW_CMD_TIC)
#define ccw_is_idal(_ccw) ((_ccw)->flags & CCW_FLAG_IDA)
#define ccw_is_skip(_ccw) ((_ccw)->flags & CCW_FLAG_SKIP)
#define ccw_is_chain(_ccw) ((_ccw)->flags & (CCW_FLAG_CC | CCW_FLAG_DC))
/*
* ccw_does_data_transfer()
*
* Determine whether a CCW will move any data, such that the guest pages
* would need to be pinned before performing the I/O.
*
* Returns 1 if yes, 0 if no.
*/
static inline int ccw_does_data_transfer(struct ccw1 *ccw)
{
/* If the count field is zero, then no data will be transferred */
if (ccw->count == 0)
return 0;
/* If the command is a NOP, then no data will be transferred */
if (ccw_is_noop(ccw))
return 0;
/* If the skip flag is off, then data will be transferred */
if (!ccw_is_skip(ccw))
return 1;
/*
* If the skip flag is on, it is only meaningful if the command
* code is a read, read backward, sense, or sense ID. In those
* cases, no data will be transferred.
*/
if (ccw_is_read(ccw) || ccw_is_read_backward(ccw))
return 0;
if (ccw_is_sense(ccw))
return 0;
/* The skip flag is on, but it is ignored for this command code. */
return 1;
}
/*
* is_cpa_within_range()
*
* @cpa: channel program address being questioned
* @head: address of the beginning of a CCW chain
* @len: number of CCWs within the chain
*
* Determine whether the address of a CCW (whether a new chain,
* or the target of a TIC) falls within a range (including the end points).
*
* Returns 1 if yes, 0 if no.
*/
static inline int is_cpa_within_range(u32 cpa, u32 head, int len)
{
u32 tail = head + (len - 1) * sizeof(struct ccw1);
return (head <= cpa && cpa <= tail);
}
static inline int is_tic_within_range(struct ccw1 *ccw, u32 head, int len)
{
if (!ccw_is_tic(ccw))
return 0;
return is_cpa_within_range(ccw->cda, head, len);
}
static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
{
struct ccwchain *chain;
chain = kzalloc(sizeof(*chain), GFP_KERNEL);
if (!chain)
return NULL;
chain->ch_ccw = kcalloc(len, sizeof(*chain->ch_ccw), GFP_DMA | GFP_KERNEL);
if (!chain->ch_ccw)
goto out_err;
chain->ch_pa = kcalloc(len, sizeof(*chain->ch_pa), GFP_KERNEL);
if (!chain->ch_pa)
goto out_err;
list_add_tail(&chain->next, &cp->ccwchain_list);
return chain;
out_err:
kfree(chain->ch_ccw);
kfree(chain);
return NULL;
}
static void ccwchain_free(struct ccwchain *chain)
{
list_del(&chain->next);
kfree(chain->ch_pa);
kfree(chain->ch_ccw);
kfree(chain);
}
/* Free resource for a ccw that allocated memory for its cda. */
static void ccwchain_cda_free(struct ccwchain *chain, int idx)
{
struct ccw1 *ccw = &chain->ch_ccw[idx];
if (ccw_is_tic(ccw))
return;
kfree(phys_to_virt(ccw->cda));
}
/**
* ccwchain_calc_length - calculate the length of the ccw chain.
* @iova: guest physical address of the target ccw chain
* @cp: channel_program on which to perform the operation
*
* This is the chain length not considering any TICs.
* You need to do a new round for each TIC target.
*
 * The program is also validated for the absence of indirect data
 * addressing scenarios that are not yet supported.
*
* Returns: the length of the ccw chain or -errno.
*/
static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
{
struct ccw1 *ccw = cp->guest_cp;
int cnt = 0;
do {
cnt++;
/*
* We want to keep counting if the current CCW has the
* command-chaining flag enabled, or if it is a TIC CCW
* that loops back into the current chain. The latter
* is used for device orientation, where the CCW PRIOR to
* the TIC can either jump to the TIC or a CCW immediately
* after the TIC, depending on the results of its operation.
*/
if (!ccw_is_chain(ccw) && !is_tic_within_range(ccw, iova, cnt))
break;
ccw++;
} while (cnt < CCWCHAIN_LEN_MAX + 1);
if (cnt == CCWCHAIN_LEN_MAX + 1)
cnt = -EINVAL;
return cnt;
}
static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp)
{
struct ccwchain *chain;
u32 ccw_head;
list_for_each_entry(chain, &cp->ccwchain_list, next) {
ccw_head = chain->ch_iova;
if (is_cpa_within_range(tic->cda, ccw_head, chain->ch_len))
return 1;
}
return 0;
}
static int ccwchain_loop_tic(struct ccwchain *chain,
struct channel_program *cp);
static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
{
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
struct ccwchain *chain;
int len, ret;
/* Copy 2K (the most we support today) of possible CCWs */
ret = vfio_dma_rw(vdev, cda, cp->guest_cp, CCWCHAIN_LEN_MAX * sizeof(struct ccw1), false);
if (ret)
return ret;
/* Convert any Format-0 CCWs to Format-1 */
if (!cp->orb.cmd.fmt)
convert_ccw0_to_ccw1(cp->guest_cp, CCWCHAIN_LEN_MAX);
/* Count the CCWs in the current chain */
len = ccwchain_calc_length(cda, cp);
if (len < 0)
return len;
/* Need alloc a new chain for this one. */
chain = ccwchain_alloc(cp, len);
if (!chain)
return -ENOMEM;
chain->ch_len = len;
chain->ch_iova = cda;
/* Copy the actual CCWs into the new chain */
memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1));
/* Loop for tics on this new chain. */
ret = ccwchain_loop_tic(chain, cp);
if (ret)
ccwchain_free(chain);
return ret;
}
/* Loop for TICs. */
static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp)
{
struct ccw1 *tic;
int i, ret;
for (i = 0; i < chain->ch_len; i++) {
tic = &chain->ch_ccw[i];
if (!ccw_is_tic(tic))
continue;
/* May transfer to an existing chain. */
if (tic_target_chain_exists(tic, cp))
continue;
/* Build a ccwchain for the next segment */
ret = ccwchain_handle_ccw(tic->cda, cp);
if (ret)
return ret;
}
return 0;
}
static int ccwchain_fetch_tic(struct ccw1 *ccw,
struct channel_program *cp)
{
struct ccwchain *iter;
u32 ccw_head;
list_for_each_entry(iter, &cp->ccwchain_list, next) {
ccw_head = iter->ch_iova;
if (is_cpa_within_range(ccw->cda, ccw_head, iter->ch_len)) {
ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
(ccw->cda - ccw_head));
return 0;
}
}
return -EFAULT;
}
static unsigned long *get_guest_idal(struct ccw1 *ccw,
struct channel_program *cp,
int idaw_nr)
{
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
unsigned long *idaws;
unsigned int *idaws_f1;
int idal_len = idaw_nr * sizeof(*idaws);
int idaw_size = idal_is_2k(cp) ? PAGE_SIZE / 2 : PAGE_SIZE;
int idaw_mask = ~(idaw_size - 1);
int i, ret;
idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
if (!idaws)
return ERR_PTR(-ENOMEM);
if (ccw_is_idal(ccw)) {
/* Copy IDAL from guest */
ret = vfio_dma_rw(vdev, ccw->cda, idaws, idal_len, false);
if (ret) {
kfree(idaws);
return ERR_PTR(ret);
}
} else {
/* Fabricate an IDAL based off CCW data address */
if (cp->orb.cmd.c64) {
idaws[0] = ccw->cda;
for (i = 1; i < idaw_nr; i++)
idaws[i] = (idaws[i - 1] + idaw_size) & idaw_mask;
} else {
idaws_f1 = (unsigned int *)idaws;
idaws_f1[0] = ccw->cda;
for (i = 1; i < idaw_nr; i++)
idaws_f1[i] = (idaws_f1[i - 1] + idaw_size) & idaw_mask;
}
}
return idaws;
}
/*
* ccw_count_idaws() - Calculate the number of IDAWs needed to transfer
* a specified amount of data
*
* @ccw: The Channel Command Word being translated
* @cp: Channel Program being processed
*
* The ORB is examined, since it specifies what IDAWs could actually be
* used by any CCW in the channel program, regardless of whether or not
* the CCW actually does. An ORB that does not specify Format-2-IDAW
* Control could still contain a CCW with an IDAL, which would be
* Format-1 and thus only move 2K with each IDAW. Thus all CCWs within
* the channel program must follow the same size requirements.
*/
static int ccw_count_idaws(struct ccw1 *ccw,
struct channel_program *cp)
{
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
u64 iova;
int size = cp->orb.cmd.c64 ? sizeof(u64) : sizeof(u32);
int ret;
int bytes = 1;
if (ccw->count)
bytes = ccw->count;
if (ccw_is_idal(ccw)) {
/* Read first IDAW to check its starting address. */
/* All subsequent IDAWs will be 2K- or 4K-aligned. */
ret = vfio_dma_rw(vdev, ccw->cda, &iova, size, false);
if (ret)
return ret;
/*
* Format-1 IDAWs only occupy the first 32 bits,
* and bit 0 is always off.
*/
if (!cp->orb.cmd.c64)
iova = iova >> 32;
} else {
iova = ccw->cda;
}
/* Format-1 IDAWs operate on 2K each */
if (!cp->orb.cmd.c64)
return idal_2k_nr_words((void *)iova, bytes);
/* Using the 2K variant of Format-2 IDAWs? */
if (cp->orb.cmd.i2k)
return idal_2k_nr_words((void *)iova, bytes);
/* The 'usual' case is 4K Format-2 IDAWs */
return idal_nr_words((void *)iova, bytes);
}
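/*
 * Worked example (hypothetical values): a direct CCW with cda 0x10800
 * and count 0x2000 under 4K Format-2 rules spans 0x10800..0x127ff and
 * touches three 4K frames, so idal_nr_words() returns 3. Under 2K rules
 * (Format-1, or Format-2 with i2k) the same transfer needs four 2K
 * IDAWs, since the start happens to be 2K-aligned.
 */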
static int ccwchain_fetch_ccw(struct ccw1 *ccw,
struct page_array *pa,
struct channel_program *cp)
{
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
unsigned long *idaws;
unsigned int *idaws_f1;
int ret;
int idaw_nr;
int i;
/* Calculate size of IDAL */
idaw_nr = ccw_count_idaws(ccw, cp);
if (idaw_nr < 0)
return idaw_nr;
/* Allocate an IDAL from host storage */
idaws = get_guest_idal(ccw, cp, idaw_nr);
if (IS_ERR(idaws)) {
ret = PTR_ERR(idaws);
goto out_init;
}
/*
* Allocate an array of pages to pin/translate.
	 * The number of pages is actually the count of the idaws
	 * required for the data transfer (one entry per idaw, whether
	 * the IDAWs are 2K or 4K).
*/
ret = page_array_alloc(pa, idaw_nr);
if (ret < 0)
goto out_free_idaws;
/*
* Copy guest IDAWs into page_array, in case the memory they
* occupy is not contiguous.
*/
idaws_f1 = (unsigned int *)idaws;
for (i = 0; i < idaw_nr; i++) {
if (cp->orb.cmd.c64)
pa->pa_iova[i] = idaws[i];
else
pa->pa_iova[i] = idaws_f1[i];
}
if (ccw_does_data_transfer(ccw)) {
ret = page_array_pin(pa, vdev, idal_is_2k(cp));
if (ret < 0)
goto out_unpin;
} else {
pa->pa_nr = 0;
}
ccw->cda = (__u32) virt_to_phys(idaws);
ccw->flags |= CCW_FLAG_IDA;
/* Populate the IDAL with pinned/translated addresses from page */
page_array_idal_create_words(pa, idaws);
return 0;
out_unpin:
page_array_unpin_free(pa, vdev, idal_is_2k(cp));
out_free_idaws:
kfree(idaws);
out_init:
ccw->cda = 0;
return ret;
}
/*
* Fetch one ccw.
 * To reduce memory copying, we'll pin the cda pages in memory,
 * and to get rid of the 2G cda limitation of ccw1, we'll translate
 * direct ccws to idal ccws.
*/
static int ccwchain_fetch_one(struct ccw1 *ccw,
struct page_array *pa,
struct channel_program *cp)
{
if (ccw_is_tic(ccw))
return ccwchain_fetch_tic(ccw, cp);
return ccwchain_fetch_ccw(ccw, pa, cp);
}
/**
* cp_init() - allocate ccwchains for a channel program.
* @cp: channel_program on which to perform the operation
* @orb: control block for the channel program from the guest
*
* This creates one or more ccwchain(s), and copies the raw data of
* the target channel program from @orb->cmd.iova to the new ccwchain(s).
*
* Limitations:
* 1. Supports idal(c64) ccw chaining.
* 2. Supports 4k idaw.
*
* Returns:
* %0 on success and a negative error value on failure.
*/
int cp_init(struct channel_program *cp, union orb *orb)
{
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
/* custom ratelimit used to avoid flood during guest IPL */
static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 1);
int ret;
/* this is an error in the caller */
if (cp->initialized)
return -EBUSY;
/*
* We only support prefetching the channel program. We assume all channel
* programs executed by supported guests likewise support prefetching.
* Executing a channel program that does not specify prefetching will
* typically not cause an error, but a warning is issued to help identify
* the problem if something does break.
*/
if (!orb->cmd.pfch && __ratelimit(&ratelimit_state))
dev_warn(
vdev->dev,
"Prefetching channel program even though prefetch not specified in ORB");
INIT_LIST_HEAD(&cp->ccwchain_list);
memcpy(&cp->orb, orb, sizeof(*orb));
/* Build a ccwchain for the first CCW segment */
ret = ccwchain_handle_ccw(orb->cmd.cpa, cp);
if (!ret)
cp->initialized = true;
return ret;
}
/**
* cp_free() - free resources for channel program.
* @cp: channel_program on which to perform the operation
*
* This unpins the memory pages and frees the memory space occupied by
* @cp, which must have been returned by a previous call to cp_init().
* Otherwise, undefined behavior occurs.
*/
void cp_free(struct channel_program *cp)
{
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
struct ccwchain *chain, *temp;
int i;
if (!cp->initialized)
return;
cp->initialized = false;
list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++) {
page_array_unpin_free(&chain->ch_pa[i], vdev, idal_is_2k(cp));
ccwchain_cda_free(chain, i);
}
ccwchain_free(chain);
}
}
/**
* cp_prefetch() - translate a guest physical address channel program to
* a real-device runnable channel program.
* @cp: channel_program on which to perform the operation
*
* This function translates the guest-physical-address channel program
 * and stores the result in the ccwchain list. @cp must have been
 * initialized by a previous call to cp_init(). Otherwise, undefined
* behavior occurs.
* For each chain composing the channel program:
* - On entry ch_len holds the count of CCWs to be translated.
* - On exit ch_len is adjusted to the count of successfully translated CCWs.
* This allows cp_free to find in ch_len the count of CCWs to free in a chain.
*
 * The S/390 CCW Translation APIs (prefixed by 'cp_') are introduced
* as helpers to do ccw chain translation inside the kernel. Basically
* they accept a channel program issued by a virtual machine, and
* translate the channel program to a real-device runnable channel
* program.
*
* These APIs will copy the ccws into kernel-space buffers, and update
* the guest physical addresses with their corresponding host physical
* addresses. Then channel I/O device drivers could issue the
* translated channel program to real devices to perform an I/O
* operation.
*
* These interfaces are designed to support translation only for
* channel programs, which are generated and formatted by a
* guest. Thus this will make it possible for things like VFIO to
* leverage the interfaces to passthrough a channel I/O mediated
* device in QEMU.
*
* We support direct ccw chaining by translating them to idal ccws.
*
* Returns:
* %0 on success and a negative error value on failure.
*/
int cp_prefetch(struct channel_program *cp)
{
struct ccwchain *chain;
struct ccw1 *ccw;
struct page_array *pa;
int len, idx, ret;
/* this is an error in the caller */
if (!cp->initialized)
return -EINVAL;
list_for_each_entry(chain, &cp->ccwchain_list, next) {
len = chain->ch_len;
for (idx = 0; idx < len; idx++) {
ccw = &chain->ch_ccw[idx];
pa = &chain->ch_pa[idx];
ret = ccwchain_fetch_one(ccw, pa, cp);
if (ret)
goto out_err;
}
}
return 0;
out_err:
/* Only cleanup the chain elements that were actually translated. */
chain->ch_len = idx;
list_for_each_entry_continue(chain, &cp->ccwchain_list, next) {
chain->ch_len = 0;
}
return ret;
}
/**
* cp_get_orb() - get the orb of the channel program
* @cp: channel_program on which to perform the operation
* @sch: subchannel the operation will be performed against
*
* This function returns the address of the updated orb of the channel
* program. Channel I/O device drivers could use this orb to issue a
* ssch.
*/
union orb *cp_get_orb(struct channel_program *cp, struct subchannel *sch)
{
union orb *orb;
struct ccwchain *chain;
struct ccw1 *cpa;
/* this is an error in the caller */
if (!cp->initialized)
return NULL;
orb = &cp->orb;
orb->cmd.intparm = (u32)virt_to_phys(sch);
orb->cmd.fmt = 1;
/*
* Everything built by vfio-ccw is a Format-2 IDAL.
* If the input was a Format-1 IDAL, indicate that
* 2K Format-2 IDAWs were created here.
*/
if (!orb->cmd.c64)
orb->cmd.i2k = 1;
orb->cmd.c64 = 1;
if (orb->cmd.lpm == 0)
orb->cmd.lpm = sch->lpm;
chain = list_first_entry(&cp->ccwchain_list, struct ccwchain, next);
cpa = chain->ch_ccw;
orb->cmd.cpa = (__u32)virt_to_phys(cpa);
return orb;
}
/**
* cp_update_scsw() - update scsw for a channel program.
* @cp: channel_program on which to perform the operation
* @scsw: I/O results of the channel program and also the target to be
* updated
*
* @scsw contains the I/O results of the channel program that pointed
* to by @cp. However what @scsw->cpa stores is a host physical
* address, which is meaningless for the guest, which is waiting for
* the I/O results.
*
 * This function updates @scsw->cpa to its corresponding guest physical
* address.
*/
void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
{
struct ccwchain *chain;
u32 cpa = scsw->cmd.cpa;
u32 ccw_head;
if (!cp->initialized)
return;
/*
* LATER:
* For now, only update the cmd.cpa part. We may need to deal with
* other portions of the schib as well, even if we don't return them
* in the ioctl directly. Path status changes etc.
*/
list_for_each_entry(chain, &cp->ccwchain_list, next) {
ccw_head = (u32)(u64)chain->ch_ccw;
/*
* On successful execution, cpa points just beyond the end
* of the chain.
*/
if (is_cpa_within_range(cpa, ccw_head, chain->ch_len + 1)) {
/*
* (cpa - ccw_head) is the offset value of the host
* physical ccw to its chain head.
* Adding this value to the guest physical ccw chain
* head gets us the guest cpa.
*/
cpa = chain->ch_iova + (cpa - ccw_head);
break;
}
}
scsw->cmd.cpa = cpa;
}
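/*
 * Worked example (hypothetical addresses): if a chain of two CCWs was
 * copied to host memory at ch_ccw == 0x1000 with ch_iova == 0x20000, a
 * completion cpa of 0x1010 (just past the second CCW) lies within the
 * ch_len + 1 range and translates to the guest cpa
 * 0x20000 + (0x1010 - 0x1000) = 0x20010.
 */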
/**
* cp_iova_pinned() - check if an iova is pinned for a ccw chain.
* @cp: channel_program on which to perform the operation
* @iova: the iova to check
* @length: the length to check from @iova
*
* If the @iova is currently pinned for the ccw chain, return true;
* else return false.
*/
bool cp_iova_pinned(struct channel_program *cp, u64 iova, u64 length)
{
struct ccwchain *chain;
int i;
if (!cp->initialized)
return false;
list_for_each_entry(chain, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++)
if (page_array_iova_pinned(&chain->ch_pa[i], iova, length))
return true;
}
return false;
}
| linux-master | drivers/s390/cio/vfio_ccw_cp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Finite state machine for vfio-ccw device handling
*
* Copyright IBM Corp. 2017
* Copyright Red Hat, Inc. 2019
*
* Author(s): Dong Jia Shi <[email protected]>
* Cornelia Huck <[email protected]>
*/
#include <linux/vfio.h>
#include <asm/isc.h>
#include "ioasm.h"
#include "vfio_ccw_private.h"
static int fsm_io_helper(struct vfio_ccw_private *private)
{
struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
union orb *orb;
int ccode;
__u8 lpm;
unsigned long flags;
int ret;
spin_lock_irqsave(sch->lock, flags);
orb = cp_get_orb(&private->cp, sch);
if (!orb) {
ret = -EIO;
goto out;
}
VFIO_CCW_TRACE_EVENT(5, "stIO");
VFIO_CCW_TRACE_EVENT(5, dev_name(&sch->dev));
/* Issue "Start Subchannel" */
ccode = ssch(sch->schid, orb);
VFIO_CCW_HEX_EVENT(5, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
/*
* Initialize device status information
*/
sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
ret = 0;
private->state = VFIO_CCW_STATE_CP_PENDING;
break;
case 1: /* Status pending */
case 2: /* Busy */
ret = -EBUSY;
break;
case 3: /* Device/path not operational */
{
lpm = orb->cmd.lpm;
if (lpm != 0)
sch->lpm &= ~lpm;
else
sch->lpm = 0;
if (cio_update_schib(sch))
ret = -ENODEV;
else
ret = sch->lpm ? -EACCES : -ENODEV;
break;
}
default:
ret = ccode;
}
out:
spin_unlock_irqrestore(sch->lock, flags);
return ret;
}
static int fsm_do_halt(struct vfio_ccw_private *private)
{
struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
unsigned long flags;
int ccode;
int ret;
spin_lock_irqsave(sch->lock, flags);
VFIO_CCW_TRACE_EVENT(2, "haltIO");
VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
/* Issue "Halt Subchannel" */
ccode = hsch(sch->schid);
VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
/*
* Initialize device status information
*/
sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
ret = 0;
break;
case 1: /* Status pending */
case 2: /* Busy */
ret = -EBUSY;
break;
case 3: /* Device not operational */
ret = -ENODEV;
break;
default:
ret = ccode;
}
spin_unlock_irqrestore(sch->lock, flags);
return ret;
}
static int fsm_do_clear(struct vfio_ccw_private *private)
{
struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
unsigned long flags;
int ccode;
int ret;
spin_lock_irqsave(sch->lock, flags);
VFIO_CCW_TRACE_EVENT(2, "clearIO");
VFIO_CCW_TRACE_EVENT(2, dev_name(&sch->dev));
/* Issue "Clear Subchannel" */
ccode = csch(sch->schid);
VFIO_CCW_HEX_EVENT(2, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
/*
* Initialize device status information
*/
sch->schib.scsw.cmd.actl = SCSW_ACTL_CLEAR_PEND;
/* TODO: check what else we might need to clear */
ret = 0;
break;
case 3: /* Device not operational */
ret = -ENODEV;
break;
default:
ret = ccode;
}
spin_unlock_irqrestore(sch->lock, flags);
return ret;
}
static void fsm_notoper(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: notoper event %x state %x\n",
sch->schid.cssid,
sch->schid.ssid,
sch->schid.sch_no,
event,
private->state);
/*
* TODO:
* Probably we should send the machine check to the guest.
*/
css_sched_sch_todo(sch, SCH_TODO_UNREG);
private->state = VFIO_CCW_STATE_NOT_OPER;
/* This is usually handled during CLOSE event */
cp_free(&private->cp);
}
/*
* No operation action.
*/
static void fsm_nop(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
}
static void fsm_io_error(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
private->io_region->ret_code = -EIO;
}
static void fsm_io_busy(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
private->io_region->ret_code = -EBUSY;
}
static void fsm_io_retry(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
private->io_region->ret_code = -EAGAIN;
}
static void fsm_async_error(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
struct ccw_cmd_region *cmd_region = private->cmd_region;
pr_err("vfio-ccw: FSM: %s request from state:%d\n",
cmd_region->command == VFIO_CCW_ASYNC_CMD_HSCH ? "halt" :
cmd_region->command == VFIO_CCW_ASYNC_CMD_CSCH ? "clear" :
"<unknown>", private->state);
cmd_region->ret_code = -EIO;
}
static void fsm_async_retry(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
private->cmd_region->ret_code = -EAGAIN;
}
static void fsm_disabled_irq(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
/*
* An interrupt in a disabled state means a previous disable was not
* successful - should not happen, but we try to disable again.
*/
cio_disable_subchannel(sch);
}
inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
{
struct subchannel *sch = to_subchannel(p->vdev.dev->parent);
return sch->schid;
}
/*
* Deal with the ccw command request from the userspace.
*/
static void fsm_io_request(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
union orb *orb;
union scsw *scsw = &private->scsw;
struct ccw_io_region *io_region = private->io_region;
char *errstr = "request";
struct subchannel_id schid = get_schid(private);
private->state = VFIO_CCW_STATE_CP_PROCESSING;
memcpy(scsw, io_region->scsw_area, sizeof(*scsw));
if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) {
orb = (union orb *)io_region->orb_area;
/* Don't try to build a cp if transport mode is specified. */
if (orb->tm.b) {
io_region->ret_code = -EOPNOTSUPP;
VFIO_CCW_MSG_EVENT(2,
"sch %x.%x.%04x: transport mode\n",
schid.cssid,
schid.ssid, schid.sch_no);
errstr = "transport mode";
goto err_out;
}
io_region->ret_code = cp_init(&private->cp, orb);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
"sch %x.%x.%04x: cp_init=%d\n",
schid.cssid,
schid.ssid, schid.sch_no,
io_region->ret_code);
errstr = "cp init";
goto err_out;
}
io_region->ret_code = cp_prefetch(&private->cp);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
"sch %x.%x.%04x: cp_prefetch=%d\n",
schid.cssid,
schid.ssid, schid.sch_no,
io_region->ret_code);
errstr = "cp prefetch";
cp_free(&private->cp);
goto err_out;
}
/* Start channel program and wait for I/O interrupt. */
io_region->ret_code = fsm_io_helper(private);
if (io_region->ret_code) {
VFIO_CCW_MSG_EVENT(2,
"sch %x.%x.%04x: fsm_io_helper=%d\n",
schid.cssid,
schid.ssid, schid.sch_no,
io_region->ret_code);
errstr = "cp fsm_io_helper";
cp_free(&private->cp);
goto err_out;
}
return;
} else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) {
VFIO_CCW_MSG_EVENT(2,
"sch %x.%x.%04x: halt on io_region\n",
schid.cssid,
schid.ssid, schid.sch_no);
/* halt is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
goto err_out;
} else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
VFIO_CCW_MSG_EVENT(2,
"sch %x.%x.%04x: clear on io_region\n",
schid.cssid,
schid.ssid, schid.sch_no);
/* clear is handled via the async cmd region */
io_region->ret_code = -EOPNOTSUPP;
goto err_out;
}
err_out:
private->state = VFIO_CCW_STATE_IDLE;
trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid,
io_region->ret_code, errstr);
}
/*
* Deal with an async request from userspace.
*/
static void fsm_async_request(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
struct ccw_cmd_region *cmd_region = private->cmd_region;
switch (cmd_region->command) {
case VFIO_CCW_ASYNC_CMD_HSCH:
cmd_region->ret_code = fsm_do_halt(private);
break;
case VFIO_CCW_ASYNC_CMD_CSCH:
cmd_region->ret_code = fsm_do_clear(private);
break;
default:
/* should not happen? */
cmd_region->ret_code = -EINVAL;
}
trace_vfio_ccw_fsm_async_request(get_schid(private),
cmd_region->command,
cmd_region->ret_code);
}
/*
* Got an interrupt for a normal io (state busy).
*/
static void fsm_irq(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
struct irb *irb = this_cpu_ptr(&cio_irb);
VFIO_CCW_TRACE_EVENT(6, "IRQ");
VFIO_CCW_TRACE_EVENT(6, dev_name(&sch->dev));
memcpy(&private->irb, irb, sizeof(*irb));
queue_work(vfio_ccw_work_q, &private->io_work);
if (private->completion)
complete(private->completion);
}
static void fsm_open(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
int ret;
spin_lock_irq(sch->lock);
sch->isc = VFIO_CCW_ISC;
ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
if (ret)
goto err_unlock;
private->state = VFIO_CCW_STATE_IDLE;
spin_unlock_irq(sch->lock);
return;
err_unlock:
spin_unlock_irq(sch->lock);
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}
static void fsm_close(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
int ret;
spin_lock_irq(sch->lock);
if (!sch->schib.pmcw.ena)
goto err_unlock;
ret = cio_disable_subchannel(sch);
if (ret == -EBUSY)
ret = vfio_ccw_sch_quiesce(sch);
if (ret)
goto err_unlock;
private->state = VFIO_CCW_STATE_STANDBY;
spin_unlock_irq(sch->lock);
cp_free(&private->cp);
return;
err_unlock:
spin_unlock_irq(sch->lock);
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}
/*
 * Device state machine
*/
fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = {
[VFIO_CCW_STATE_NOT_OPER] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_nop,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
[VFIO_CCW_EVENT_OPEN] = fsm_nop,
[VFIO_CCW_EVENT_CLOSE] = fsm_nop,
},
[VFIO_CCW_STATE_STANDBY] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_error,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_error,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq,
[VFIO_CCW_EVENT_OPEN] = fsm_open,
[VFIO_CCW_EVENT_CLOSE] = fsm_notoper,
},
[VFIO_CCW_STATE_IDLE] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_request,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
[VFIO_CCW_EVENT_OPEN] = fsm_notoper,
[VFIO_CCW_EVENT_CLOSE] = fsm_close,
},
[VFIO_CCW_STATE_CP_PROCESSING] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_retry,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_retry,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
[VFIO_CCW_EVENT_OPEN] = fsm_notoper,
[VFIO_CCW_EVENT_CLOSE] = fsm_close,
},
[VFIO_CCW_STATE_CP_PENDING] = {
[VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper,
[VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy,
[VFIO_CCW_EVENT_ASYNC_REQ] = fsm_async_request,
[VFIO_CCW_EVENT_INTERRUPT] = fsm_irq,
[VFIO_CCW_EVENT_OPEN] = fsm_notoper,
[VFIO_CCW_EVENT_CLOSE] = fsm_close,
},
};
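/*
 * Illustrative note (not part of this file): events are dispatched by
 * indexing the table above with the current state and the incoming
 * event, roughly:
 *
 *	vfio_ccw_jumptable[private->state][event](private, event);
 *
 * vfio_ccw_fsm_event(), used throughout the driver, performs this
 * dispatch.
 */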
| linux-master | drivers/s390/cio/vfio_ccw_fsm.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* Copyright IBM Corp. 2002, 2009
*
* Author(s): Martin Schwidefsky ([email protected])
* Cornelia Huck ([email protected])
*/
#include <linux/export.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <asm/ccwdev.h>
#include <asm/idals.h>
#include <asm/chpid.h>
#include <asm/fcx.h>
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc.h"
#include "device.h"
#include "chp.h"
/**
* ccw_device_set_options_mask() - set some options and unset the rest
* @cdev: device for which the options are to be set
* @flags: options to be set
*
* All flags specified in @flags are set, all flags not specified in @flags
* are cleared.
* Returns:
* %0 on success, -%EINVAL on an invalid flag combination.
*/
int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
{
/*
	 * The flag usage is mutually exclusive ...
*/
if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
(flags & CCWDEV_REPORT_ALL))
return -EINVAL;
cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
cdev->private->options.mpath = (flags & CCWDEV_DO_MULTIPATH) != 0;
return 0;
}
/**
* ccw_device_set_options() - set some options
* @cdev: device for which the options are to be set
* @flags: options to be set
*
* All flags specified in @flags are set, the remainder is left untouched.
* Returns:
* %0 on success, -%EINVAL if an invalid flag combination would ensue.
*/
int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
{
/*
	 * The flag usage is mutually exclusive ...
*/
if (((flags & CCWDEV_EARLY_NOTIFICATION) &&
(flags & CCWDEV_REPORT_ALL)) ||
((flags & CCWDEV_EARLY_NOTIFICATION) &&
cdev->private->options.repall) ||
((flags & CCWDEV_REPORT_ALL) &&
cdev->private->options.fast))
return -EINVAL;
cdev->private->options.fast |= (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
cdev->private->options.mpath |= (flags & CCWDEV_DO_MULTIPATH) != 0;
return 0;
}
/**
* ccw_device_clear_options() - clear some options
* @cdev: device for which the options are to be cleared
* @flags: options to be cleared
*
* All flags specified in @flags are cleared, the remainder is left untouched.
*/
void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
{
cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0;
cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
cdev->private->options.mpath &= (flags & CCWDEV_DO_MULTIPATH) == 0;
}
/**
* ccw_device_is_pathgroup() - determine if paths to this device are grouped
* @cdev: ccw device
*
* Return non-zero if there is a path group, zero otherwise.
*/
int ccw_device_is_pathgroup(struct ccw_device *cdev)
{
return cdev->private->flags.pgroup;
}
EXPORT_SYMBOL(ccw_device_is_pathgroup);
/**
* ccw_device_is_multipath() - determine if device is operating in multipath mode
* @cdev: ccw device
*
* Return non-zero if device is operating in multipath mode, zero otherwise.
*/
int ccw_device_is_multipath(struct ccw_device *cdev)
{
return cdev->private->flags.mpath;
}
EXPORT_SYMBOL(ccw_device_is_multipath);
/**
* ccw_device_clear() - terminate I/O request processing
* @cdev: target ccw device
* @intparm: interruption parameter to be returned upon conclusion of csch
*
* ccw_device_clear() calls csch on @cdev's subchannel.
* Returns:
* %0 on success,
* -%ENODEV on device not operational,
* -%EINVAL on invalid device state.
* Context:
* Interrupts disabled, ccw device lock held
*/
int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
{
struct subchannel *sch;
int ret;
if (!cdev || !cdev->dev.parent)
return -ENODEV;
sch = to_subchannel(cdev->dev.parent);
if (!sch->schib.pmcw.ena)
return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE &&
cdev->private->state != DEV_STATE_W4SENSE)
return -EINVAL;
ret = cio_clear(sch);
if (ret == 0)
cdev->private->intparm = intparm;
return ret;
}
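/*
 * Illustrative sketch (not part of this file): a hypothetical driver
 * cancelling an outstanding request via csch. The example_* name is
 * made up; per the kerneldoc above, the ccw device lock must be held
 * with interrupts disabled around the call.
 */
static int __maybe_unused example_cancel_request(struct ccw_device *cdev)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	rc = ccw_device_clear(cdev, (unsigned long)cdev);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return rc;
}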
/**
* ccw_device_start_timeout_key() - start a s390 channel program with timeout and key
* @cdev: target ccw device
* @cpa: logical start address of channel program
* @intparm: user specific interruption parameter; will be presented back to
* @cdev's interrupt handler. Allows a device driver to associate
* the interrupt with a particular I/O request.
* @lpm: defines the channel path to be used for a specific I/O request. A
* value of 0 will make cio use the opm.
* @key: storage key to be used for the I/O
* @flags: additional flags; defines the action to be performed for I/O
* processing.
* @expires: timeout value in jiffies
*
 * Start an S/390 channel program. When the interrupt arrives, the
* IRQ handler is called, either immediately, delayed (dev-end missing,
* or sense required) or never (no IRQ handler registered).
* This function notifies the device driver if the channel program has not
* completed during the time specified by @expires. If a timeout occurs, the
* channel program is terminated via xsch, hsch or csch, and the device's
* interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
* The interruption handler will echo back the @intparm specified here, unless
* another interruption parameter is specified by a subsequent invocation of
* ccw_device_halt() or ccw_device_clear().
* Returns:
* %0, if the operation was successful;
* -%EBUSY, if the device is busy, or status pending;
* -%EACCES, if no path specified in @lpm is operational;
* -%ENODEV, if the device is not operational.
* Context:
* Interrupts disabled, ccw device lock held
*/
int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
unsigned long intparm, __u8 lpm, __u8 key,
unsigned long flags, int expires)
{
struct subchannel *sch;
int ret;
if (!cdev || !cdev->dev.parent)
return -ENODEV;
sch = to_subchannel(cdev->dev.parent);
if (!sch->schib.pmcw.ena)
return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state == DEV_STATE_VERIFY) {
/* Remember to fake irb when finished. */
if (!cdev->private->flags.fake_irb) {
cdev->private->flags.fake_irb = FAKE_CMD_IRB;
cdev->private->intparm = intparm;
return 0;
} else
/* There's already a fake I/O around. */
return -EBUSY;
}
if (cdev->private->state != DEV_STATE_ONLINE ||
((sch->schib.scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
!(sch->schib.scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS)) ||
cdev->private->flags.doverify)
return -EBUSY;
ret = cio_set_options (sch, flags);
if (ret)
return ret;
/* Adjust requested path mask to exclude unusable paths. */
if (lpm) {
lpm &= sch->lpm;
if (lpm == 0)
return -EACCES;
}
ret = cio_start_key (sch, cpa, lpm, key);
switch (ret) {
case 0:
cdev->private->intparm = intparm;
if (expires)
ccw_device_set_timeout(cdev, expires);
break;
case -EACCES:
case -ENODEV:
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
break;
}
return ret;
}
/**
* ccw_device_start_key() - start a s390 channel program with key
* @cdev: target ccw device
* @cpa: logical start address of channel program
* @intparm: user specific interruption parameter; will be presented back to
* @cdev's interrupt handler. Allows a device driver to associate
* the interrupt with a particular I/O request.
* @lpm: defines the channel path to be used for a specific I/O request. A
* value of 0 will make cio use the opm.
* @key: storage key to be used for the I/O
* @flags: additional flags; defines the action to be performed for I/O
* processing.
*
 * Start an S/390 channel program. When the interrupt arrives, the
* IRQ handler is called, either immediately, delayed (dev-end missing,
* or sense required) or never (no IRQ handler registered).
* The interruption handler will echo back the @intparm specified here, unless
* another interruption parameter is specified by a subsequent invocation of
* ccw_device_halt() or ccw_device_clear().
* Returns:
* %0, if the operation was successful;
* -%EBUSY, if the device is busy, or status pending;
* -%EACCES, if no path specified in @lpm is operational;
* -%ENODEV, if the device is not operational.
* Context:
* Interrupts disabled, ccw device lock held
*/
int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
unsigned long intparm, __u8 lpm, __u8 key,
unsigned long flags)
{
return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, key,
flags, 0);
}
/**
* ccw_device_start() - start a s390 channel program
* @cdev: target ccw device
* @cpa: logical start address of channel program
* @intparm: user specific interruption parameter; will be presented back to
* @cdev's interrupt handler. Allows a device driver to associate
* the interrupt with a particular I/O request.
* @lpm: defines the channel path to be used for a specific I/O request. A
* value of 0 will make cio use the opm.
* @flags: additional flags; defines the action to be performed for I/O
* processing.
*
 * Start an S/390 channel program. When the interrupt arrives, the
* IRQ handler is called, either immediately, delayed (dev-end missing,
* or sense required) or never (no IRQ handler registered).
* The interruption handler will echo back the @intparm specified here, unless
* another interruption parameter is specified by a subsequent invocation of
* ccw_device_halt() or ccw_device_clear().
* Returns:
* %0, if the operation was successful;
* -%EBUSY, if the device is busy, or status pending;
* -%EACCES, if no path specified in @lpm is operational;
* -%ENODEV, if the device is not operational.
* Context:
* Interrupts disabled, ccw device lock held
*/
int ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
unsigned long intparm, __u8 lpm, unsigned long flags)
{
return ccw_device_start_key(cdev, cpa, intparm, lpm,
PAGE_DEFAULT_KEY, flags);
}
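/*
 * Illustrative sketch (not part of this file): building a single CCW
 * and starting it. The example_* names and the command code 0x02 (a
 * device-specific READ on many devices) are made up for illustration;
 * the caller is assumed to hold the ccw device lock with interrupts
 * disabled and to keep @ccw and @data alive until the interrupt
 * handler reports completion.
 */
static int __maybe_unused example_start_read(struct ccw_device *cdev,
					     struct ccw1 *ccw, void *data,
					     unsigned int count)
{
	ccw->cmd_code = 0x02;		/* hypothetical READ command */
	ccw->flags = CCW_FLAG_SLI;	/* tolerate incorrect length */
	ccw->count = count;
	ccw->cda = (__u32)virt_to_phys(data);

	/* lpm=0: let cio choose from the operational path mask. */
	return ccw_device_start(cdev, ccw, (unsigned long)cdev, 0, 0);
}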
/**
* ccw_device_start_timeout() - start a s390 channel program with timeout
* @cdev: target ccw device
* @cpa: logical start address of channel program
* @intparm: user specific interruption parameter; will be presented back to
* @cdev's interrupt handler. Allows a device driver to associate
* the interrupt with a particular I/O request.
* @lpm: defines the channel path to be used for a specific I/O request. A
* value of 0 will make cio use the opm.
* @flags: additional flags; defines the action to be performed for I/O
* processing.
* @expires: timeout value in jiffies
*
 * Start an S/390 channel program. When the interrupt arrives, the
* IRQ handler is called, either immediately, delayed (dev-end missing,
* or sense required) or never (no IRQ handler registered).
* This function notifies the device driver if the channel program has not
* completed during the time specified by @expires. If a timeout occurs, the
* channel program is terminated via xsch, hsch or csch, and the device's
* interrupt handler will be called with an irb containing ERR_PTR(-%ETIMEDOUT).
* The interruption handler will echo back the @intparm specified here, unless
* another interruption parameter is specified by a subsequent invocation of
* ccw_device_halt() or ccw_device_clear().
* Returns:
* %0, if the operation was successful;
* -%EBUSY, if the device is busy, or status pending;
* -%EACCES, if no path specified in @lpm is operational;
* -%ENODEV, if the device is not operational.
* Context:
* Interrupts disabled, ccw device lock held
*/
int ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
unsigned long intparm, __u8 lpm,
unsigned long flags, int expires)
{
return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
PAGE_DEFAULT_KEY, flags,
expires);
}
/**
* ccw_device_halt() - halt I/O request processing
* @cdev: target ccw device
* @intparm: interruption parameter to be returned upon conclusion of hsch
*
* ccw_device_halt() calls hsch on @cdev's subchannel.
* The interruption handler will echo back the @intparm specified here, unless
* another interruption parameter is specified by a subsequent invocation of
* ccw_device_clear().
* Returns:
* %0 on success,
* -%ENODEV on device not operational,
* -%EINVAL on invalid device state,
* -%EBUSY on device busy or interrupt pending.
* Context:
* Interrupts disabled, ccw device lock held
*/
int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
{
struct subchannel *sch;
int ret;
if (!cdev || !cdev->dev.parent)
return -ENODEV;
sch = to_subchannel(cdev->dev.parent);
if (!sch->schib.pmcw.ena)
return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE &&
cdev->private->state != DEV_STATE_W4SENSE)
return -EINVAL;
ret = cio_halt(sch);
if (ret == 0)
cdev->private->intparm = intparm;
return ret;
}
/**
* ccw_device_resume() - resume channel program execution
* @cdev: target ccw device
*
* ccw_device_resume() calls rsch on @cdev's subchannel.
* Returns:
* %0 on success,
* -%ENODEV on device not operational,
* -%EINVAL on invalid device state,
* -%EBUSY on device busy or interrupt pending.
* Context:
* Interrupts disabled, ccw device lock held
*/
int ccw_device_resume(struct ccw_device *cdev)
{
struct subchannel *sch;
if (!cdev || !cdev->dev.parent)
return -ENODEV;
sch = to_subchannel(cdev->dev.parent);
if (!sch->schib.pmcw.ena)
return -EINVAL;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return -ENODEV;
if (cdev->private->state != DEV_STATE_ONLINE ||
!(sch->schib.scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
return -EINVAL;
return cio_resume(sch);
}
/**
* ccw_device_get_ciw() - Search for CIW command in extended sense data.
* @cdev: ccw device to inspect
* @ct: command type to look for
*
* During SenseID, command information words (CIWs) describing special
* commands available to the device may have been stored in the extended
* sense data. This function searches for CIWs of a specified command
* type in the extended sense data.
* Returns:
* %NULL if no extended sense data has been stored or if no CIW of the
* specified command type could be found,
* else a pointer to the CIW of the specified command type.
*/
struct ciw *ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
{
int ciw_cnt;
if (cdev->private->flags.esid == 0)
return NULL;
for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
if (cdev->private->dma_area->senseid.ciw[ciw_cnt].ct == ct)
return cdev->private->dma_area->senseid.ciw + ciw_cnt;
return NULL;
}
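/*
 * Illustrative sketch (not part of this file): looking up the
 * read-configuration-data CIW after SenseID, as a driver would before
 * issuing RCD. CIW_TYPE_RCD and struct ciw are real; the example_*
 * name is made up.
 */
static int __maybe_unused example_rcd_buf_size(struct ccw_device *cdev)
{
	struct ciw *ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);

	/* ciw->count is the required buffer length for the command. */
	return ciw ? ciw->count : -EOPNOTSUPP;
}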
/**
* ccw_device_get_path_mask() - get currently available paths
* @cdev: ccw device to be queried
* Returns:
* %0 if no subchannel for the device is available,
* else the mask of currently available paths for the ccw device's subchannel.
*/
__u8 ccw_device_get_path_mask(struct ccw_device *cdev)
{
struct subchannel *sch;
if (!cdev->dev.parent)
return 0;
sch = to_subchannel(cdev->dev.parent);
return sch->lpm;
}
/**
* ccw_device_get_chp_desc() - return newly allocated channel-path descriptor
* @cdev: device to obtain the descriptor for
* @chp_idx: index of the channel path
*
* On success return a newly allocated copy of the channel-path description
* data associated with the given channel path. Return %NULL on error.
*/
struct channel_path_desc_fmt0 *ccw_device_get_chp_desc(struct ccw_device *cdev,
int chp_idx)
{
struct subchannel *sch;
struct chp_id chpid;
sch = to_subchannel(cdev->dev.parent);
chp_id_init(&chpid);
chpid.id = sch->schib.pmcw.chpid[chp_idx];
return chp_get_chp_desc(chpid);
}
/**
* ccw_device_get_util_str() - return newly allocated utility strings
* @cdev: device to obtain the utility strings for
* @chp_idx: index of the channel path
*
* On success return a newly allocated copy of the utility strings
* associated with the given channel path. Return %NULL on error.
*/
u8 *ccw_device_get_util_str(struct ccw_device *cdev, int chp_idx)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct channel_path *chp;
struct chp_id chpid;
u8 *util_str;
chp_id_init(&chpid);
chpid.id = sch->schib.pmcw.chpid[chp_idx];
chp = chpid_to_chp(chpid);
util_str = kmalloc(sizeof(chp->desc_fmt3.util_str), GFP_KERNEL);
if (!util_str)
return NULL;
mutex_lock(&chp->lock);
memcpy(util_str, chp->desc_fmt3.util_str, sizeof(chp->desc_fmt3.util_str));
mutex_unlock(&chp->lock);
return util_str;
}
/**
* ccw_device_get_id() - obtain a ccw device id
* @cdev: device to obtain the id for
* @dev_id: where to fill in the values
*/
void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
{
*dev_id = cdev->private->dev_id;
}
EXPORT_SYMBOL(ccw_device_get_id);
/**
* ccw_device_tm_start_timeout_key() - perform start function
* @cdev: ccw device on which to perform the start function
* @tcw: transport-command word to be started
* @intparm: user defined parameter to be passed to the interrupt handler
* @lpm: mask of paths to use
* @key: storage key to use for storage access
* @expires: time span in jiffies after which to abort request
*
* Start the tcw on the given ccw device. Return zero on success, non-zero
* otherwise.
*/
int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
unsigned long intparm, u8 lpm, u8 key,
int expires)
{
struct subchannel *sch;
int rc;
sch = to_subchannel(cdev->dev.parent);
if (!sch->schib.pmcw.ena)
return -EINVAL;
if (cdev->private->state == DEV_STATE_VERIFY) {
/* Remember to fake irb when finished. */
if (!cdev->private->flags.fake_irb) {
cdev->private->flags.fake_irb = FAKE_TM_IRB;
cdev->private->intparm = intparm;
return 0;
} else
/* There's already a fake I/O around. */
return -EBUSY;
}
if (cdev->private->state != DEV_STATE_ONLINE)
return -EIO;
/* Adjust requested path mask to exclude unusable paths. */
if (lpm) {
lpm &= sch->lpm;
if (lpm == 0)
return -EACCES;
}
rc = cio_tm_start_key(sch, tcw, lpm, key);
if (rc == 0) {
cdev->private->intparm = intparm;
if (expires)
ccw_device_set_timeout(cdev, expires);
}
return rc;
}
EXPORT_SYMBOL(ccw_device_tm_start_timeout_key);
/**
* ccw_device_tm_start_key() - perform start function
* @cdev: ccw device on which to perform the start function
* @tcw: transport-command word to be started
* @intparm: user defined parameter to be passed to the interrupt handler
* @lpm: mask of paths to use
* @key: storage key to use for storage access
*
* Start the tcw on the given ccw device. Return zero on success, non-zero
* otherwise.
*/
int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
unsigned long intparm, u8 lpm, u8 key)
{
return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm, key, 0);
}
EXPORT_SYMBOL(ccw_device_tm_start_key);
/**
* ccw_device_tm_start() - perform start function
* @cdev: ccw device on which to perform the start function
* @tcw: transport-command word to be started
* @intparm: user defined parameter to be passed to the interrupt handler
* @lpm: mask of paths to use
*
* Start the tcw on the given ccw device. Return zero on success, non-zero
* otherwise.
*/
int ccw_device_tm_start(struct ccw_device *cdev, struct tcw *tcw,
unsigned long intparm, u8 lpm)
{
return ccw_device_tm_start_key(cdev, tcw, intparm, lpm,
PAGE_DEFAULT_KEY);
}
EXPORT_SYMBOL(ccw_device_tm_start);
/**
* ccw_device_tm_start_timeout() - perform start function
* @cdev: ccw device on which to perform the start function
* @tcw: transport-command word to be started
* @intparm: user defined parameter to be passed to the interrupt handler
* @lpm: mask of paths to use
* @expires: time span in jiffies after which to abort request
*
* Start the tcw on the given ccw device. Return zero on success, non-zero
* otherwise.
*/
int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
unsigned long intparm, u8 lpm, int expires)
{
return ccw_device_tm_start_timeout_key(cdev, tcw, intparm, lpm,
PAGE_DEFAULT_KEY, expires);
}
EXPORT_SYMBOL(ccw_device_tm_start_timeout);
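/*
 * Illustrative sketch (not part of this file): starting a
 * transport-command word on any operational path with a five-second
 * timeout. The example_* name is made up; the caller is assumed to
 * hold the ccw device lock with interrupts disabled.
 */
static int __maybe_unused example_tm_start(struct ccw_device *cdev,
					   struct tcw *tcw)
{
	return ccw_device_tm_start_timeout(cdev, tcw, (unsigned long)cdev,
					   0, 5 * HZ);
}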
/**
* ccw_device_get_mdc() - accumulate max data count
* @cdev: ccw device for which the max data count is accumulated
* @mask: mask of paths to use
*
 * Return the number of 64K-byte blocks that all paths support at least
 * for a transport command. A return value of 0 indicates failure.
*/
int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct channel_path *chp;
struct chp_id chpid;
int mdc = 0, i;
	/* Adjust requested path mask to exclude varied-off paths. */
if (mask)
mask &= sch->lpm;
else
mask = sch->lpm;
chp_id_init(&chpid);
for (i = 0; i < 8; i++) {
if (!(mask & (0x80 >> i)))
continue;
chpid.id = sch->schib.pmcw.chpid[i];
chp = chpid_to_chp(chpid);
if (!chp)
continue;
mutex_lock(&chp->lock);
if (!chp->desc_fmt1.f) {
mutex_unlock(&chp->lock);
return 0;
}
if (!chp->desc_fmt1.r)
mdc = 1;
mdc = mdc ? min_t(int, mdc, chp->desc_fmt1.mdc) :
chp->desc_fmt1.mdc;
mutex_unlock(&chp->lock);
}
return mdc;
}
EXPORT_SYMBOL(ccw_device_get_mdc);
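/*
 * Illustrative sketch (not part of this file): turning the accumulated
 * max data count into a byte limit for transport commands. The
 * example_* name is made up; mdc counts 64K-byte blocks and 0 means
 * the query failed.
 */
static unsigned int __maybe_unused example_max_tm_bytes(struct ccw_device *cdev)
{
	int mdc = ccw_device_get_mdc(cdev, 0);

	return mdc > 0 ? mdc * 65536 : 0;
}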
/**
* ccw_device_tm_intrg() - perform interrogate function
* @cdev: ccw device on which to perform the interrogate function
*
* Perform an interrogate function on the given ccw device. Return zero on
* success, non-zero otherwise.
*/
int ccw_device_tm_intrg(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
if (!sch->schib.pmcw.ena)
return -EINVAL;
if (cdev->private->state != DEV_STATE_ONLINE)
return -EIO;
if (!scsw_is_tm(&sch->schib.scsw) ||
!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_START_PEND))
return -EINVAL;
return cio_tm_intrg(sch);
}
EXPORT_SYMBOL(ccw_device_tm_intrg);
/**
* ccw_device_get_schid() - obtain a subchannel id
* @cdev: device to obtain the id for
* @schid: where to fill in the values
*/
void ccw_device_get_schid(struct ccw_device *cdev, struct subchannel_id *schid)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
*schid = sch->schid;
}
EXPORT_SYMBOL_GPL(ccw_device_get_schid);
/**
* ccw_device_pnso() - Perform Network-Subchannel Operation
* @cdev: device on which PNSO is performed
* @pnso_area: request and response block for the operation
* @oc: Operation Code
* @resume_token: resume token for multiblock response
* @cnc: Boolean change-notification control
*
* pnso_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
*
* Returns 0 on success.
*/
int ccw_device_pnso(struct ccw_device *cdev,
struct chsc_pnso_area *pnso_area, u8 oc,
struct chsc_pnso_resume_token resume_token, int cnc)
{
struct subchannel_id schid;
ccw_device_get_schid(cdev, &schid);
return chsc_pnso(schid, pnso_area, oc, resume_token, cnc);
}
EXPORT_SYMBOL_GPL(ccw_device_pnso);
/**
* ccw_device_get_cssid() - obtain Channel Subsystem ID
* @cdev: device to obtain the CSSID for
* @cssid: The resulting Channel Subsystem ID
*/
int ccw_device_get_cssid(struct ccw_device *cdev, u8 *cssid)
{
struct device *sch_dev = cdev->dev.parent;
struct channel_subsystem *css = to_css(sch_dev->parent);
if (css->id_valid)
*cssid = css->cssid;
return css->id_valid ? 0 : -ENODEV;
}
EXPORT_SYMBOL_GPL(ccw_device_get_cssid);
/**
* ccw_device_get_iid() - obtain MIF-image ID
* @cdev: device to obtain the MIF-image ID for
* @iid: The resulting MIF-image ID
*/
int ccw_device_get_iid(struct ccw_device *cdev, u8 *iid)
{
struct device *sch_dev = cdev->dev.parent;
struct channel_subsystem *css = to_css(sch_dev->parent);
if (css->id_valid)
*iid = css->iid;
return css->id_valid ? 0 : -ENODEV;
}
EXPORT_SYMBOL_GPL(ccw_device_get_iid);
/**
* ccw_device_get_chpid() - obtain Channel Path ID
* @cdev: device to obtain the Channel Path ID for
* @chp_idx: Index of the channel path
* @chpid: The resulting Channel Path ID
*/
int ccw_device_get_chpid(struct ccw_device *cdev, int chp_idx, u8 *chpid)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
int mask;
if ((chp_idx < 0) || (chp_idx > 7))
return -EINVAL;
mask = 0x80 >> chp_idx;
if (!(sch->schib.pmcw.pim & mask))
return -ENODEV;
*chpid = sch->schib.pmcw.chpid[chp_idx];
return 0;
}
EXPORT_SYMBOL_GPL(ccw_device_get_chpid);
/**
* ccw_device_get_chid() - obtain Channel ID associated with specified CHPID
* @cdev: device to obtain the Channel ID for
* @chp_idx: Index of the channel path
* @chid: The resulting Channel ID
*/
int ccw_device_get_chid(struct ccw_device *cdev, int chp_idx, u16 *chid)
{
struct chp_id cssid_chpid;
struct channel_path *chp;
int rc;
chp_id_init(&cssid_chpid);
rc = ccw_device_get_chpid(cdev, chp_idx, &cssid_chpid.id);
if (rc)
return rc;
chp = chpid_to_chp(cssid_chpid);
if (!chp)
return -ENODEV;
mutex_lock(&chp->lock);
if (chp->desc_fmt1.flags & 0x10)
*chid = chp->desc_fmt1.chid;
else
rc = -ENODEV;
mutex_unlock(&chp->lock);
return rc;
}
EXPORT_SYMBOL_GPL(ccw_device_get_chid);
/*
 * Allocate zeroed, DMA-coherent, 31-bit addressable memory from the
 * subchannel's dma pool. The maximum supported allocation size is
 * PAGE_SIZE.
*/
void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size)
{
void *addr;
if (!get_device(&cdev->dev))
return NULL;
addr = cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size);
if (IS_ERR_OR_NULL(addr))
put_device(&cdev->dev);
return addr;
}
EXPORT_SYMBOL(ccw_device_dma_zalloc);
void ccw_device_dma_free(struct ccw_device *cdev, void *cpu_addr, size_t size)
{
if (!cpu_addr)
return;
cio_gp_dma_free(cdev->private->dma_pool, cpu_addr, size);
put_device(&cdev->dev);
}
EXPORT_SYMBOL(ccw_device_dma_free);
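/*
 * Illustrative sketch (not part of this file): pairing the two dma
 * helpers above for a short-lived, 31-bit addressable buffer. The
 * example_* name and the 64-byte size are made up.
 */
static int __maybe_unused example_with_dma_buf(struct ccw_device *cdev)
{
	void *buf = ccw_device_dma_zalloc(cdev, 64);

	if (!buf)
		return -ENOMEM;
	/* ... use buf as a channel-program data area ... */
	ccw_device_dma_free(cdev, buf, 64);
	return 0;
}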
EXPORT_SYMBOL(ccw_device_set_options_mask);
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear_options);
EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
EXPORT_SYMBOL_GPL(ccw_device_get_util_str);
| linux-master | drivers/s390/cio/device_ops.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2008, 2009
*
* Author: Jan Glauber ([email protected])
*/
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include "qdio_debug.h"
#include "qdio.h"
debug_info_t *qdio_dbf_setup;
debug_info_t *qdio_dbf_error;
static struct dentry *debugfs_root;
#define QDIO_DEBUGFS_NAME_LEN 10
#define QDIO_DBF_NAME_LEN 20
struct qdio_dbf_entry {
char dbf_name[QDIO_DBF_NAME_LEN];
debug_info_t *dbf_info;
struct list_head dbf_list;
};
static LIST_HEAD(qdio_dbf_list);
static DEFINE_MUTEX(qdio_dbf_list_mutex);
static debug_info_t *qdio_get_dbf_entry(char *name)
{
struct qdio_dbf_entry *entry;
debug_info_t *rc = NULL;
mutex_lock(&qdio_dbf_list_mutex);
list_for_each_entry(entry, &qdio_dbf_list, dbf_list) {
if (strcmp(entry->dbf_name, name) == 0) {
rc = entry->dbf_info;
break;
}
}
mutex_unlock(&qdio_dbf_list_mutex);
return rc;
}
static void qdio_clear_dbf_list(void)
{
struct qdio_dbf_entry *entry, *tmp;
mutex_lock(&qdio_dbf_list_mutex);
list_for_each_entry_safe(entry, tmp, &qdio_dbf_list, dbf_list) {
list_del(&entry->dbf_list);
debug_unregister(entry->dbf_info);
kfree(entry);
}
mutex_unlock(&qdio_dbf_list_mutex);
}
int qdio_allocate_dbf(struct qdio_irq *irq_ptr)
{
char text[QDIO_DBF_NAME_LEN];
struct qdio_dbf_entry *new_entry;
DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr);
/* allocate trace view for the interface */
snprintf(text, QDIO_DBF_NAME_LEN, "qdio_%s",
dev_name(&irq_ptr->cdev->dev));
irq_ptr->debug_area = qdio_get_dbf_entry(text);
if (irq_ptr->debug_area)
DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf reused");
else {
irq_ptr->debug_area = debug_register(text, 2, 1, 16);
if (!irq_ptr->debug_area)
return -ENOMEM;
if (debug_register_view(irq_ptr->debug_area,
&debug_hex_ascii_view)) {
debug_unregister(irq_ptr->debug_area);
return -ENOMEM;
}
debug_set_level(irq_ptr->debug_area, DBF_WARN);
DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created");
new_entry = kzalloc(sizeof(struct qdio_dbf_entry), GFP_KERNEL);
if (!new_entry) {
debug_unregister(irq_ptr->debug_area);
return -ENOMEM;
}
strscpy(new_entry->dbf_name, text, QDIO_DBF_NAME_LEN);
new_entry->dbf_info = irq_ptr->debug_area;
mutex_lock(&qdio_dbf_list_mutex);
list_add(&new_entry->dbf_list, &qdio_dbf_list);
mutex_unlock(&qdio_dbf_list_mutex);
}
return 0;
}
static int qstat_show(struct seq_file *m, void *v)
{
unsigned char state;
struct qdio_q *q = m->private;
int i;
if (!q)
return 0;
seq_printf(m, "Timestamp: %llx\n", q->timestamp);
seq_printf(m, "Last Data IRQ: %llx Last AI: %llx\n",
q->irq_ptr->last_data_irq_time, last_ai_time);
seq_printf(m, "nr_used: %d ftc: %d\n",
atomic_read(&q->nr_buf_used), q->first_to_check);
if (q->is_input_q) {
seq_printf(m, "batch start: %u batch count: %u\n",
q->u.in.batch_start, q->u.in.batch_count);
seq_printf(m, "DSCI: %x IRQs disabled: %u\n",
*(u8 *)q->irq_ptr->dsci,
test_bit(QDIO_IRQ_DISABLED,
&q->irq_ptr->poll_state));
}
seq_printf(m, "SBAL states:\n");
seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
debug_get_buf_state(q, i, &state);
switch (state) {
case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_OUTPUT_NOT_INIT:
seq_printf(m, "N");
break;
case SLSB_P_OUTPUT_PENDING:
seq_printf(m, "P");
break;
case SLSB_P_INPUT_PRIMED:
case SLSB_CU_OUTPUT_PRIMED:
seq_printf(m, "+");
break;
case SLSB_P_INPUT_ACK:
seq_printf(m, "A");
break;
case SLSB_P_INPUT_ERROR:
case SLSB_P_OUTPUT_ERROR:
seq_printf(m, "x");
break;
case SLSB_CU_INPUT_EMPTY:
case SLSB_P_OUTPUT_EMPTY:
seq_printf(m, "-");
break;
case SLSB_P_INPUT_HALTED:
case SLSB_P_OUTPUT_HALTED:
seq_printf(m, ".");
break;
default:
seq_printf(m, "?");
}
if (i == 63)
seq_printf(m, "\n");
}
seq_printf(m, "\n");
seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n");
seq_printf(m, "\nSBAL statistics:");
if (!q->irq_ptr->perf_stat_enabled) {
seq_printf(m, " disabled\n");
return 0;
}
seq_printf(m, "\n1 2.. 4.. 8.. "
"16.. 32.. 64.. 128\n");
for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++)
seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]);
seq_printf(m, "\nError NOP Total\n%-10u %-10u %-10u\n\n",
q->q_stats.nr_sbal_error, q->q_stats.nr_sbal_nop,
q->q_stats.nr_sbal_total);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(qstat);
static int ssqd_show(struct seq_file *m, void *v)
{
struct ccw_device *cdev = m->private;
struct qdio_ssqd_desc ssqd;
int rc;
rc = qdio_get_ssqd_desc(cdev, &ssqd);
if (rc)
return rc;
seq_hex_dump(m, "", DUMP_PREFIX_NONE, 16, 4, &ssqd, sizeof(ssqd),
false);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ssqd);
static char *qperf_names[] = {
"Assumed adapter interrupts",
"QDIO interrupts",
"SIGA read",
"SIGA write",
"SIGA sync",
"Inbound calls",
"Inbound stop_polling",
"Inbound queue full",
"Outbound calls",
"Outbound queue full",
"Outbound fast_requeue",
"Outbound target_full",
"QEBSM eqbs",
"QEBSM eqbs partial",
"QEBSM sqbs",
"QEBSM sqbs partial",
"Discarded interrupts"
};
static int qperf_show(struct seq_file *m, void *v)
{
struct qdio_irq *irq_ptr = m->private;
unsigned int *stat;
int i;
if (!irq_ptr)
return 0;
if (!irq_ptr->perf_stat_enabled) {
seq_printf(m, "disabled\n");
return 0;
}
stat = (unsigned int *)&irq_ptr->perf_stat;
for (i = 0; i < ARRAY_SIZE(qperf_names); i++)
seq_printf(m, "%26s:\t%u\n",
qperf_names[i], *(stat + i));
return 0;
}
static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
size_t count, loff_t *off)
{
struct seq_file *seq = file->private_data;
struct qdio_irq *irq_ptr = seq->private;
struct qdio_q *q;
unsigned long val;
int ret, i;
if (!irq_ptr)
return 0;
ret = kstrtoul_from_user(ubuf, count, 10, &val);
if (ret)
return ret;
switch (val) {
case 0:
irq_ptr->perf_stat_enabled = 0;
memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
for_each_input_queue(irq_ptr, q, i)
memset(&q->q_stats, 0, sizeof(q->q_stats));
for_each_output_queue(irq_ptr, q, i)
memset(&q->q_stats, 0, sizeof(q->q_stats));
break;
case 1:
irq_ptr->perf_stat_enabled = 1;
break;
}
return count;
}
static int qperf_seq_open(struct inode *inode, struct file *filp)
{
return single_open(filp, qperf_show,
file_inode(filp)->i_private);
}
static const struct file_operations debugfs_perf_fops = {
.owner = THIS_MODULE,
.open = qperf_seq_open,
.read = seq_read,
.write = qperf_seq_write,
.llseek = seq_lseek,
.release = single_release,
};
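/*
 * Usage note (illustrative): with debugfs mounted at the usual place,
 * writing "1" to the "statistics" attribute created below enables the
 * counters shown by qperf_show(), and writing "0" disables and resets
 * them, e.g.:
 *
 *	echo 1 > /sys/kernel/debug/qdio/<busid>/statistics
 */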
static void setup_debugfs_entry(struct dentry *parent, struct qdio_q *q)
{
char name[QDIO_DEBUGFS_NAME_LEN];
snprintf(name, QDIO_DEBUGFS_NAME_LEN, "%s_%d",
q->is_input_q ? "input" : "output",
q->nr);
debugfs_create_file(name, 0444, parent, q, &qstat_fops);
}
void qdio_setup_debug_entries(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
int i;
irq_ptr->debugfs_dev = debugfs_create_dir(dev_name(&irq_ptr->cdev->dev),
debugfs_root);
debugfs_create_file("statistics", S_IFREG | S_IRUGO | S_IWUSR,
irq_ptr->debugfs_dev, irq_ptr, &debugfs_perf_fops);
debugfs_create_file("ssqd", 0444, irq_ptr->debugfs_dev, irq_ptr->cdev,
&ssqd_fops);
for_each_input_queue(irq_ptr, q, i)
setup_debugfs_entry(irq_ptr->debugfs_dev, q);
for_each_output_queue(irq_ptr, q, i)
setup_debugfs_entry(irq_ptr->debugfs_dev, q);
}
void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr)
{
debugfs_remove_recursive(irq_ptr->debugfs_dev);
}
int __init qdio_debug_init(void)
{
debugfs_root = debugfs_create_dir("qdio", NULL);
qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16);
debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
debug_set_level(qdio_dbf_setup, DBF_INFO);
DBF_EVENT("dbf created\n");
qdio_dbf_error = debug_register("qdio_error", 4, 1, 16);
debug_register_view(qdio_dbf_error, &debug_hex_ascii_view);
debug_set_level(qdio_dbf_error, DBF_INFO);
DBF_ERROR("dbf created\n");
return 0;
}
void qdio_debug_exit(void)
{
qdio_clear_dbf_list();
debugfs_remove_recursive(debugfs_root);
debug_unregister(qdio_dbf_setup);
debug_unregister(qdio_dbf_error);
}
| linux-master | drivers/s390/cio/qdio_debug.c |
// SPDX-License-Identifier: GPL-2.0
/*
* S/390 common I/O routines -- blacklisting of specific devices
*
* Copyright IBM Corp. 1999, 2013
* Author(s): Ingo Adlung ([email protected])
* Cornelia Huck ([email protected])
* Arnd Bergmann ([email protected])
*/
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ipl.h>
#include "blacklist.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
/*
* "Blacklisting" of certain devices:
 * Device numbers given on the command line as cio_ignore=... won't be
 * known to Linux.
*
* These can be single devices or ranges of devices
*/
/* 65536 bits for each set to indicate if a devno is blacklisted or not */
#define __BL_DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
(8*sizeof(long)))
static unsigned long bl_dev[__MAX_SSID + 1][__BL_DEV_WORDS];
typedef enum {add, free} range_action;
/*
* Function: blacklist_range
 * (Un-)blacklist the devices in the given from-to range
*/
static int blacklist_range(range_action action, unsigned int from_ssid,
unsigned int to_ssid, unsigned int from,
unsigned int to, int msgtrigger)
{
if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) {
if (msgtrigger)
pr_warn("0.%x.%04x to 0.%x.%04x is not a valid range for cio_ignore\n",
from_ssid, from, to_ssid, to);
return 1;
}
while ((from_ssid < to_ssid) || ((from_ssid == to_ssid) &&
(from <= to))) {
if (action == add)
set_bit(from, bl_dev[from_ssid]);
else
clear_bit(from, bl_dev[from_ssid]);
from++;
if (from > __MAX_SUBCHANNEL) {
from_ssid++;
from = 0;
}
}
return 0;
}
static int pure_hex(char **cp, unsigned int *val, int min_digit,
int max_digit, int max_val)
{
int diff;
diff = 0;
*val = 0;
while (diff <= max_digit) {
int value = hex_to_bin(**cp);
if (value < 0)
break;
*val = *val * 16 + value;
(*cp)++;
diff++;
}
if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
return 1;
return 0;
}
static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid,
unsigned int *devno, int msgtrigger)
{
char *str_work;
int val, rc, ret;
rc = 1;
if (*str == '\0')
goto out;
/* old style */
str_work = str;
val = simple_strtoul(str, &str_work, 16);
if (*str_work == '\0') {
if (val <= __MAX_SUBCHANNEL) {
*devno = val;
*ssid = 0;
*cssid = 0;
rc = 0;
}
goto out;
}
/* new style */
str_work = str;
ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
if (ret || (str_work[0] != '.'))
goto out;
str_work++;
ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
if (ret || (str_work[0] != '.'))
goto out;
str_work++;
ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
if (ret || (str_work[0] != '\0'))
goto out;
rc = 0;
out:
if (rc && msgtrigger)
pr_warn("%s is not a valid device for the cio_ignore kernel parameter\n",
str);
return rc;
}
static int blacklist_parse_parameters(char *str, range_action action,
int msgtrigger)
{
unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
int rc, totalrc;
char *parm;
range_action ra;
totalrc = 0;
while ((parm = strsep(&str, ","))) {
rc = 0;
ra = action;
if (*parm == '!') {
if (ra == add)
ra = free;
else
ra = add;
parm++;
}
if (strcmp(parm, "all") == 0) {
from_cssid = 0;
from_ssid = 0;
from = 0;
to_cssid = __MAX_CSSID;
to_ssid = __MAX_SSID;
to = __MAX_SUBCHANNEL;
} else if (strcmp(parm, "ipldev") == 0) {
if (ipl_info.type == IPL_TYPE_CCW) {
from_cssid = 0;
from_ssid = ipl_info.data.ccw.dev_id.ssid;
from = ipl_info.data.ccw.dev_id.devno;
} else if (ipl_info.type == IPL_TYPE_FCP ||
ipl_info.type == IPL_TYPE_FCP_DUMP) {
from_cssid = 0;
from_ssid = ipl_info.data.fcp.dev_id.ssid;
from = ipl_info.data.fcp.dev_id.devno;
} else {
continue;
}
to_cssid = from_cssid;
to_ssid = from_ssid;
to = from;
} else if (strcmp(parm, "condev") == 0) {
if (console_devno == -1)
continue;
from_cssid = to_cssid = 0;
from_ssid = to_ssid = 0;
from = to = console_devno;
} else {
rc = parse_busid(strsep(&parm, "-"), &from_cssid,
&from_ssid, &from, msgtrigger);
if (!rc) {
if (parm != NULL)
rc = parse_busid(parm, &to_cssid,
&to_ssid, &to,
msgtrigger);
else {
to_cssid = from_cssid;
to_ssid = from_ssid;
to = from;
}
}
}
if (!rc) {
rc = blacklist_range(ra, from_ssid, to_ssid, from, to,
msgtrigger);
if (rc)
totalrc = -EINVAL;
} else
totalrc = -EINVAL;
}
return totalrc;
}
static int __init
blacklist_setup (char *str)
{
CIO_MSG_EVENT(6, "Reading blacklist parameters\n");
if (blacklist_parse_parameters(str, add, 1))
return 0;
return 1;
}
__setup ("cio_ignore=", blacklist_setup);
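/*
 * Example parameter values (illustrative), matching the parser above:
 * a single device, a range, and the '!' negation that re-enables
 * devices inside a larger blacklisted set:
 *
 *	cio_ignore=0.0.0200
 *	cio_ignore=0.0.b100-0.0.b1ff
 *	cio_ignore=all,!0.0.c000
 *
 * The keywords "ipldev" and "condev" expand to the IPL and console
 * devices, respectively.
 */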
/* Checking if devices are blacklisted */
/*
* Function: is_blacklisted
 * Returns 1 if the given device number can be found in the blacklist,
* otherwise 0.
* Used by validate_subchannel()
*/
int
is_blacklisted (int ssid, int devno)
{
return test_bit (devno, bl_dev[ssid]);
}
#ifdef CONFIG_PROC_FS
/*
* Function: blacklist_parse_proc_parameters
 * parse the input written to /proc/cio_ignore
*/
static int blacklist_parse_proc_parameters(char *buf)
{
int rc;
char *parm;
parm = strsep(&buf, " ");
if (strcmp("free", parm) == 0) {
rc = blacklist_parse_parameters(buf, free, 0);
/*
* Evaluate the subchannels without an online device. This way,
* no path-verification will be triggered on those subchannels
* and it avoids unnecessary delays.
*/
css_schedule_eval_cond(CSS_EVAL_NOT_ONLINE, 0);
} else if (strcmp("add", parm) == 0)
rc = blacklist_parse_parameters(buf, add, 0);
else if (strcmp("purge", parm) == 0)
return ccw_purge_blacklisted();
else
return -EINVAL;
return rc;
}
/* Iterator struct for all devices. */
struct ccwdev_iter {
int devno;
int ssid;
int in_range;
};
static void *
cio_ignore_proc_seq_start(struct seq_file *s, loff_t *offset)
{
struct ccwdev_iter *iter = s->private;
if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
return NULL;
memset(iter, 0, sizeof(*iter));
iter->ssid = *offset / (__MAX_SUBCHANNEL + 1);
iter->devno = *offset % (__MAX_SUBCHANNEL + 1);
return iter;
}
static void
cio_ignore_proc_seq_stop(struct seq_file *s, void *it)
{
}
static void *
cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
{
struct ccwdev_iter *iter;
loff_t p = *offset;
(*offset)++;
if (p >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
return NULL;
iter = it;
if (iter->devno == __MAX_SUBCHANNEL) {
iter->devno = 0;
iter->ssid++;
if (iter->ssid > __MAX_SSID)
return NULL;
} else
iter->devno++;
return iter;
}
static int
cio_ignore_proc_seq_show(struct seq_file *s, void *it)
{
struct ccwdev_iter *iter;
iter = it;
if (!is_blacklisted(iter->ssid, iter->devno))
/* Not blacklisted, nothing to output. */
return 0;
if (!iter->in_range) {
/* First device in range. */
if ((iter->devno == __MAX_SUBCHANNEL) ||
!is_blacklisted(iter->ssid, iter->devno + 1)) {
/* Singular device. */
seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
return 0;
}
iter->in_range = 1;
seq_printf(s, "0.%x.%04x-", iter->ssid, iter->devno);
return 0;
}
if ((iter->devno == __MAX_SUBCHANNEL) ||
!is_blacklisted(iter->ssid, iter->devno + 1)) {
/* Last device in range. */
iter->in_range = 0;
seq_printf(s, "0.%x.%04x\n", iter->ssid, iter->devno);
}
return 0;
}
static ssize_t
cio_ignore_write(struct file *file, const char __user *user_buf,
size_t user_len, loff_t *offset)
{
char *buf;
ssize_t rc, ret, i;
if (*offset)
return -EINVAL;
if (user_len > 65536)
user_len = 65536;
buf = vzalloc(user_len + 1); /* maybe better use the stack? */
if (buf == NULL)
return -ENOMEM;
if (strncpy_from_user (buf, user_buf, user_len) < 0) {
rc = -EFAULT;
goto out_free;
}
i = user_len - 1;
while ((i >= 0) && (isspace(buf[i]) || (buf[i] == 0))) {
buf[i] = '\0';
i--;
}
ret = blacklist_parse_proc_parameters(buf);
if (ret)
rc = ret;
else
rc = user_len;
out_free:
vfree (buf);
return rc;
}
static const struct seq_operations cio_ignore_proc_seq_ops = {
.start = cio_ignore_proc_seq_start,
.stop = cio_ignore_proc_seq_stop,
.next = cio_ignore_proc_seq_next,
.show = cio_ignore_proc_seq_show,
};
static int
cio_ignore_proc_open(struct inode *inode, struct file *file)
{
return seq_open_private(file, &cio_ignore_proc_seq_ops,
sizeof(struct ccwdev_iter));
}
static const struct proc_ops cio_ignore_proc_ops = {
.proc_open = cio_ignore_proc_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = seq_release_private,
.proc_write = cio_ignore_write,
};
static int
cio_ignore_proc_init (void)
{
struct proc_dir_entry *entry;
entry = proc_create("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, NULL,
&cio_ignore_proc_ops);
if (!entry)
return -ENOENT;
return 0;
}
__initcall (cio_ignore_proc_init);
#endif /* CONFIG_PROC_FS */
| linux-master | drivers/s390/cio/blacklist.c |
// SPDX-License-Identifier: GPL-2.0
/*
* qdio queue initialization
*
* Copyright IBM Corp. 2008
* Author(s): Jan Glauber <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/io.h>
#include <asm/ebcdic.h>
#include <asm/qdio.h>
#include "cio.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "chsc.h"
#include "qdio.h"
#include "qdio_debug.h"
#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
static struct kmem_cache *qdio_q_cache;
/**
* qdio_free_buffers() - free qdio buffers
* @buf: array of pointers to qdio buffers
* @count: number of qdio buffers to free
*/
void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count)
{
int pos;
for (pos = 0; pos < count; pos += QBUFF_PER_PAGE)
free_page((unsigned long) buf[pos]);
}
EXPORT_SYMBOL_GPL(qdio_free_buffers);
/**
* qdio_alloc_buffers() - allocate qdio buffers
* @buf: array of pointers to qdio buffers
* @count: number of qdio buffers to allocate
*/
int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count)
{
int pos;
for (pos = 0; pos < count; pos += QBUFF_PER_PAGE) {
buf[pos] = (void *) get_zeroed_page(GFP_KERNEL);
if (!buf[pos]) {
qdio_free_buffers(buf, count);
return -ENOMEM;
}
}
for (pos = 0; pos < count; pos++)
if (pos % QBUFF_PER_PAGE)
buf[pos] = buf[pos - 1] + 1;
return 0;
}
EXPORT_SYMBOL_GPL(qdio_alloc_buffers);
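/*
 * Illustrative sketch (not part of this file): a driver allocating a
 * full set of buffers for one queue and releasing them again. The
 * per-queue count QDIO_MAX_BUFFERS_PER_Q is real; the example_* name
 * is made up.
 */
static int __maybe_unused example_alloc_queue_bufs(struct qdio_buffer **bufs)
{
	int rc;

	rc = qdio_alloc_buffers(bufs, QDIO_MAX_BUFFERS_PER_Q);
	if (rc)
		return rc;
	/* ... pass bufs via qdio_initialize's *_sbal_addr_array ... */
	qdio_free_buffers(bufs, QDIO_MAX_BUFFERS_PER_Q);
	return 0;
}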
/**
* qdio_reset_buffers() - reset qdio buffers
* @buf: array of pointers to qdio buffers
* @count: number of qdio buffers that will be zeroed
*/
void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count)
{
int pos;
for (pos = 0; pos < count; pos++)
memset(buf[pos], 0, sizeof(struct qdio_buffer));
}
EXPORT_SYMBOL_GPL(qdio_reset_buffers);
static void __qdio_free_queues(struct qdio_q **queues, unsigned int count)
{
struct qdio_q *q;
unsigned int i;
for (i = 0; i < count; i++) {
q = queues[i];
free_page((unsigned long) q->slib);
kmem_cache_free(qdio_q_cache, q);
}
}
void qdio_free_queues(struct qdio_irq *irq_ptr)
{
__qdio_free_queues(irq_ptr->input_qs, irq_ptr->max_input_qs);
irq_ptr->max_input_qs = 0;
__qdio_free_queues(irq_ptr->output_qs, irq_ptr->max_output_qs);
irq_ptr->max_output_qs = 0;
}
static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
{
struct qdio_q *q;
int i;
for (i = 0; i < nr_queues; i++) {
q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
if (!q) {
__qdio_free_queues(irq_ptr_qs, i);
return -ENOMEM;
}
q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
if (!q->slib) {
kmem_cache_free(qdio_q_cache, q);
__qdio_free_queues(irq_ptr_qs, i);
return -ENOMEM;
}
irq_ptr_qs[i] = q;
}
return 0;
}
int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs)
{
int rc;
rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
if (rc)
return rc;
rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
if (rc) {
__qdio_free_queues(irq_ptr->input_qs, nr_input_qs);
return rc;
}
irq_ptr->max_input_qs = nr_input_qs;
irq_ptr->max_output_qs = nr_output_qs;
return 0;
}
static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
qdio_handler_t *handler, int i)
{
struct slib *slib = q->slib;
/* queue must be cleared for qdio_establish */
memset(q, 0, sizeof(*q));
memset(slib, 0, PAGE_SIZE);
q->slib = slib;
q->irq_ptr = irq_ptr;
q->mask = 1 << (31 - i);
q->nr = i;
q->handler = handler;
}
static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
struct qdio_buffer **sbals_array, int i)
{
struct qdio_q *prev;
int j;
DBF_HEX(&q, sizeof(void *));
q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
/* fill in sbal */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
q->sbal[j] = *sbals_array++;
/* fill in slib */
if (i > 0) {
prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
: irq_ptr->output_qs[i - 1];
prev->slib->nsliba = (unsigned long)q->slib;
}
q->slib->sla = (unsigned long)q->sl;
q->slib->slsba = (unsigned long)&q->slsb.val[0];
/* fill in sl */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
q->sl->element[j].sbal = virt_to_phys(q->sbal[j]);
}
static void setup_queues(struct qdio_irq *irq_ptr,
struct qdio_initialize *qdio_init)
{
struct qdio_q *q;
int i;
for_each_input_queue(irq_ptr, q, i) {
DBF_EVENT("inq:%1d", i);
setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
q->is_input_q = 1;
setup_storage_lists(q, irq_ptr,
qdio_init->input_sbal_addr_array[i], i);
}
for_each_output_queue(irq_ptr, q, i) {
DBF_EVENT("outq:%1d", i);
setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
q->is_input_q = 0;
setup_storage_lists(q, irq_ptr,
qdio_init->output_sbal_addr_array[i], i);
}
}
static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
unsigned char qdioac, unsigned long token)
{
if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
goto no_qebsm;
if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
(!(qdioac & AC1_SC_QEBSM_ENABLED)))
goto no_qebsm;
irq_ptr->sch_token = token;
DBF_EVENT("V=V:1");
DBF_EVENT("%8lx", irq_ptr->sch_token);
return;
no_qebsm:
irq_ptr->sch_token = 0;
irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
DBF_EVENT("noV=V");
}
/*
* If there is a qdio_irq we use the chsc_page and store the information
* in the qdio_irq, otherwise we copy it to the specified structure.
*/
int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
struct subchannel_id *schid,
struct qdio_ssqd_desc *data)
{
struct chsc_ssqd_area *ssqd;
int rc;
DBF_EVENT("getssqd:%4x", schid->sch_no);
if (!irq_ptr) {
ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL);
if (!ssqd)
return -ENOMEM;
} else {
ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
}
rc = chsc_ssqd(*schid, ssqd);
if (rc)
goto out;
if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
!(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
(ssqd->qdio_ssqd.sch != schid->sch_no))
rc = -EINVAL;
if (!rc)
memcpy(data, &ssqd->qdio_ssqd, sizeof(*data));
out:
if (!irq_ptr)
free_page((unsigned long)ssqd);
return rc;
}
void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
{
unsigned char qdioac;
int rc;
rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, &irq_ptr->ssqd_desc);
if (rc) {
DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no);
DBF_ERROR("rc:%x", rc);
/* all flags set, worst case */
qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
AC1_SIGA_SYNC_NEEDED;
} else
qdioac = irq_ptr->ssqd_desc.qdioac1;
check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
irq_ptr->qdioac1 = qdioac;
DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2);
DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
}
static void qdio_fill_qdr_desc(struct qdesfmt0 *desc, struct qdio_q *queue)
{
desc->sliba = virt_to_phys(queue->slib);
desc->sla = virt_to_phys(queue->sl);
desc->slsba = virt_to_phys(&queue->slsb);
desc->akey = PAGE_DEFAULT_KEY >> 4;
desc->bkey = PAGE_DEFAULT_KEY >> 4;
desc->ckey = PAGE_DEFAULT_KEY >> 4;
desc->dkey = PAGE_DEFAULT_KEY >> 4;
}
static void setup_qdr(struct qdio_irq *irq_ptr,
struct qdio_initialize *qdio_init)
{
struct qdesfmt0 *desc = &irq_ptr->qdr->qdf0[0];
int i;
memset(irq_ptr->qdr, 0, sizeof(struct qdr));
irq_ptr->qdr->qfmt = qdio_init->q_format;
irq_ptr->qdr->ac = qdio_init->qdr_ac;
irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
irq_ptr->qdr->qiba = virt_to_phys(&irq_ptr->qib);
irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
for (i = 0; i < qdio_init->no_input_qs; i++)
qdio_fill_qdr_desc(desc++, irq_ptr->input_qs[i]);
for (i = 0; i < qdio_init->no_output_qs; i++)
qdio_fill_qdr_desc(desc++, irq_ptr->output_qs[i]);
}
static void setup_qib(struct qdio_irq *irq_ptr,
struct qdio_initialize *init_data)
{
memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
irq_ptr->qib.qfmt = init_data->q_format;
irq_ptr->qib.pfmt = init_data->qib_param_field_format;
irq_ptr->qib.rflags = init_data->qib_rflags;
if (css_general_characteristics.qebsm)
irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
if (init_data->no_input_qs)
irq_ptr->qib.isliba =
(unsigned long)(irq_ptr->input_qs[0]->slib);
if (init_data->no_output_qs)
irq_ptr->qib.osliba =
(unsigned long)(irq_ptr->output_qs[0]->slib);
memcpy(irq_ptr->qib.ebcnam, dev_name(&irq_ptr->cdev->dev), 8);
ASCEBC(irq_ptr->qib.ebcnam, 8);
if (init_data->qib_param_field)
memcpy(irq_ptr->qib.parm, init_data->qib_param_field,
sizeof(irq_ptr->qib.parm));
}
void qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
{
struct ccw_device *cdev = irq_ptr->cdev;
irq_ptr->qdioac1 = 0;
memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
irq_ptr->debugfs_dev = NULL;
irq_ptr->sch_token = irq_ptr->perf_stat_enabled = 0;
irq_ptr->state = QDIO_IRQ_STATE_INACTIVE;
irq_ptr->error_handler = init_data->input_handler;
irq_ptr->int_parm = init_data->int_parm;
irq_ptr->nr_input_qs = init_data->no_input_qs;
irq_ptr->nr_output_qs = init_data->no_output_qs;
ccw_device_get_schid(cdev, &irq_ptr->schid);
setup_queues(irq_ptr, init_data);
irq_ptr->irq_poll = init_data->irq_poll;
set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
setup_qib(irq_ptr, init_data);
/* fill input and output descriptors */
setup_qdr(irq_ptr, init_data);
/* qdr, qib, sls, slsbs, slibs, sbales are filled now */
/* set our IRQ handler */
spin_lock_irq(get_ccwdev_lock(cdev));
irq_ptr->orig_handler = cdev->handler;
cdev->handler = qdio_int_handler;
spin_unlock_irq(get_ccwdev_lock(cdev));
}
void qdio_shutdown_irq(struct qdio_irq *irq)
{
struct ccw_device *cdev = irq->cdev;
/* restore IRQ handler */
spin_lock_irq(get_ccwdev_lock(cdev));
cdev->handler = irq->orig_handler;
cdev->private->intparm = 0;
spin_unlock_irq(get_ccwdev_lock(cdev));
}
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
{
dev_info(&irq_ptr->cdev->dev,
"qdio: %s on SC %x using AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s\n",
(irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
irq_ptr->schid.sch_no,
is_thinint_irq(irq_ptr),
(irq_ptr->sch_token) ? 1 : 0,
pci_out_supported(irq_ptr) ? 1 : 0,
css_general_characteristics.aif_tdd,
qdio_need_siga_in(irq_ptr) ? "R" : " ",
qdio_need_siga_out(irq_ptr) ? "W" : " ",
qdio_need_siga_sync(irq_ptr) ? "S" : " ");
}
int __init qdio_setup_init(void)
{
qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
256, 0, NULL);
if (!qdio_q_cache)
return -ENOMEM;
/* Check for OSA/FCP thin interrupts (bit 67). */
DBF_EVENT("thinint:%1d",
(css_general_characteristics.aif_osa) ? 1 : 0);
/* Check for QEBSM support in general (bit 58). */
DBF_EVENT("cssQEBSM:%1d", css_general_characteristics.qebsm);
return 0;
}
void qdio_setup_exit(void)
{
kmem_cache_destroy(qdio_q_cache);
}
| linux-master | drivers/s390/cio/qdio_setup.c |
// SPDX-License-Identifier: GPL-2.0
/*
* S/390 common I/O routines -- low level i/o calls
*
* Copyright IBM Corp. 1999, 2008
* Author(s): Ingo Adlung ([email protected])
* Cornelia Huck ([email protected])
* Arnd Bergmann ([email protected])
* Martin Schwidefsky ([email protected])
*/
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/cio.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/setup.h>
#include <asm/ipl.h>
#include <asm/chpid.h>
#include <asm/airq.h>
#include <asm/isc.h>
#include <linux/sched/cputime.h>
#include <asm/fcx.h>
#include <asm/nmi.h>
#include <asm/crw.h>
#include "cio.h"
#include "css.h"
#include "chsc.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "chp.h"
#include "trace.h"
debug_info_t *cio_debug_msg_id;
debug_info_t *cio_debug_trace_id;
debug_info_t *cio_debug_crw_id;
DEFINE_PER_CPU_ALIGNED(struct irb, cio_irb);
EXPORT_PER_CPU_SYMBOL(cio_irb);
/*
* Function: cio_debug_init
* Initializes three debug logs for common I/O:
* - cio_msg logs generic cio messages
* - cio_trace logs the calling of different functions
* - cio_crw logs machine check related cio messages
*/
static int __init cio_debug_init(void)
{
cio_debug_msg_id = debug_register("cio_msg", 16, 1, 11 * sizeof(long));
if (!cio_debug_msg_id)
goto out_unregister;
debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
debug_set_level(cio_debug_msg_id, 2);
cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16);
if (!cio_debug_trace_id)
goto out_unregister;
debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
debug_set_level(cio_debug_trace_id, 2);
cio_debug_crw_id = debug_register("cio_crw", 8, 1, 8 * sizeof(long));
if (!cio_debug_crw_id)
goto out_unregister;
debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
debug_set_level(cio_debug_crw_id, 4);
return 0;
out_unregister:
debug_unregister(cio_debug_msg_id);
debug_unregister(cio_debug_trace_id);
debug_unregister(cio_debug_crw_id);
return -1;
}
arch_initcall (cio_debug_init);
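/*
 * The registered logs are exposed through the s390 debug feature,
 * typically under /sys/kernel/debug/s390dbf/ (illustrative paths, one per
 * registered view):
 *
 *	/sys/kernel/debug/s390dbf/cio_msg/sprintf
 *	/sys/kernel/debug/s390dbf/cio_trace/hex_ascii
 *	/sys/kernel/debug/s390dbf/cio_crw/sprintf
 */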
int cio_set_options(struct subchannel *sch, int flags)
{
struct io_subchannel_private *priv = to_io_private(sch);
priv->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
priv->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
priv->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
return 0;
}
static int
cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
{
char dbf_text[15];
if (lpm != 0)
sch->lpm &= ~lpm;
else
sch->lpm = 0;
CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
"subchannel 0.%x.%04x!\n", sch->schid.ssid,
sch->schid.sch_no);
if (cio_update_schib(sch))
return -ENODEV;
sprintf(dbf_text, "no%s", dev_name(&sch->dev));
CIO_TRACE_EVENT(0, dbf_text);
CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));
return (sch->lpm ? -EACCES : -ENODEV);
}
int
cio_start_key (struct subchannel *sch, /* subchannel structure */
struct ccw1 * cpa, /* logical channel prog addr */
__u8 lpm, /* logical path mask */
__u8 key) /* storage key */
{
struct io_subchannel_private *priv = to_io_private(sch);
union orb *orb = &priv->orb;
int ccode;
CIO_TRACE_EVENT(5, "stIO");
CIO_TRACE_EVENT(5, dev_name(&sch->dev));
memset(orb, 0, sizeof(union orb));
/* sch is always under 2G. */
orb->cmd.intparm = (u32)virt_to_phys(sch);
orb->cmd.fmt = 1;
orb->cmd.pfch = priv->options.prefetch == 0;
orb->cmd.spnd = priv->options.suspend;
orb->cmd.ssic = priv->options.suspend && priv->options.inter;
orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
/*
 * For 64-bit kernels, 64-bit IDAWs with a 4k page size are always used.
 */
orb->cmd.c64 = 1;
orb->cmd.i2k = 0;
orb->cmd.key = key >> 4;
/* issue "Start Subchannel" */
orb->cmd.cpa = (u32)virt_to_phys(cpa);
ccode = ssch(sch->schid, orb);
/* process condition code */
CIO_HEX_EVENT(5, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
/*
* initialize device status information
*/
sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
return 0;
case 1: /* status pending */
case 2: /* busy */
return -EBUSY;
case 3: /* device/path not operational */
return cio_start_handle_notoper(sch, lpm);
default:
return ccode;
}
}
EXPORT_SYMBOL_GPL(cio_start_key);
int
cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
{
return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY);
}
EXPORT_SYMBOL_GPL(cio_start);
/*
* resume suspended I/O operation
*/
int
cio_resume (struct subchannel *sch)
{
int ccode;
CIO_TRACE_EVENT(4, "resIO");
CIO_TRACE_EVENT(4, dev_name(&sch->dev));
ccode = rsch (sch->schid);
CIO_HEX_EVENT(4, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND;
return 0;
case 1:
return -EBUSY;
case 2:
return -EINVAL;
default:
/*
 * Waiting for request completion is pointless, as the device
 * is no longer operational.
 */
return -ENODEV;
}
}
EXPORT_SYMBOL_GPL(cio_resume);
/*
* halt I/O operation
*/
int
cio_halt(struct subchannel *sch)
{
int ccode;
if (!sch)
return -ENODEV;
CIO_TRACE_EVENT(2, "haltIO");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
/*
* Issue "Halt subchannel" and process condition code
*/
ccode = hsch (sch->schid);
CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
return 0;
case 1: /* status pending */
case 2: /* busy */
return -EBUSY;
default: /* device not operational */
return -ENODEV;
}
}
EXPORT_SYMBOL_GPL(cio_halt);
/*
* Clear I/O operation
*/
int
cio_clear(struct subchannel *sch)
{
int ccode;
if (!sch)
return -ENODEV;
CIO_TRACE_EVENT(2, "clearIO");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
/*
* Issue "Clear subchannel" and process condition code
*/
ccode = csch (sch->schid);
CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
switch (ccode) {
case 0:
sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND;
return 0;
default: /* device not operational */
return -ENODEV;
}
}
EXPORT_SYMBOL_GPL(cio_clear);
/*
* Function: cio_cancel
* Issues a "Cancel Subchannel" on the specified subchannel
* Note: We don't need any fancy intparms and flags here
* since xsch is executed synchronously.
* For common I/O internal use only, for now.
*/
int
cio_cancel (struct subchannel *sch)
{
int ccode;
if (!sch)
return -ENODEV;
CIO_TRACE_EVENT(2, "cancelIO");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
ccode = xsch (sch->schid);
CIO_HEX_EVENT(2, &ccode, sizeof(ccode));
switch (ccode) {
case 0: /* success */
/* Update information in scsw. */
if (cio_update_schib(sch))
return -ENODEV;
return 0;
case 1: /* status pending */
return -EBUSY;
case 2: /* not applicable */
return -EINVAL;
default: /* not oper */
return -ENODEV;
}
}
EXPORT_SYMBOL_GPL(cio_cancel);
/**
* cio_cancel_halt_clear - Cancel running I/O by performing cancel, halt
* and clear, in that order, if the subchannel is valid.
* @sch: subchannel on which to perform the cancel_halt_clear operation
* @iretry: number of retries remaining for the next operation
*
* This should be called repeatedly since halt/clear are asynchronous
* operations. We do one try with cio_cancel, three tries with cio_halt,
* 255 tries with cio_clear. The caller should initialize @iretry with
* the value 255 for its first call to this, and keep using the same
* @iretry in subsequent calls until a value other than -EBUSY is returned.
*
* Returns 0 if device now idle, -ENODEV for device not operational,
* -EBUSY if an interrupt is expected (either from halt/clear or from a
* status pending), and -EIO if out of retries.
*/
int cio_cancel_halt_clear(struct subchannel *sch, int *iretry)
{
int ret;
if (cio_update_schib(sch))
return -ENODEV;
if (!sch->schib.pmcw.ena)
/* Not operational -> done. */
return 0;
/* Stage 1: cancel io. */
if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) &&
!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
if (!scsw_is_tm(&sch->schib.scsw)) {
ret = cio_cancel(sch);
if (ret != -EINVAL)
return ret;
}
/*
* Cancel io unsuccessful or not applicable (transport mode).
* Continue with asynchronous instructions.
*/
*iretry = 3; /* 3 halt retries. */
}
/* Stage 2: halt io. */
if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
if (*iretry) {
*iretry -= 1;
ret = cio_halt(sch);
if (ret != -EBUSY)
return (ret == 0) ? -EBUSY : ret;
}
/* Halt io unsuccessful. */
*iretry = 255; /* 255 clear retries. */
}
/* Stage 3: clear io. */
if (*iretry) {
*iretry -= 1;
ret = cio_clear(sch);
return (ret == 0) ? -EBUSY : ret;
}
/* Function was unsuccessful */
return -EIO;
}
EXPORT_SYMBOL_GPL(cio_cancel_halt_clear);
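/*
 * Example (illustrative caller sketch of the retry protocol documented
 * above; wait_for_irq_or_timeout() is a hypothetical stand-in for the
 * caller's own wait mechanism):
 *
 *	int iretry = 255;
 *	int ret;
 *
 *	do {
 *		ret = cio_cancel_halt_clear(sch, &iretry);
 *		if (ret == -EBUSY)
 *			wait_for_irq_or_timeout();
 *	} while (ret == -EBUSY);
 *
 * On exit, ret is 0 (device idle), -ENODEV (not operational) or -EIO
 * (out of retries).
 */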
static void cio_apply_config(struct subchannel *sch, struct schib *schib)
{
schib->pmcw.intparm = sch->config.intparm;
schib->pmcw.mbi = sch->config.mbi;
schib->pmcw.isc = sch->config.isc;
schib->pmcw.ena = sch->config.ena;
schib->pmcw.mme = sch->config.mme;
schib->pmcw.mp = sch->config.mp;
schib->pmcw.csense = sch->config.csense;
schib->pmcw.mbfc = sch->config.mbfc;
if (sch->config.mbfc)
schib->mba = sch->config.mba;
}
static int cio_check_config(struct subchannel *sch, struct schib *schib)
{
return (schib->pmcw.intparm == sch->config.intparm) &&
(schib->pmcw.mbi == sch->config.mbi) &&
(schib->pmcw.isc == sch->config.isc) &&
(schib->pmcw.ena == sch->config.ena) &&
(schib->pmcw.mme == sch->config.mme) &&
(schib->pmcw.mp == sch->config.mp) &&
(schib->pmcw.csense == sch->config.csense) &&
(schib->pmcw.mbfc == sch->config.mbfc) &&
(!sch->config.mbfc || (schib->mba == sch->config.mba));
}
/*
 * cio_commit_config - apply configuration to the subchannel
 *
 * Write the configuration stored in sch->config to the subchannel and
 * verify the result, retrying a limited number of times. Return zero on
 * success, a negative error value otherwise.
 */
int cio_commit_config(struct subchannel *sch)
{
int ccode, retry, ret = 0;
struct schib schib;
struct irb irb;
if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
return -ENODEV;
for (retry = 0; retry < 5; retry++) {
/* copy desired changes to local schib */
cio_apply_config(sch, &schib);
ccode = msch(sch->schid, &schib);
if (ccode < 0) /* -EIO if msch gets a program check. */
return ccode;
switch (ccode) {
case 0: /* successful */
if (stsch(sch->schid, &schib) ||
!css_sch_is_valid(&schib))
return -ENODEV;
if (cio_check_config(sch, &schib)) {
/* commit changes from local schib */
memcpy(&sch->schib, &schib, sizeof(schib));
return 0;
}
ret = -EAGAIN;
break;
case 1: /* status pending */
ret = -EBUSY;
if (tsch(sch->schid, &irb))
return ret;
break;
case 2: /* busy */
udelay(100); /* allow for recovery */
ret = -EBUSY;
break;
case 3: /* not operational */
return -ENODEV;
}
}
return ret;
}
EXPORT_SYMBOL_GPL(cio_commit_config);
/**
* cio_update_schib - Perform stsch and update schib if subchannel is valid.
* @sch: subchannel on which to perform stsch
* Return zero on success, -ENODEV otherwise.
*/
int cio_update_schib(struct subchannel *sch)
{
struct schib schib;
if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
return -ENODEV;
memcpy(&sch->schib, &schib, sizeof(schib));
return 0;
}
EXPORT_SYMBOL_GPL(cio_update_schib);
/**
* cio_enable_subchannel - enable a subchannel.
* @sch: subchannel to be enabled
* @intparm: interruption parameter to set
*/
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
int ret;
CIO_TRACE_EVENT(2, "ensch");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
if (sch_is_pseudo_sch(sch))
return -EINVAL;
if (cio_update_schib(sch))
return -ENODEV;
sch->config.ena = 1;
sch->config.isc = sch->isc;
sch->config.intparm = intparm;
ret = cio_commit_config(sch);
if (ret == -EIO) {
/*
* Got a program check in msch. Try without
* the concurrent sense bit the next time.
*/
sch->config.csense = 0;
ret = cio_commit_config(sch);
}
CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
}
EXPORT_SYMBOL_GPL(cio_enable_subchannel);
/**
* cio_disable_subchannel - disable a subchannel.
* @sch: subchannel to disable
*/
int cio_disable_subchannel(struct subchannel *sch)
{
int ret;
CIO_TRACE_EVENT(2, "dissch");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
if (sch_is_pseudo_sch(sch))
return 0;
if (cio_update_schib(sch))
return -ENODEV;
sch->config.ena = 0;
ret = cio_commit_config(sch);
CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
}
EXPORT_SYMBOL_GPL(cio_disable_subchannel);
/*
* do_cio_interrupt() handles all normal I/O device IRQ's
*/
static irqreturn_t do_cio_interrupt(int irq, void *dummy)
{
struct tpi_info *tpi_info;
struct subchannel *sch;
struct irb *irb;
set_cpu_flag(CIF_NOHZ_DELAY);
tpi_info = &get_irq_regs()->tpi_info;
trace_s390_cio_interrupt(tpi_info);
irb = this_cpu_ptr(&cio_irb);
if (!tpi_info->intparm) {
/* Clear pending interrupt condition. */
inc_irq_stat(IRQIO_CIO);
tsch(tpi_info->schid, irb);
return IRQ_HANDLED;
}
sch = phys_to_virt(tpi_info->intparm);
spin_lock(sch->lock);
/* Store interrupt response block to lowcore. */
if (tsch(tpi_info->schid, irb) == 0) {
/* Keep subchannel information word up to date. */
memcpy (&sch->schib.scsw, &irb->scsw, sizeof (irb->scsw));
/* Call interrupt handler if there is one. */
if (sch->driver && sch->driver->irq)
sch->driver->irq(sch);
else
inc_irq_stat(IRQIO_CIO);
} else
inc_irq_stat(IRQIO_CIO);
spin_unlock(sch->lock);
return IRQ_HANDLED;
}
void __init init_cio_interrupts(void)
{
irq_set_chip_and_handler(IO_INTERRUPT,
&dummy_irq_chip, handle_percpu_irq);
if (request_irq(IO_INTERRUPT, do_cio_interrupt, 0, "I/O", NULL))
panic("Failed to register I/O interrupt\n");
}
#ifdef CONFIG_CCW_CONSOLE
static struct subchannel *console_sch;
static struct lock_class_key console_sch_key;
/*
* Use cio_tsch to update the subchannel status and call the interrupt handler
* if status had been pending. Called with the subchannel's lock held.
*/
void cio_tsch(struct subchannel *sch)
{
struct irb *irb;
int irq_context;
irb = this_cpu_ptr(&cio_irb);
/* Store interrupt response block to lowcore. */
if (tsch(sch->schid, irb) != 0)
/* Not status pending or not operational. */
return;
memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
/* Call interrupt handler with updated status. */
irq_context = in_interrupt();
if (!irq_context) {
local_bh_disable();
irq_enter();
}
kstat_incr_irq_this_cpu(IO_INTERRUPT);
if (sch->driver && sch->driver->irq)
sch->driver->irq(sch);
else
inc_irq_stat(IRQIO_CIO);
if (!irq_context) {
irq_exit();
_local_bh_enable();
}
}
static int cio_test_for_console(struct subchannel_id schid, void *data)
{
struct schib schib;
if (stsch(schid, &schib) != 0)
return -ENXIO;
if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
(schib.pmcw.dev == console_devno)) {
console_irq = schid.sch_no;
return 1; /* found */
}
return 0;
}
static int cio_get_console_sch_no(void)
{
struct subchannel_id schid;
struct schib schib;
init_subchannel_id(&schid);
if (console_irq != -1) {
/* VM provided us with the irq number of the console. */
schid.sch_no = console_irq;
if (stsch(schid, &schib) != 0 ||
(schib.pmcw.st != SUBCHANNEL_TYPE_IO) || !schib.pmcw.dnv)
return -1;
console_devno = schib.pmcw.dev;
} else if (console_devno != -1) {
/* At least the console device number is known. */
for_each_subchannel(cio_test_for_console, NULL);
}
return console_irq;
}
struct subchannel *cio_probe_console(void)
{
struct subchannel_id schid;
struct subchannel *sch;
struct schib schib;
int sch_no, ret;
sch_no = cio_get_console_sch_no();
if (sch_no == -1) {
pr_warn("No CCW console was found\n");
return ERR_PTR(-ENODEV);
}
init_subchannel_id(&schid);
schid.sch_no = sch_no;
ret = stsch(schid, &schib);
if (ret)
return ERR_PTR(-ENODEV);
sch = css_alloc_subchannel(schid, &schib);
if (IS_ERR(sch))
return sch;
lockdep_set_class(sch->lock, &console_sch_key);
isc_register(CONSOLE_ISC);
sch->config.isc = CONSOLE_ISC;
sch->config.intparm = (u32)virt_to_phys(sch);
ret = cio_commit_config(sch);
if (ret) {
isc_unregister(CONSOLE_ISC);
put_device(&sch->dev);
return ERR_PTR(ret);
}
console_sch = sch;
return sch;
}
int cio_is_console(struct subchannel_id schid)
{
if (!console_sch)
return 0;
return schid_equal(&schid, &console_sch->schid);
}
void cio_register_early_subchannels(void)
{
int ret;
if (!console_sch)
return;
ret = css_register_subchannel(console_sch);
if (ret)
put_device(&console_sch->dev);
}
#endif /* CONFIG_CCW_CONSOLE */
/**
* cio_tm_start_key - perform start function
* @sch: subchannel on which to perform the start function
* @tcw: transport-command word to be started
* @lpm: mask of paths to use
* @key: storage key to use for storage access
*
* Start the tcw on the given subchannel. Return zero on success, non-zero
* otherwise.
*/
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
{
int cc;
union orb *orb = &to_io_private(sch)->orb;
memset(orb, 0, sizeof(union orb));
orb->tm.intparm = (u32)virt_to_phys(sch);
orb->tm.key = key >> 4;
orb->tm.b = 1;
orb->tm.lpm = lpm ? lpm : sch->lpm;
orb->tm.tcw = (u32)virt_to_phys(tcw);
cc = ssch(sch->schid, orb);
switch (cc) {
case 0:
return 0;
case 1:
case 2:
return -EBUSY;
default:
return cio_start_handle_notoper(sch, lpm);
}
}
EXPORT_SYMBOL_GPL(cio_tm_start_key);
/**
* cio_tm_intrg - perform interrogate function
* @sch: subchannel on which to perform the interrogate function
*
* If the specified subchannel is running in transport-mode, perform the
* interrogate function. Return zero on success, non-zero otherwise.
*/
int cio_tm_intrg(struct subchannel *sch)
{
int cc;
if (!to_io_private(sch)->orb.tm.b)
return -EINVAL;
cc = xsch(sch->schid);
switch (cc) {
case 0:
case 2:
return 0;
case 1:
return -EBUSY;
default:
return -ENODEV;
}
}
EXPORT_SYMBOL_GPL(cio_tm_intrg);
| linux-master | drivers/s390/cio/cio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Functions for incremental construction of fcx enabled I/O control blocks.
*
* Copyright IBM Corp. 2008
* Author(s): Peter Oberparleiter <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <asm/fcx.h>
#include <asm/itcw.h>
/*
* struct itcw - incremental tcw helper data type
*
* This structure serves as a handle for the incremental construction of a
* tcw and associated tccb, tsb, data tidaw-list plus an optional interrogate
* tcw and associated data. The data structures are contained inside a single
* contiguous buffer provided by the user.
*
* The itcw construction functions take care of overall data integrity:
* - reset unused fields to zero
* - fill in required pointers
* - ensure required alignment for data structures
* - prevent data structures to cross 4k-byte boundary where required
* - calculate tccb-related length fields
* - optionally provide ready-made interrogate tcw and associated structures
*
* Restrictions apply to the itcws created with these construction functions:
* - tida only supported for data address, not for tccb
* - only contiguous tidaw-lists (no ttic)
* - total number of bytes required per itcw may not exceed 4k bytes
* - either read or write operation (may not work with r=0 and w=0)
*
* Example:
* struct itcw *itcw;
* void *buffer;
* size_t size;
*
* size = itcw_calc_size(1, 2, 0);
* buffer = kmalloc(size, GFP_KERNEL | GFP_DMA);
* if (!buffer)
* return -ENOMEM;
* itcw = itcw_init(buffer, size, ITCW_OP_READ, 1, 2, 0);
* if (IS_ERR(itcw))
* return PTR_ERR(itcw);
* itcw_add_dcw(itcw, 0x2, 0, NULL, 0, 72);
* itcw_add_tidaw(itcw, 0, 0x30000, 20);
* itcw_add_tidaw(itcw, 0, 0x40000, 52);
* itcw_finalize(itcw);
*
*/
struct itcw {
struct tcw *tcw;
struct tcw *intrg_tcw;
int num_tidaws;
int max_tidaws;
int intrg_num_tidaws;
int intrg_max_tidaws;
};
/**
* itcw_get_tcw - return pointer to tcw associated with the itcw
* @itcw: address of the itcw
*
* Return pointer to the tcw associated with the itcw.
*/
struct tcw *itcw_get_tcw(struct itcw *itcw)
{
return itcw->tcw;
}
EXPORT_SYMBOL(itcw_get_tcw);
/**
* itcw_calc_size - return the size of an itcw with the given parameters
* @intrg: if non-zero, add an interrogate tcw
* @max_tidaws: maximum number of tidaws to be used for data addressing or zero
* if no tida is to be used.
* @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
* by the interrogate tcw, if specified
*
* Calculate and return the number of bytes required to hold an itcw with the
* given parameters and assuming tccbs with maximum size.
*
* Note that the resulting size also contains bytes needed for alignment
* padding as well as padding to ensure that data structures don't cross a
* 4k-boundary where required.
*/
size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
{
size_t len;
int cross_count;
/* Main data. */
len = sizeof(struct itcw);
len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
/* TSB */ sizeof(struct tsb) +
/* TIDAL */ max_tidaws * sizeof(struct tidaw);
/* Interrogate data. */
if (intrg) {
len += /* TCW */ sizeof(struct tcw) + /* TCCB */ TCCB_MAX_SIZE +
/* TSB */ sizeof(struct tsb) +
/* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw);
}
/* Maximum required alignment padding. */
len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7;
/* TIDAW lists may not cross a 4k boundary. To cross a
* boundary we need to add a TTIC TIDAW. We need to reserve
* one additional TIDAW for a TTIC that we may need to add due
* to the placement of the data chunk in memory, and a further
* TIDAW for each page boundary that the TIDAW list may cross
* due to its own size.
*/
if (max_tidaws) {
cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
>> PAGE_SHIFT);
len += cross_count * sizeof(struct tidaw);
}
if (intrg_max_tidaws) {
cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
>> PAGE_SHIFT);
len += cross_count * sizeof(struct tidaw);
}
return len;
}
EXPORT_SYMBOL(itcw_calc_size);
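/*
 * Worked example (illustrative numbers, assuming 16-byte tidaws and 4k
 * pages): for max_tidaws = 300 the list occupies 4800 bytes, which may
 * span up to two page boundaries depending on its placement, so
 * cross_count = 1 + ((4800 - 1) >> PAGE_SHIFT) = 2 additional TTIC
 * tidaws are reserved.
 */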
#define CROSS4K(x, l) (((x) & ~4095) != (((x) + (l)) & ~4095))
static inline void *fit_chunk(addr_t *start, addr_t end, size_t len,
int align, int check_4k)
{
addr_t addr;
addr = ALIGN(*start, align);
if (check_4k && CROSS4K(addr, len)) {
addr = ALIGN(addr, 4096);
addr = ALIGN(addr, align);
}
if (addr + len > end)
return ERR_PTR(-ENOSPC);
*start = addr + len;
return (void *) addr;
}
/**
* itcw_init - initialize incremental tcw data structure
* @buffer: address of buffer to use for data structures
* @size: number of bytes in buffer
* @op: %ITCW_OP_READ for a read operation tcw, %ITCW_OP_WRITE for a write
* operation tcw
* @intrg: if non-zero, add and initialize an interrogate tcw
* @max_tidaws: maximum number of tidaws to be used for data addressing or zero
* if no tida is to be used.
* @intrg_max_tidaws: maximum number of tidaws to be used for data addressing
* by the interrogate tcw, if specified
*
* Prepare the specified buffer to be used as an incremental tcw, i.e. a
* helper data structure that can be used to construct a valid tcw by
* successive calls to other helper functions. Note: the buffer needs to be
* located below the 2G address limit. The resulting tcw has the following
* restrictions:
* - no tccb tidal
* - input/output tidal is contiguous (no ttic)
* - total data should not exceed 4k
* - tcw specifies either read or write operation
*
* On success, return pointer to the resulting incremental tcw data structure,
* ERR_PTR otherwise.
*/
struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
int max_tidaws, int intrg_max_tidaws)
{
struct itcw *itcw;
void *chunk;
addr_t start;
addr_t end;
int cross_count;
/* Check for 2G limit. */
start = (addr_t) buffer;
end = start + size;
if ((virt_to_phys(buffer) + size) > (1 << 31))
return ERR_PTR(-EINVAL);
memset(buffer, 0, size);
/* ITCW. */
chunk = fit_chunk(&start, end, sizeof(struct itcw), 1, 0);
if (IS_ERR(chunk))
return chunk;
itcw = chunk;
/* allow for TTIC tidaws that may be needed to cross a page boundary */
cross_count = 0;
if (max_tidaws)
cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
>> PAGE_SHIFT);
itcw->max_tidaws = max_tidaws + cross_count;
cross_count = 0;
if (intrg_max_tidaws)
cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
>> PAGE_SHIFT);
itcw->intrg_max_tidaws = intrg_max_tidaws + cross_count;
/* Main TCW. */
chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
if (IS_ERR(chunk))
return chunk;
itcw->tcw = chunk;
tcw_init(itcw->tcw, (op == ITCW_OP_READ) ? 1 : 0,
(op == ITCW_OP_WRITE) ? 1 : 0);
/* Interrogate TCW. */
if (intrg) {
chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
if (IS_ERR(chunk))
return chunk;
itcw->intrg_tcw = chunk;
tcw_init(itcw->intrg_tcw, 1, 0);
tcw_set_intrg(itcw->tcw, itcw->intrg_tcw);
}
/* Data TIDAL. */
if (max_tidaws > 0) {
chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
itcw->max_tidaws, 16, 0);
if (IS_ERR(chunk))
return chunk;
tcw_set_data(itcw->tcw, chunk, 1);
}
/* Interrogate data TIDAL. */
if (intrg && (intrg_max_tidaws > 0)) {
chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
itcw->intrg_max_tidaws, 16, 0);
if (IS_ERR(chunk))
return chunk;
tcw_set_data(itcw->intrg_tcw, chunk, 1);
}
/* TSB. */
chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
if (IS_ERR(chunk))
return chunk;
tsb_init(chunk);
tcw_set_tsb(itcw->tcw, chunk);
/* Interrogate TSB. */
if (intrg) {
chunk = fit_chunk(&start, end, sizeof(struct tsb), 8, 0);
if (IS_ERR(chunk))
return chunk;
tsb_init(chunk);
tcw_set_tsb(itcw->intrg_tcw, chunk);
}
/* TCCB. */
chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
if (IS_ERR(chunk))
return chunk;
tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_DEFAULT);
tcw_set_tccb(itcw->tcw, chunk);
/* Interrogate TCCB. */
if (intrg) {
chunk = fit_chunk(&start, end, TCCB_MAX_SIZE, 8, 0);
if (IS_ERR(chunk))
return chunk;
tccb_init(chunk, TCCB_MAX_SIZE, TCCB_SAC_INTRG);
tcw_set_tccb(itcw->intrg_tcw, chunk);
tccb_add_dcw(chunk, TCCB_MAX_SIZE, DCW_CMD_INTRG, 0, NULL,
sizeof(struct dcw_intrg_data), 0);
tcw_finalize(itcw->intrg_tcw, 0);
}
return itcw;
}
EXPORT_SYMBOL(itcw_init);
/**
* itcw_add_dcw - add a dcw to the itcw
* @itcw: address of the itcw
* @cmd: the dcw command
* @flags: flags for the dcw
* @cd: address of control data for this dcw or NULL if none is required
* @cd_count: number of control data bytes for this dcw
* @count: number of data bytes for this dcw
*
* Add a new dcw to the specified itcw by writing the dcw information specified
* by @cmd, @flags, @cd, @cd_count and @count to the tca of the tccb. Return
* a pointer to the newly added dcw on success or -%ENOSPC if the new dcw
* would exceed the available space.
*
* Note: the tcal field of the tccb header will be updated to reflect added
* content.
*/
struct dcw *itcw_add_dcw(struct itcw *itcw, u8 cmd, u8 flags, void *cd,
u8 cd_count, u32 count)
{
return tccb_add_dcw(tcw_get_tccb(itcw->tcw), TCCB_MAX_SIZE, cmd,
flags, cd, cd_count, count);
}
EXPORT_SYMBOL(itcw_add_dcw);
/**
* itcw_add_tidaw - add a tidaw to the itcw
* @itcw: address of the itcw
* @flags: flags for the new tidaw
* @addr: address value for the new tidaw
* @count: count value for the new tidaw
*
* Add a new tidaw to the input/output data tidaw-list of the specified itcw
* (depending on the value of the r-flag and w-flag). Return a pointer to
* the new tidaw on success or -%ENOSPC if the new tidaw would exceed the
* available space.
*
* Note: TTIC tidaws are automatically added when needed, so explicitly calling
* this interface with the TTIC flag is not supported. The last-tidaw flag
* for the last tidaw in the list will be set by itcw_finalize.
*/
struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count)
{
struct tidaw *following;
if (itcw->num_tidaws >= itcw->max_tidaws)
return ERR_PTR(-ENOSPC);
/*
* Is the tidaw, which follows the one we are about to fill, on the next
* page? Then we have to insert a TTIC tidaw first, that points to the
* tidaw on the new page.
*/
following = ((struct tidaw *) tcw_get_data(itcw->tcw))
+ itcw->num_tidaws + 1;
if (itcw->num_tidaws && !((unsigned long) following & ~PAGE_MASK)) {
tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++,
TIDAW_FLAGS_TTIC, following, 0);
if (itcw->num_tidaws >= itcw->max_tidaws)
return ERR_PTR(-ENOSPC);
}
return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count);
}
EXPORT_SYMBOL(itcw_add_tidaw);
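/*
 * Illustrative note (assuming 4k pages and 16-byte tidaws, i.e. 256
 * tidaws per page): when the entry after the one about to be filled
 * would start exactly on a page boundary, the current slot is the last
 * one on its page. itcw_add_tidaw() then puts a TTIC tidaw into that
 * last slot, pointing to the first slot of the next page, and stores
 * the caller's data tidaw there instead.
 */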
/**
* itcw_set_data - set data address and tida flag of the itcw
* @itcw: address of the itcw
* @addr: the data address
* @use_tidal: zero if the data address specifies a contiguous block of data,
* non-zero if it specifies a list of tidaws.
*
* Set the input/output data address of the itcw (depending on the value of the
* r-flag and w-flag). If @use_tidal is non-zero, the corresponding tida flag
* is set as well.
*/
void itcw_set_data(struct itcw *itcw, void *addr, int use_tidal)
{
tcw_set_data(itcw->tcw, addr, use_tidal);
}
EXPORT_SYMBOL(itcw_set_data);
/**
* itcw_finalize - calculate length and count fields of the itcw
* @itcw: address of the itcw
*
* Calculate tcw input-/output-count and tccbl fields and add a tcat to
* the tccb. In case input- or output-tida is used, the tidaw-list must be
* stored in contiguous storage (no ttic). The tcal field in the tccb must
* be up-to-date.
*/
void itcw_finalize(struct itcw *itcw)
{
tcw_finalize(itcw->tcw, itcw->num_tidaws);
}
EXPORT_SYMBOL(itcw_finalize);
| linux-master | drivers/s390/cio/itcw.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Channel path related status regions for vfio_ccw
*
* Copyright IBM Corp. 2020
*
* Author(s): Farhan Ali <[email protected]>
* Eric Farman <[email protected]>
*/
#include <linux/slab.h>
#include <linux/vfio.h>
#include "vfio_ccw_private.h"
static ssize_t vfio_ccw_schib_region_read(struct vfio_ccw_private *private,
char __user *buf, size_t count,
loff_t *ppos)
{
struct subchannel *sch = to_subchannel(private->vdev.dev->parent);
unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
struct ccw_schib_region *region;
int ret;
if (pos + count > sizeof(*region))
return -EINVAL;
mutex_lock(&private->io_mutex);
region = private->region[i].data;
if (cio_update_schib(sch)) {
ret = -ENODEV;
goto out;
}
memcpy(region, &sch->schib, sizeof(*region));
if (copy_to_user(buf, (void *)region + pos, count)) {
ret = -EFAULT;
goto out;
}
ret = count;
out:
mutex_unlock(&private->io_mutex);
return ret;
}
static ssize_t vfio_ccw_schib_region_write(struct vfio_ccw_private *private,
const char __user *buf, size_t count,
loff_t *ppos)
{
return -EINVAL;
}
static void vfio_ccw_schib_region_release(struct vfio_ccw_private *private,
struct vfio_ccw_region *region)
{
}
static const struct vfio_ccw_regops vfio_ccw_schib_region_ops = {
.read = vfio_ccw_schib_region_read,
.write = vfio_ccw_schib_region_write,
.release = vfio_ccw_schib_region_release,
};
int vfio_ccw_register_schib_dev_regions(struct vfio_ccw_private *private)
{
return vfio_ccw_register_dev_region(private,
VFIO_REGION_SUBTYPE_CCW_SCHIB,
&vfio_ccw_schib_region_ops,
sizeof(struct ccw_schib_region),
VFIO_REGION_INFO_FLAG_READ,
private->schib_region);
}
static ssize_t vfio_ccw_crw_region_read(struct vfio_ccw_private *private,
char __user *buf, size_t count,
loff_t *ppos)
{
unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
struct ccw_crw_region *region;
struct vfio_ccw_crw *crw;
int ret;
if (pos + count > sizeof(*region))
return -EINVAL;
crw = list_first_entry_or_null(&private->crw,
struct vfio_ccw_crw, next);
if (crw)
list_del(&crw->next);
mutex_lock(&private->io_mutex);
region = private->region[i].data;
if (crw)
memcpy(®ion->crw, &crw->crw, sizeof(region->crw));
if (copy_to_user(buf, (void *)region + pos, count))
ret = -EFAULT;
else
ret = count;
region->crw = 0;
mutex_unlock(&private->io_mutex);
kfree(crw);
/* Notify the guest if more CRWs are on our queue */
if (!list_empty(&private->crw) && private->crw_trigger)
eventfd_signal(private->crw_trigger, 1);
return ret;
}
static ssize_t vfio_ccw_crw_region_write(struct vfio_ccw_private *private,
const char __user *buf, size_t count,
loff_t *ppos)
{
return -EINVAL;
}
static void vfio_ccw_crw_region_release(struct vfio_ccw_private *private,
struct vfio_ccw_region *region)
{
}
static const struct vfio_ccw_regops vfio_ccw_crw_region_ops = {
.read = vfio_ccw_crw_region_read,
.write = vfio_ccw_crw_region_write,
.release = vfio_ccw_crw_region_release,
};
int vfio_ccw_register_crw_dev_regions(struct vfio_ccw_private *private)
{
return vfio_ccw_register_dev_region(private,
VFIO_REGION_SUBTYPE_CCW_CRW,
&vfio_ccw_crw_region_ops,
sizeof(struct ccw_crw_region),
VFIO_REGION_INFO_FLAG_READ,
private->crw_region);
}
| linux-master | drivers/s390/cio/vfio_ccw_chp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Channel subsystem I/O instructions.
*/
#include <linux/export.h>
#include <asm/asm-extable.h>
#include <asm/chpid.h>
#include <asm/schid.h>
#include <asm/crw.h>
#include "ioasm.h"
#include "orb.h"
#include "cio.h"
#include "cio_inject.h"
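/*
 * Note: the I/O instructions below expect the subchannel id in general
 * register 1; each wrapper therefore loads it with "lgr 1,..." and lists
 * "1" in its clobber list.
 */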
static inline int __stsch(struct subchannel_id schid, struct schib *addr)
{
unsigned long r1 = *(unsigned int *)&schid;
int ccode = -EIO;
asm volatile(
" lgr 1,%[r1]\n"
" stsch %[addr]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: [cc] "+&d" (ccode), [addr] "=Q" (*addr)
: [r1] "d" (r1)
: "cc", "1");
return ccode;
}
int stsch(struct subchannel_id schid, struct schib *addr)
{
int ccode;
ccode = __stsch(schid, addr);
trace_s390_cio_stsch(schid, addr, ccode);
return ccode;
}
EXPORT_SYMBOL(stsch);
static inline int __msch(struct subchannel_id schid, struct schib *addr)
{
unsigned long r1 = *(unsigned int *)&schid;
int ccode = -EIO;
asm volatile(
" lgr 1,%[r1]\n"
" msch %[addr]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: [cc] "+&d" (ccode)
: [r1] "d" (r1), [addr] "Q" (*addr)
: "cc", "1");
return ccode;
}
int msch(struct subchannel_id schid, struct schib *addr)
{
int ccode;
ccode = __msch(schid, addr);
trace_s390_cio_msch(schid, addr, ccode);
return ccode;
}
static inline int __tsch(struct subchannel_id schid, struct irb *addr)
{
unsigned long r1 = *(unsigned int *)&schid;
int ccode;
asm volatile(
" lgr 1,%[r1]\n"
" tsch %[addr]\n"
" ipm %[cc]\n"
" srl %[cc],28"
: [cc] "=&d" (ccode), [addr] "=Q" (*addr)
: [r1] "d" (r1)
: "cc", "1");
return ccode;
}
int tsch(struct subchannel_id schid, struct irb *addr)
{
int ccode;
ccode = __tsch(schid, addr);
trace_s390_cio_tsch(schid, addr, ccode);
return ccode;
}
static inline int __ssch(struct subchannel_id schid, union orb *addr)
{
unsigned long r1 = *(unsigned int *)&schid;
int ccode = -EIO;
asm volatile(
" lgr 1,%[r1]\n"
" ssch %[addr]\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: [cc] "+&d" (ccode)
: [r1] "d" (r1), [addr] "Q" (*addr)
: "cc", "memory", "1");
return ccode;
}
int ssch(struct subchannel_id schid, union orb *addr)
{
int ccode;
ccode = __ssch(schid, addr);
trace_s390_cio_ssch(schid, addr, ccode);
return ccode;
}
EXPORT_SYMBOL(ssch);
static inline int __csch(struct subchannel_id schid)
{
unsigned long r1 = *(unsigned int *)&schid;
int ccode;
asm volatile(
" lgr 1,%[r1]\n"
" csch\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (ccode)
: [r1] "d" (r1)
: "cc", "1");
return ccode;
}
int csch(struct subchannel_id schid)
{
int ccode;
ccode = __csch(schid);
trace_s390_cio_csch(schid, ccode);
return ccode;
}
EXPORT_SYMBOL(csch);
int tpi(struct tpi_info *addr)
{
int ccode;
asm volatile(
" tpi %[addr]\n"
" ipm %[cc]\n"
" srl %[cc],28"
: [cc] "=&d" (ccode), [addr] "=Q" (*addr)
:
: "cc");
trace_s390_cio_tpi(addr, ccode);
return ccode;
}
int chsc(void *chsc_area)
{
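/* Cover the whole 4k chsc area in the asm memory constraint below. */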
typedef struct { char _[4096]; } addr_type;
int cc = -EIO;
asm volatile(
" .insn rre,0xb25f0000,%[chsc_area],0\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b, 1b)
: [cc] "+&d" (cc), "+m" (*(addr_type *)chsc_area)
: [chsc_area] "d" (chsc_area)
: "cc");
trace_s390_cio_chsc(chsc_area, cc);
return cc;
}
EXPORT_SYMBOL(chsc);
static inline int __rsch(struct subchannel_id schid)
{
unsigned long r1 = *(unsigned int *)&schid;
int ccode;
asm volatile(
" lgr 1,%[r1]\n"
" rsch\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (ccode)
: [r1] "d" (r1)
: "cc", "memory", "1");
return ccode;
}
int rsch(struct subchannel_id schid)
{
int ccode;
ccode = __rsch(schid);
trace_s390_cio_rsch(schid, ccode);
return ccode;
}
static inline int __hsch(struct subchannel_id schid)
{
unsigned long r1 = *(unsigned int *)&schid;
int ccode;
asm volatile(
" lgr 1,%[r1]\n"
" hsch\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (ccode)
: [r1] "d" (r1)
: "cc", "1");
return ccode;
}
int hsch(struct subchannel_id schid)
{
int ccode;
ccode = __hsch(schid);
trace_s390_cio_hsch(schid, ccode);
return ccode;
}
EXPORT_SYMBOL(hsch);
static inline int __xsch(struct subchannel_id schid)
{
unsigned long r1 = *(unsigned int *)&schid;
int ccode;
asm volatile(
" lgr 1,%[r1]\n"
" xsch\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (ccode)
: [r1] "d" (r1)
: "cc", "1");
return ccode;
}
int xsch(struct subchannel_id schid)
{
int ccode;
ccode = __xsch(schid);
trace_s390_cio_xsch(schid, ccode);
return ccode;
}
static inline int __stcrw(struct crw *crw)
{
int ccode;
asm volatile(
" stcrw %[crw]\n"
" ipm %[cc]\n"
" srl %[cc],28\n"
: [cc] "=&d" (ccode), [crw] "=Q" (*crw)
:
: "cc");
return ccode;
}
static inline int _stcrw(struct crw *crw)
{
#ifdef CONFIG_CIO_INJECT
if (static_branch_unlikely(&cio_inject_enabled)) {
if (stcrw_get_injected(crw) == 0)
return 0;
}
#endif
return __stcrw(crw);
}
int stcrw(struct crw *crw)
{
int ccode;
ccode = _stcrw(crw);
trace_s390_cio_stcrw(crw, ccode);
return ccode;
}
| linux-master | drivers/s390/cio/ioasm.c |
// SPDX-License-Identifier: GPL-1.0+
/*
* bus driver for ccw devices
*
* Copyright IBM Corp. 2002, 2008
* Author(s): Arnd Bergmann ([email protected])
* Cornelia Huck ([email protected])
* Martin Schwidefsky ([email protected])
*/
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/export.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kernel_stat.h>
#include <linux/sched/signal.h>
#include <linux/dma-mapping.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/param.h> /* HZ */
#include <asm/cmb.h>
#include <asm/isc.h>
#include "chp.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "chsc.h"
static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };
static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
static struct bus_type ccw_bus_type;
/******************* bus type handling ***********************/
/* The Linux driver model distinguishes between a bus type and
* the bus itself. Of course we only have one channel
* subsystem driver and one channel system per machine, but
* we still use the abstraction. T.R. says it's a good idea. */
static int
ccw_bus_match (struct device * dev, struct device_driver * drv)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_driver *cdrv = to_ccwdrv(drv);
const struct ccw_device_id *ids = cdrv->ids, *found;
if (!ids)
return 0;
found = ccw_device_id_match(ids, &cdev->id);
if (!found)
return 0;
cdev->id.driver_info = found->driver_info;
return 1;
}
/* Store modalias string delimited by prefix/suffix string into buffer with
* specified size. Return length of resulting string (excluding trailing '\0')
* even if string doesn't fit buffer (snprintf semantics). */
static int snprint_alias(char *buf, size_t size,
const struct ccw_device_id *id, const char *suffix)
{
int len;
len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
if (len > size)
return len;
buf += len;
size -= len;
if (id->dev_type != 0)
len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
id->dev_model, suffix);
else
len += snprintf(buf, size, "dtdm%s", suffix);
return len;
}
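/*
 * Example (illustrative values): a 3390 DASD behind a 3990 control unit,
 * i.e. cu_type 0x3990, cu_model 0xe9, dev_type 0x3390, dev_model 0x0c,
 * yields the modalias "ccw:t3990mE9dt3390dm0C".
 */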
/* Set up environment variables for ccw device uevent. Return 0 on success,
* non-zero otherwise. */
static int ccw_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct ccw_device *cdev = to_ccwdev(dev);
const struct ccw_device_id *id = &(cdev->id);
int ret;
char modalias_buf[30];
/* CU_TYPE= */
ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
if (ret)
return ret;
/* CU_MODEL= */
ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
if (ret)
return ret;
/* The next two can be zero, that's ok for us */
/* DEV_TYPE= */
ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
if (ret)
return ret;
/* DEV_MODEL= */
ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
if (ret)
return ret;
/* MODALIAS= */
snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
return ret;
}
static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static void io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
int);
static void recovery_func(struct timer_list *unused);
static struct css_device_id io_subchannel_ids[] = {
{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
{ /* end of list */ },
};
static int io_subchannel_settle(void)
{
int ret;
ret = wait_event_interruptible(ccw_device_init_wq,
atomic_read(&ccw_device_init_count) == 0);
if (ret)
return -EINTR;
flush_workqueue(cio_work_q);
return 0;
}
static struct css_driver io_subchannel_driver = {
.drv = {
.owner = THIS_MODULE,
.name = "io_subchannel",
},
.subchannel_type = io_subchannel_ids,
.irq = io_subchannel_irq,
.sch_event = io_subchannel_sch_event,
.chp_event = io_subchannel_chp_event,
.probe = io_subchannel_probe,
.remove = io_subchannel_remove,
.shutdown = io_subchannel_shutdown,
.settle = io_subchannel_settle,
};
int __init io_subchannel_init(void)
{
int ret;
timer_setup(&recovery_timer, recovery_func, 0);
ret = bus_register(&ccw_bus_type);
if (ret)
return ret;
ret = css_driver_register(&io_subchannel_driver);
if (ret)
bus_unregister(&ccw_bus_type);
return ret;
}
/************************ device handling **************************/
static ssize_t
devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_device_id *id = &(cdev->id);
if (id->dev_type != 0)
return sprintf(buf, "%04x/%02x\n",
id->dev_type, id->dev_model);
else
return sprintf(buf, "n/a\n");
}
static ssize_t
cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_device_id *id = &(cdev->id);
return sprintf(buf, "%04x/%02x\n",
id->cu_type, id->cu_model);
}
static ssize_t
modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_device_id *id = &(cdev->id);
int len;
len = snprint_alias(buf, PAGE_SIZE, id, "\n");
return len > PAGE_SIZE ? PAGE_SIZE : len;
}
static ssize_t
online_show (struct device *dev, struct device_attribute *attr, char *buf)
{
struct ccw_device *cdev = to_ccwdev(dev);
return sprintf(buf, cdev->online ? "1\n" : "0\n");
}
int ccw_device_is_orphan(struct ccw_device *cdev)
{
return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}
static void ccw_device_unregister(struct ccw_device *cdev)
{
mutex_lock(&cdev->reg_mutex);
if (device_is_registered(&cdev->dev)) {
/* Undo device_add(). */
device_del(&cdev->dev);
}
mutex_unlock(&cdev->reg_mutex);
if (cdev->private->flags.initialized) {
cdev->private->flags.initialized = 0;
/* Release reference from device_initialize(). */
put_device(&cdev->dev);
}
}
static void io_subchannel_quiesce(struct subchannel *);
/**
* ccw_device_set_offline() - disable a ccw device for I/O
* @cdev: target ccw device
*
* This function calls the driver's set_offline() function for @cdev, if
* given, and then disables @cdev.
* Returns:
* %0 on success and a negative error value on failure.
* Context:
* enabled, ccw device lock not held
*/
int ccw_device_set_offline(struct ccw_device *cdev)
{
struct subchannel *sch;
int ret, state;
if (!cdev)
return -ENODEV;
if (!cdev->online || !cdev->drv)
return -EINVAL;
if (cdev->drv->set_offline) {
ret = cdev->drv->set_offline(cdev);
if (ret != 0)
return ret;
}
spin_lock_irq(cdev->ccwlock);
sch = to_subchannel(cdev->dev.parent);
cdev->online = 0;
/* Wait until a final state or DISCONNECTED is reached */
while (!dev_fsm_final_state(cdev) &&
cdev->private->state != DEV_STATE_DISCONNECTED) {
spin_unlock_irq(cdev->ccwlock);
wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
cdev->private->state == DEV_STATE_DISCONNECTED));
spin_lock_irq(cdev->ccwlock);
}
do {
ret = ccw_device_offline(cdev);
if (!ret)
break;
CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
"0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
if (ret != -EBUSY)
goto error;
state = cdev->private->state;
spin_unlock_irq(cdev->ccwlock);
io_subchannel_quiesce(sch);
spin_lock_irq(cdev->ccwlock);
cdev->private->state = state;
} while (ret == -EBUSY);
spin_unlock_irq(cdev->ccwlock);
wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
cdev->private->state == DEV_STATE_DISCONNECTED));
/* Inform the user if set offline failed. */
if (cdev->private->state == DEV_STATE_BOXED) {
pr_warn("%s: The device entered boxed state while being set offline\n",
dev_name(&cdev->dev));
} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
pr_warn("%s: The device stopped operating while being set offline\n",
dev_name(&cdev->dev));
}
/* Give up reference from ccw_device_set_online(). */
put_device(&cdev->dev);
return 0;
error:
cdev->private->state = DEV_STATE_OFFLINE;
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
spin_unlock_irq(cdev->ccwlock);
/* Give up reference from ccw_device_set_online(). */
put_device(&cdev->dev);
return -ENODEV;
}
/**
* ccw_device_set_online() - enable a ccw device for I/O
* @cdev: target ccw device
*
* This function first enables @cdev and then calls the driver's set_online()
* function for @cdev, if given. If set_online() returns an error, @cdev is
* disabled again.
* Returns:
* %0 on success and a negative error value on failure.
* Context:
* enabled, ccw device lock not held
*/
int ccw_device_set_online(struct ccw_device *cdev)
{
int ret;
int ret2;
if (!cdev)
return -ENODEV;
if (cdev->online || !cdev->drv)
return -EINVAL;
/* Hold on to an extra reference while device is online. */
if (!get_device(&cdev->dev))
return -ENODEV;
spin_lock_irq(cdev->ccwlock);
ret = ccw_device_online(cdev);
spin_unlock_irq(cdev->ccwlock);
if (ret == 0)
wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
else {
CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
"device 0.%x.%04x\n",
ret, cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
/* Give up online reference since onlining failed. */
put_device(&cdev->dev);
return ret;
}
spin_lock_irq(cdev->ccwlock);
/* Check if online processing was successful */
if ((cdev->private->state != DEV_STATE_ONLINE) &&
(cdev->private->state != DEV_STATE_W4SENSE)) {
spin_unlock_irq(cdev->ccwlock);
/* Inform the user that set online failed. */
if (cdev->private->state == DEV_STATE_BOXED) {
pr_warn("%s: Setting the device online failed because it is boxed\n",
dev_name(&cdev->dev));
} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
pr_warn("%s: Setting the device online failed because it is not operational\n",
dev_name(&cdev->dev));
}
/* Give up online reference since onlining failed. */
put_device(&cdev->dev);
return -ENODEV;
}
spin_unlock_irq(cdev->ccwlock);
if (cdev->drv->set_online)
ret = cdev->drv->set_online(cdev);
if (ret)
goto rollback;
spin_lock_irq(cdev->ccwlock);
cdev->online = 1;
spin_unlock_irq(cdev->ccwlock);
return 0;
rollback:
spin_lock_irq(cdev->ccwlock);
/* Wait until a final state or DISCONNECTED is reached */
while (!dev_fsm_final_state(cdev) &&
cdev->private->state != DEV_STATE_DISCONNECTED) {
spin_unlock_irq(cdev->ccwlock);
wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
cdev->private->state == DEV_STATE_DISCONNECTED));
spin_lock_irq(cdev->ccwlock);
}
ret2 = ccw_device_offline(cdev);
if (ret2)
goto error;
spin_unlock_irq(cdev->ccwlock);
wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
cdev->private->state == DEV_STATE_DISCONNECTED));
/* Give up online reference since onlining failed. */
put_device(&cdev->dev);
return ret;
error:
CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
"device 0.%x.%04x\n",
ret2, cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
cdev->private->state = DEV_STATE_OFFLINE;
spin_unlock_irq(cdev->ccwlock);
/* Give up online reference since onlining failed. */
put_device(&cdev->dev);
return ret;
}
static int online_store_handle_offline(struct ccw_device *cdev)
{
if (cdev->private->state == DEV_STATE_DISCONNECTED) {
spin_lock_irq(cdev->ccwlock);
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
spin_unlock_irq(cdev->ccwlock);
return 0;
}
if (cdev->drv && cdev->drv->set_offline)
return ccw_device_set_offline(cdev);
return -EINVAL;
}
static int online_store_recog_and_online(struct ccw_device *cdev)
{
/* Do device recognition, if needed. */
if (cdev->private->state == DEV_STATE_BOXED) {
spin_lock_irq(cdev->ccwlock);
ccw_device_recognition(cdev);
spin_unlock_irq(cdev->ccwlock);
wait_event(cdev->private->wait_q,
cdev->private->flags.recog_done);
if (cdev->private->state != DEV_STATE_OFFLINE)
/* recognition failed */
return -EAGAIN;
}
if (cdev->drv && cdev->drv->set_online)
return ccw_device_set_online(cdev);
return -EINVAL;
}
static int online_store_handle_online(struct ccw_device *cdev, int force)
{
int ret;
ret = online_store_recog_and_online(cdev);
if (ret && !force)
return ret;
if (force && cdev->private->state == DEV_STATE_BOXED) {
ret = ccw_device_stlck(cdev);
if (ret)
return ret;
if (cdev->id.cu_type == 0)
cdev->private->state = DEV_STATE_NOT_OPER;
ret = online_store_recog_and_online(cdev);
if (ret)
return ret;
}
return 0;
}
static ssize_t online_store (struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ccw_device *cdev = to_ccwdev(dev);
int force, ret;
unsigned long i;
/* Prevent conflict between multiple on-/offline processing requests. */
if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
return -EAGAIN;
/* Prevent conflict between internal I/Os and on-/offline processing. */
if (!dev_fsm_final_state(cdev) &&
cdev->private->state != DEV_STATE_DISCONNECTED) {
ret = -EAGAIN;
goto out;
}
/* Prevent conflict between pending work and on-/offline processing. */
if (work_pending(&cdev->private->todo_work)) {
ret = -EAGAIN;
goto out;
}
if (!strncmp(buf, "force\n", count)) {
force = 1;
i = 1;
ret = 0;
} else {
force = 0;
ret = kstrtoul(buf, 16, &i);
}
if (ret)
goto out;
device_lock(dev);
switch (i) {
case 0:
ret = online_store_handle_offline(cdev);
break;
case 1:
ret = online_store_handle_online(cdev, force);
break;
default:
ret = -EINVAL;
}
device_unlock(dev);
out:
atomic_set(&cdev->private->onoff, 0);
return (ret < 0) ? ret : count;
}
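/*
 * Example (illustrative sysfs usage; the bus id 0.0.1234 is a
 * placeholder):
 *
 *	echo 1 > /sys/bus/ccw/devices/0.0.1234/online
 *	echo 0 > /sys/bus/ccw/devices/0.0.1234/online
 *	echo force > /sys/bus/ccw/devices/0.0.1234/online
 *
 * "force" additionally attempts to break the reserve of a boxed device
 * (unconditional reserve) before setting it online.
 */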
static ssize_t
available_show (struct device *dev, struct device_attribute *attr, char *buf)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct subchannel *sch;
if (ccw_device_is_orphan(cdev))
return sprintf(buf, "no device\n");
switch (cdev->private->state) {
case DEV_STATE_BOXED:
return sprintf(buf, "boxed\n");
case DEV_STATE_DISCONNECTED:
case DEV_STATE_DISCONNECTED_SENSE_ID:
case DEV_STATE_NOT_OPER:
sch = to_subchannel(dev->parent);
if (!sch->lpm)
return sprintf(buf, "no path\n");
else
return sprintf(buf, "no device\n");
default:
/* All other states considered fine. */
return sprintf(buf, "good\n");
}
}
static ssize_t
initiate_logging(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct subchannel *sch = to_subchannel(dev);
int rc;
rc = chsc_siosl(sch->schid);
if (rc < 0) {
pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
sch->schid.ssid, sch->schid.sch_no, rc);
return rc;
}
pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
sch->schid.ssid, sch->schid.sch_no);
return count;
}
static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct subchannel *sch = to_subchannel(dev);
return sprintf(buf, "%02x\n", sch->vpm);
}
static DEVICE_ATTR_RO(devtype);
static DEVICE_ATTR_RO(cutype);
static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_RW(online);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
static DEVICE_ATTR_RO(vpm);
static struct attribute *io_subchannel_attrs[] = {
&dev_attr_logging.attr,
&dev_attr_vpm.attr,
NULL,
};
static const struct attribute_group io_subchannel_attr_group = {
.attrs = io_subchannel_attrs,
};
static struct attribute * ccwdev_attrs[] = {
&dev_attr_devtype.attr,
&dev_attr_cutype.attr,
&dev_attr_modalias.attr,
&dev_attr_online.attr,
&dev_attr_cmb_enable.attr,
&dev_attr_availability.attr,
NULL,
};
static const struct attribute_group ccwdev_attr_group = {
.attrs = ccwdev_attrs,
};
static const struct attribute_group *ccwdev_attr_groups[] = {
&ccwdev_attr_group,
NULL,
};
static int match_dev_id(struct device *dev, const void *data)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_dev_id *dev_id = (void *)data;
return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}
/**
* get_ccwdev_by_dev_id() - obtain device from a ccw device id
* @dev_id: id of the device to be searched
*
* This function searches all devices attached to the ccw bus for a device
* matching @dev_id.
* Returns:
* If a device is found its reference count is increased and returned;
* else %NULL is returned.
*/
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
struct device *dev;
dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);
return dev ? to_ccwdev(dev) : NULL;
}
EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
int ret;
mutex_lock(&cdev->reg_mutex);
if (device_is_registered(&cdev->dev)) {
device_release_driver(&cdev->dev);
ret = device_attach(&cdev->dev);
WARN_ON(ret == -ENODEV);
}
mutex_unlock(&cdev->reg_mutex);
}
static void
ccw_device_release(struct device *dev)
{
struct ccw_device *cdev;
cdev = to_ccwdev(dev);
cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area,
sizeof(*cdev->private->dma_area));
cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev);
/* Release reference of parent subchannel. */
put_device(cdev->dev.parent);
kfree(cdev->private);
kfree(cdev);
}
static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
{
struct ccw_device *cdev;
struct gen_pool *dma_pool;
int ret;
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev) {
ret = -ENOMEM;
goto err_cdev;
}
cdev->private = kzalloc(sizeof(struct ccw_device_private),
GFP_KERNEL | GFP_DMA);
if (!cdev->private) {
ret = -ENOMEM;
goto err_priv;
}
cdev->dev.dma_mask = sch->dev.dma_mask;
ret = dma_set_coherent_mask(&cdev->dev, sch->dev.coherent_dma_mask);
if (ret)
goto err_coherent_mask;
dma_pool = cio_gp_dma_create(&cdev->dev, 1);
if (!dma_pool) {
ret = -ENOMEM;
goto err_dma_pool;
}
cdev->private->dma_pool = dma_pool;
cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
sizeof(*cdev->private->dma_area));
if (!cdev->private->dma_area) {
ret = -ENOMEM;
goto err_dma_area;
}
return cdev;
err_dma_area:
cio_gp_dma_destroy(dma_pool, &cdev->dev);
err_dma_pool:
err_coherent_mask:
kfree(cdev->private);
err_priv:
kfree(cdev);
err_cdev:
return ERR_PTR(ret);
}
static void ccw_device_todo(struct work_struct *work);
static int io_subchannel_initialize_dev(struct subchannel *sch,
struct ccw_device *cdev)
{
struct ccw_device_private *priv = cdev->private;
int ret;
priv->cdev = cdev;
priv->int_class = IRQIO_CIO;
priv->state = DEV_STATE_NOT_OPER;
priv->dev_id.devno = sch->schib.pmcw.dev;
priv->dev_id.ssid = sch->schid.ssid;
INIT_WORK(&priv->todo_work, ccw_device_todo);
INIT_LIST_HEAD(&priv->cmb_list);
init_waitqueue_head(&priv->wait_q);
timer_setup(&priv->timer, ccw_device_timeout, 0);
mutex_init(&cdev->reg_mutex);
atomic_set(&priv->onoff, 0);
cdev->ccwlock = sch->lock;
cdev->dev.parent = &sch->dev;
cdev->dev.release = ccw_device_release;
cdev->dev.bus = &ccw_bus_type;
cdev->dev.groups = ccwdev_attr_groups;
/* Do first half of device_register. */
device_initialize(&cdev->dev);
ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
if (ret)
goto out_put;
if (!get_device(&sch->dev)) {
ret = -ENODEV;
goto out_put;
}
priv->flags.initialized = 1;
spin_lock_irq(sch->lock);
sch_set_cdev(sch, cdev);
spin_unlock_irq(sch->lock);
return 0;
out_put:
/* Release reference from device_initialize(). */
put_device(&cdev->dev);
return ret;
}
static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
{
struct ccw_device *cdev;
int ret;
cdev = io_subchannel_allocate_dev(sch);
if (!IS_ERR(cdev)) {
ret = io_subchannel_initialize_dev(sch, cdev);
if (ret)
cdev = ERR_PTR(ret);
}
return cdev;
}
static void io_subchannel_recog(struct ccw_device *, struct subchannel *);
static void sch_create_and_recog_new_device(struct subchannel *sch)
{
struct ccw_device *cdev;
/* Need to allocate a new ccw device. */
cdev = io_subchannel_create_ccwdev(sch);
if (IS_ERR(cdev)) {
/* OK, we did everything we could... */
css_sch_device_unregister(sch);
return;
}
/* Start recognition for the new ccw device. */
io_subchannel_recog(cdev, sch);
}
/*
* Register recognized device.
*/
static void io_subchannel_register(struct ccw_device *cdev)
{
struct subchannel *sch;
int ret, adjust_init_count = 1;
unsigned long flags;
sch = to_subchannel(cdev->dev.parent);
/*
* Check if subchannel is still registered. It may have become
* unregistered if a machine check hit us after finishing
* device recognition but before the register work could be
* queued.
*/
if (!device_is_registered(&sch->dev))
goto out_err;
css_update_ssd_info(sch);
/*
* io_subchannel_register() will also be called after device
* recognition has been done for a boxed device (which will already
* be registered). We need to reprobe since we may now have sense id
* information.
*/
mutex_lock(&cdev->reg_mutex);
if (device_is_registered(&cdev->dev)) {
if (!cdev->drv) {
ret = device_reprobe(&cdev->dev);
if (ret)
/* We can't do much here. */
CIO_MSG_EVENT(0, "device_reprobe() returned"
" %d for 0.%x.%04x\n", ret,
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
}
adjust_init_count = 0;
goto out;
}
/* make it known to the system */
ret = device_add(&cdev->dev);
if (ret) {
CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno, ret);
spin_lock_irqsave(sch->lock, flags);
sch_set_cdev(sch, NULL);
spin_unlock_irqrestore(sch->lock, flags);
mutex_unlock(&cdev->reg_mutex);
/* Release initial device reference. */
put_device(&cdev->dev);
goto out_err;
}
out:
cdev->private->flags.recog_done = 1;
mutex_unlock(&cdev->reg_mutex);
wake_up(&cdev->private->wait_q);
out_err:
if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
}
/*
* subchannel recognition done. Called from the state machine.
*/
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
if (css_init_done == 0) {
cdev->private->flags.recog_done = 1;
return;
}
switch (cdev->private->state) {
case DEV_STATE_BOXED:
/* Device did not respond in time. */
case DEV_STATE_NOT_OPER:
cdev->private->flags.recog_done = 1;
/* Remove device found not operational. */
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
break;
case DEV_STATE_OFFLINE:
/*
* We can't register the device in interrupt context so
* we schedule a work item.
*/
ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
break;
}
}
static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
/* Increase counter of devices currently in recognition. */
atomic_inc(&ccw_device_init_count);
/* Start async. device sensing. */
spin_lock_irq(sch->lock);
ccw_device_recognition(cdev);
spin_unlock_irq(sch->lock);
}
static int ccw_device_move_to_sch(struct ccw_device *cdev,
struct subchannel *sch)
{
struct subchannel *old_sch;
int rc, old_enabled = 0;
old_sch = to_subchannel(cdev->dev.parent);
/* Obtain child reference for new parent. */
if (!get_device(&sch->dev))
return -ENODEV;
if (!sch_is_pseudo_sch(old_sch)) {
spin_lock_irq(old_sch->lock);
old_enabled = old_sch->schib.pmcw.ena;
rc = 0;
if (old_enabled)
rc = cio_disable_subchannel(old_sch);
spin_unlock_irq(old_sch->lock);
if (rc == -EBUSY) {
/* Release child reference for new parent. */
put_device(&sch->dev);
return rc;
}
}
mutex_lock(&sch->reg_mutex);
rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
mutex_unlock(&sch->reg_mutex);
if (rc) {
CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno, sch->schid.ssid,
sch->schib.pmcw.dev, rc);
if (old_enabled) {
/* Try to re-enable the old subchannel. */
spin_lock_irq(old_sch->lock);
cio_enable_subchannel(old_sch, (u32)virt_to_phys(old_sch));
spin_unlock_irq(old_sch->lock);
}
/* Release child reference for new parent. */
put_device(&sch->dev);
return rc;
}
/* Clean up old subchannel. */
if (!sch_is_pseudo_sch(old_sch)) {
spin_lock_irq(old_sch->lock);
sch_set_cdev(old_sch, NULL);
spin_unlock_irq(old_sch->lock);
css_schedule_eval(old_sch->schid);
}
/* Release child reference for old parent. */
put_device(&old_sch->dev);
/* Initialize new subchannel. */
spin_lock_irq(sch->lock);
cdev->ccwlock = sch->lock;
if (!sch_is_pseudo_sch(sch))
sch_set_cdev(sch, cdev);
spin_unlock_irq(sch->lock);
if (!sch_is_pseudo_sch(sch))
css_update_ssd_info(sch);
return 0;
}
static int ccw_device_move_to_orph(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct channel_subsystem *css = to_css(sch->dev.parent);
return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}
static void io_subchannel_irq(struct subchannel *sch)
{
struct ccw_device *cdev;
cdev = sch_get_cdev(sch);
CIO_TRACE_EVENT(6, "IRQ");
CIO_TRACE_EVENT(6, dev_name(&sch->dev));
if (cdev)
dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
else
inc_irq_stat(IRQIO_CIO);
}
void io_subchannel_init_config(struct subchannel *sch)
{
memset(&sch->config, 0, sizeof(sch->config));
sch->config.csense = 1;
}
static void io_subchannel_init_fields(struct subchannel *sch)
{
if (cio_is_console(sch->schid))
sch->opm = 0xff;
else
sch->opm = chp_get_sch_opm(sch);
sch->lpm = sch->schib.pmcw.pam & sch->opm;
sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;
CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
" - PIM = %02X, PAM = %02X, POM = %02X\n",
sch->schib.pmcw.dev, sch->schid.ssid,
sch->schid.sch_no, sch->schib.pmcw.pim,
sch->schib.pmcw.pam, sch->schib.pmcw.pom);
io_subchannel_init_config(sch);
}
/*
* Note: We always return 0 so that we bind to the device even on error.
* This is needed so that our remove function is called on unregister.
*/
static int io_subchannel_probe(struct subchannel *sch)
{
struct io_subchannel_private *io_priv;
struct ccw_device *cdev;
int rc;
if (cio_is_console(sch->schid)) {
rc = sysfs_create_group(&sch->dev.kobj,
&io_subchannel_attr_group);
if (rc)
CIO_MSG_EVENT(0, "Failed to create io subchannel "
"attributes for subchannel "
"0.%x.%04x (rc=%d)\n",
sch->schid.ssid, sch->schid.sch_no, rc);
/*
* The console subchannel already has an associated ccw_device.
* Register it and exit.
*/
cdev = sch_get_cdev(sch);
rc = device_add(&cdev->dev);
if (rc) {
/* Release online reference. */
put_device(&cdev->dev);
goto out_schedule;
}
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
return 0;
}
io_subchannel_init_fields(sch);
rc = cio_commit_config(sch);
if (rc)
goto out_schedule;
rc = sysfs_create_group(&sch->dev.kobj,
&io_subchannel_attr_group);
if (rc)
goto out_schedule;
/* Allocate I/O subchannel private data. */
io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
if (!io_priv)
goto out_schedule;
io_priv->dma_area = dma_alloc_coherent(&sch->dev,
sizeof(*io_priv->dma_area),
&io_priv->dma_area_dma, GFP_KERNEL);
if (!io_priv->dma_area) {
kfree(io_priv);
goto out_schedule;
}
set_io_private(sch, io_priv);
css_schedule_eval(sch->schid);
return 0;
out_schedule:
spin_lock_irq(sch->lock);
css_sched_sch_todo(sch, SCH_TODO_UNREG);
spin_unlock_irq(sch->lock);
return 0;
}
static void io_subchannel_remove(struct subchannel *sch)
{
struct io_subchannel_private *io_priv = to_io_private(sch);
struct ccw_device *cdev;
cdev = sch_get_cdev(sch);
if (!cdev)
goto out_free;
ccw_device_unregister(cdev);
spin_lock_irq(sch->lock);
sch_set_cdev(sch, NULL);
set_io_private(sch, NULL);
spin_unlock_irq(sch->lock);
out_free:
dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
io_priv->dma_area, io_priv->dma_area_dma);
kfree(io_priv);
sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
}
static void io_subchannel_verify(struct subchannel *sch)
{
struct ccw_device *cdev;
cdev = sch_get_cdev(sch);
if (cdev)
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
else
css_schedule_eval(sch->schid);
}
static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
struct ccw_device *cdev;
cdev = sch_get_cdev(sch);
if (!cdev)
return;
if (cio_update_schib(sch))
goto err;
/* Check for I/O on path. */
if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
goto out;
if (cdev->private->state == DEV_STATE_ONLINE) {
ccw_device_kill_io(cdev);
goto out;
}
if (cio_clear(sch))
goto err;
out:
/* Trigger path verification. */
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
return;
err:
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}
static int io_subchannel_chp_event(struct subchannel *sch,
struct chp_link *link, int event)
{
struct ccw_device *cdev = sch_get_cdev(sch);
int mask, chpid, valid_bit;
int path_event[8];
mask = chp_ssd_get_mask(&sch->ssd_info, link);
if (!mask)
return 0;
switch (event) {
case CHP_VARY_OFF:
sch->opm &= ~mask;
sch->lpm &= ~mask;
if (cdev)
cdev->private->path_gone_mask |= mask;
io_subchannel_terminate_path(sch, mask);
break;
case CHP_VARY_ON:
sch->opm |= mask;
sch->lpm |= mask;
if (cdev)
cdev->private->path_new_mask |= mask;
io_subchannel_verify(sch);
break;
case CHP_OFFLINE:
if (cio_update_schib(sch))
return -ENODEV;
if (cdev)
cdev->private->path_gone_mask |= mask;
io_subchannel_terminate_path(sch, mask);
break;
case CHP_ONLINE:
if (cio_update_schib(sch))
return -ENODEV;
sch->lpm |= mask & sch->opm;
if (cdev)
cdev->private->path_new_mask |= mask;
io_subchannel_verify(sch);
break;
case CHP_FCES_EVENT:
/* Forward Endpoint Security event */
for (chpid = 0, valid_bit = 0x80; chpid < 8; chpid++,
valid_bit >>= 1) {
if (mask & valid_bit)
path_event[chpid] = PE_PATH_FCES_EVENT;
else
path_event[chpid] = PE_NONE;
}
if (cdev && cdev->drv && cdev->drv->path_event)
cdev->drv->path_event(cdev, path_event);
break;
}
return 0;
}
static void io_subchannel_quiesce(struct subchannel *sch)
{
struct ccw_device *cdev;
int ret;
spin_lock_irq(sch->lock);
cdev = sch_get_cdev(sch);
if (cio_is_console(sch->schid))
goto out_unlock;
if (!sch->schib.pmcw.ena)
goto out_unlock;
ret = cio_disable_subchannel(sch);
if (ret != -EBUSY)
goto out_unlock;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
while (ret == -EBUSY) {
cdev->private->state = DEV_STATE_QUIESCE;
cdev->private->iretry = 255;
ret = ccw_device_cancel_halt_clear(cdev);
if (ret == -EBUSY) {
ccw_device_set_timeout(cdev, HZ/10);
spin_unlock_irq(sch->lock);
wait_event(cdev->private->wait_q,
cdev->private->state != DEV_STATE_QUIESCE);
spin_lock_irq(sch->lock);
}
ret = cio_disable_subchannel(sch);
}
out_unlock:
spin_unlock_irq(sch->lock);
}
static void io_subchannel_shutdown(struct subchannel *sch)
{
io_subchannel_quiesce(sch);
}
static int device_is_disconnected(struct ccw_device *cdev)
{
if (!cdev)
return 0;
return (cdev->private->state == DEV_STATE_DISCONNECTED ||
cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}
static int recovery_check(struct device *dev, void *data)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct subchannel *sch;
int *redo = data;
spin_lock_irq(cdev->ccwlock);
switch (cdev->private->state) {
case DEV_STATE_ONLINE:
sch = to_subchannel(cdev->dev.parent);
if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
break;
fallthrough;
case DEV_STATE_DISCONNECTED:
CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
*redo = 1;
break;
case DEV_STATE_DISCONNECTED_SENSE_ID:
*redo = 1;
break;
}
spin_unlock_irq(cdev->ccwlock);
return 0;
}
static void recovery_work_func(struct work_struct *unused)
{
int redo = 0;
bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
if (redo) {
spin_lock_irq(&recovery_lock);
if (!timer_pending(&recovery_timer)) {
if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
recovery_phase++;
mod_timer(&recovery_timer, jiffies +
recovery_delay[recovery_phase] * HZ);
}
spin_unlock_irq(&recovery_lock);
} else
CIO_MSG_EVENT(3, "recovery: end\n");
}
static DECLARE_WORK(recovery_work, recovery_work_func);
static void recovery_func(struct timer_list *unused)
{
/*
* We can't do our recovery in softirq context and it's not
* performance critical, so we schedule it.
*/
schedule_work(&recovery_work);
}
void ccw_device_schedule_recovery(void)
{
unsigned long flags;
CIO_MSG_EVENT(3, "recovery: schedule\n");
spin_lock_irqsave(&recovery_lock, flags);
if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
recovery_phase = 0;
mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
}
spin_unlock_irqrestore(&recovery_lock, flags);
}
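/*
 * Illustrative sketch (not part of this driver): the timer re-arm logic
 * above steps through recovery_delay[] until the last phase is reached,
 * yielding a staged back-off between recovery attempts. A standalone
 * model of that progression, with hypothetical delay values:
 */
#if 0
static const unsigned long demo_delay[] = { 3, 30, 300 };	/* seconds, assumed */
static int demo_phase;

static unsigned long demo_next_expiry(unsigned long now)
{
	/* Advance to the next phase until the longest delay is reached. */
	if (demo_phase < ARRAY_SIZE(demo_delay) - 1)
		demo_phase++;
	return now + demo_delay[demo_phase] * HZ;
}
#endif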
static int purge_fn(struct device *dev, void *data)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_dev_id *id = &cdev->private->dev_id;
struct subchannel *sch = to_subchannel(cdev->dev.parent);
spin_lock_irq(cdev->ccwlock);
if (is_blacklisted(id->ssid, id->devno) &&
(cdev->private->state == DEV_STATE_OFFLINE) &&
(atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
id->devno);
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
css_sched_sch_todo(sch, SCH_TODO_UNREG);
atomic_set(&cdev->private->onoff, 0);
}
spin_unlock_irq(cdev->ccwlock);
/* Abort loop in case of pending signal. */
if (signal_pending(current))
return -EINTR;
return 0;
}
/**
* ccw_purge_blacklisted - purge unused, blacklisted devices
*
* Unregister all ccw devices that are offline and on the blacklist.
*/
int ccw_purge_blacklisted(void)
{
CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
return 0;
}
void ccw_device_set_disconnected(struct ccw_device *cdev)
{
if (!cdev)
return;
ccw_device_set_timeout(cdev, 0);
cdev->private->flags.fake_irb = 0;
cdev->private->state = DEV_STATE_DISCONNECTED;
if (cdev->online)
ccw_device_schedule_recovery();
}
void ccw_device_set_notoper(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
CIO_TRACE_EVENT(2, "notoper");
CIO_TRACE_EVENT(2, dev_name(&sch->dev));
ccw_device_set_timeout(cdev, 0);
cio_disable_subchannel(sch);
cdev->private->state = DEV_STATE_NOT_OPER;
}
enum io_sch_action {
IO_SCH_UNREG,
IO_SCH_ORPH_UNREG,
IO_SCH_UNREG_CDEV,
IO_SCH_ATTACH,
IO_SCH_UNREG_ATTACH,
IO_SCH_ORPH_ATTACH,
IO_SCH_REPROBE,
IO_SCH_VERIFY,
IO_SCH_DISC,
IO_SCH_NOP,
};
static enum io_sch_action sch_get_action(struct subchannel *sch)
{
struct ccw_device *cdev;
cdev = sch_get_cdev(sch);
if (cio_update_schib(sch)) {
/* Not operational. */
if (!cdev)
return IO_SCH_UNREG;
if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
return IO_SCH_UNREG;
return IO_SCH_ORPH_UNREG;
}
/* Operational. */
if (!cdev)
return IO_SCH_ATTACH;
if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
return IO_SCH_UNREG_ATTACH;
return IO_SCH_ORPH_ATTACH;
}
if ((sch->schib.pmcw.pam & sch->opm) == 0) {
if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
return IO_SCH_UNREG_CDEV;
return IO_SCH_DISC;
}
if (device_is_disconnected(cdev))
return IO_SCH_REPROBE;
if (cdev->online)
return IO_SCH_VERIFY;
if (cdev->private->state == DEV_STATE_NOT_OPER)
return IO_SCH_UNREG_ATTACH;
return IO_SCH_NOP;
}
/**
* io_subchannel_sch_event - process subchannel event
* @sch: subchannel
* @process: non-zero if function is called in process context
*
* An unspecified event occurred for this subchannel. Adjust data according
* to the current operational state of the subchannel and device. Return
* zero when the event has been handled sufficiently or -EAGAIN when this
* function should be called again in process context.
*/
static int io_subchannel_sch_event(struct subchannel *sch, int process)
{
unsigned long flags;
struct ccw_device *cdev;
struct ccw_dev_id dev_id;
enum io_sch_action action;
int rc = -EAGAIN;
spin_lock_irqsave(sch->lock, flags);
if (!device_is_registered(&sch->dev))
goto out_unlock;
if (work_pending(&sch->todo_work))
goto out_unlock;
cdev = sch_get_cdev(sch);
if (cdev && work_pending(&cdev->private->todo_work))
goto out_unlock;
action = sch_get_action(sch);
CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
sch->schid.ssid, sch->schid.sch_no, process,
action);
/* Perform immediate actions while holding the lock. */
switch (action) {
case IO_SCH_REPROBE:
/* Trigger device recognition. */
ccw_device_trigger_reprobe(cdev);
rc = 0;
goto out_unlock;
case IO_SCH_VERIFY:
/* Trigger path verification. */
io_subchannel_verify(sch);
rc = 0;
goto out_unlock;
case IO_SCH_DISC:
ccw_device_set_disconnected(cdev);
rc = 0;
goto out_unlock;
case IO_SCH_ORPH_UNREG:
case IO_SCH_ORPH_ATTACH:
ccw_device_set_disconnected(cdev);
break;
case IO_SCH_UNREG_CDEV:
case IO_SCH_UNREG_ATTACH:
case IO_SCH_UNREG:
if (!cdev)
break;
if (cdev->private->state == DEV_STATE_SENSE_ID) {
/*
* Note: delayed work triggered by this event
* and repeated calls to sch_event are synchronized
			 * by the above work_pending() check on the
			 * cdev todo_work.
*/
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
} else
ccw_device_set_notoper(cdev);
break;
case IO_SCH_NOP:
rc = 0;
goto out_unlock;
default:
break;
}
spin_unlock_irqrestore(sch->lock, flags);
/* All other actions require process context. */
if (!process)
goto out;
/* Handle attached ccw device. */
switch (action) {
case IO_SCH_ORPH_UNREG:
case IO_SCH_ORPH_ATTACH:
/* Move ccw device to orphanage. */
rc = ccw_device_move_to_orph(cdev);
if (rc)
goto out;
break;
case IO_SCH_UNREG_CDEV:
case IO_SCH_UNREG_ATTACH:
spin_lock_irqsave(sch->lock, flags);
sch_set_cdev(sch, NULL);
spin_unlock_irqrestore(sch->lock, flags);
/* Unregister ccw device. */
ccw_device_unregister(cdev);
break;
default:
break;
}
/* Handle subchannel. */
switch (action) {
case IO_SCH_ORPH_UNREG:
case IO_SCH_UNREG:
css_sch_device_unregister(sch);
break;
case IO_SCH_ORPH_ATTACH:
case IO_SCH_UNREG_ATTACH:
case IO_SCH_ATTACH:
dev_id.ssid = sch->schid.ssid;
dev_id.devno = sch->schib.pmcw.dev;
cdev = get_ccwdev_by_dev_id(&dev_id);
if (!cdev) {
sch_create_and_recog_new_device(sch);
break;
}
rc = ccw_device_move_to_sch(cdev, sch);
if (rc) {
/* Release reference from get_ccwdev_by_dev_id() */
put_device(&cdev->dev);
goto out;
}
spin_lock_irqsave(sch->lock, flags);
ccw_device_trigger_reprobe(cdev);
spin_unlock_irqrestore(sch->lock, flags);
/* Release reference from get_ccwdev_by_dev_id() */
put_device(&cdev->dev);
break;
default:
break;
}
return 0;
out_unlock:
spin_unlock_irqrestore(sch->lock, flags);
out:
return rc;
}
static void ccw_device_set_int_class(struct ccw_device *cdev)
{
struct ccw_driver *cdrv = cdev->drv;
/* Note: we interpret class 0 in this context as an uninitialized
* field since it translates to a non-I/O interrupt class. */
if (cdrv->int_class != 0)
cdev->private->int_class = cdrv->int_class;
else
cdev->private->int_class = IRQIO_CIO;
}
#ifdef CONFIG_CCW_CONSOLE
int __init ccw_device_enable_console(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
int rc;
if (!cdev->drv || !cdev->handler)
return -EINVAL;
io_subchannel_init_fields(sch);
rc = cio_commit_config(sch);
if (rc)
return rc;
sch->driver = &io_subchannel_driver;
io_subchannel_recog(cdev, sch);
/* Now wait for the async. recognition to come to an end. */
spin_lock_irq(cdev->ccwlock);
while (!dev_fsm_final_state(cdev))
ccw_device_wait_idle(cdev);
/* Hold on to an extra reference while device is online. */
get_device(&cdev->dev);
rc = ccw_device_online(cdev);
if (rc)
goto out_unlock;
while (!dev_fsm_final_state(cdev))
ccw_device_wait_idle(cdev);
if (cdev->private->state == DEV_STATE_ONLINE)
cdev->online = 1;
else
rc = -EIO;
out_unlock:
spin_unlock_irq(cdev->ccwlock);
if (rc) /* Give up online reference since onlining failed. */
put_device(&cdev->dev);
return rc;
}
struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
{
struct io_subchannel_private *io_priv;
struct ccw_device *cdev;
struct subchannel *sch;
sch = cio_probe_console();
if (IS_ERR(sch))
return ERR_CAST(sch);
io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
if (!io_priv)
goto err_priv;
io_priv->dma_area = dma_alloc_coherent(&sch->dev,
sizeof(*io_priv->dma_area),
&io_priv->dma_area_dma, GFP_KERNEL);
if (!io_priv->dma_area)
goto err_dma_area;
set_io_private(sch, io_priv);
cdev = io_subchannel_create_ccwdev(sch);
if (IS_ERR(cdev)) {
dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
io_priv->dma_area, io_priv->dma_area_dma);
set_io_private(sch, NULL);
put_device(&sch->dev);
kfree(io_priv);
return cdev;
}
cdev->drv = drv;
ccw_device_set_int_class(cdev);
return cdev;
err_dma_area:
kfree(io_priv);
err_priv:
put_device(&sch->dev);
return ERR_PTR(-ENOMEM);
}
void __init ccw_device_destroy_console(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct io_subchannel_private *io_priv = to_io_private(sch);
set_io_private(sch, NULL);
dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
io_priv->dma_area, io_priv->dma_area_dma);
put_device(&sch->dev);
put_device(&cdev->dev);
kfree(io_priv);
}
/**
* ccw_device_wait_idle() - busy wait for device to become idle
* @cdev: ccw device
*
* Poll until activity control is zero, that is, no function or data
* transfer is pending/active.
 * Called with the device lock held.
*/
void ccw_device_wait_idle(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
while (1) {
cio_tsch(sch);
if (sch->schib.scsw.cmd.actl == 0)
break;
udelay(100);
}
}
#endif
/**
* get_ccwdev_by_busid() - obtain device from a bus id
* @cdrv: driver the device is owned by
* @bus_id: bus id of the device to be searched
*
* This function searches all devices owned by @cdrv for a device with a bus
* id matching @bus_id.
* Returns:
 *  If a match is found, the reference count of the found device is
 *  increased and the device is returned; else %NULL is returned.
*/
struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
const char *bus_id)
{
struct device *dev;
dev = driver_find_device_by_name(&cdrv->driver, bus_id);
return dev ? to_ccwdev(dev) : NULL;
}
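/*
 * Usage sketch (illustrative, not part of this file): looking up one of
 * a driver's devices by bus id. The driver variable and bus id below are
 * hypothetical; the reference obtained here must be dropped again with
 * put_device() when the caller is done.
 */
#if 0
	struct ccw_device *cdev;

	cdev = get_ccwdev_by_busid(&example_ccw_driver, "0.0.1234");
	if (cdev) {
		/* ... use cdev ... */
		put_device(&cdev->dev);
	}
#endif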
/************************** device driver handling ************************/
/* This is the implementation of the ccw_driver class. The probe, remove
* and release methods are initially very similar to the device_driver
* implementations, with the difference that they have ccw_device
* arguments.
*
* A ccw driver also contains the information that is needed for
* device matching.
*/
static int
ccw_device_probe (struct device *dev)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
int ret;
cdev->drv = cdrv; /* to let the driver call _set_online */
ccw_device_set_int_class(cdev);
ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
if (ret) {
cdev->drv = NULL;
cdev->private->int_class = IRQIO_CIO;
return ret;
}
return 0;
}
static void ccw_device_remove(struct device *dev)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct ccw_driver *cdrv = cdev->drv;
struct subchannel *sch;
int ret;
if (cdrv->remove)
cdrv->remove(cdev);
spin_lock_irq(cdev->ccwlock);
if (cdev->online) {
cdev->online = 0;
ret = ccw_device_offline(cdev);
spin_unlock_irq(cdev->ccwlock);
if (ret == 0)
wait_event(cdev->private->wait_q,
dev_fsm_final_state(cdev));
else
CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
"device 0.%x.%04x\n",
ret, cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
/* Give up reference obtained in ccw_device_set_online(). */
put_device(&cdev->dev);
spin_lock_irq(cdev->ccwlock);
}
ccw_device_set_timeout(cdev, 0);
cdev->drv = NULL;
cdev->private->int_class = IRQIO_CIO;
sch = to_subchannel(cdev->dev.parent);
spin_unlock_irq(cdev->ccwlock);
io_subchannel_quiesce(sch);
__disable_cmf(cdev);
}
static void ccw_device_shutdown(struct device *dev)
{
struct ccw_device *cdev;
cdev = to_ccwdev(dev);
if (cdev->drv && cdev->drv->shutdown)
cdev->drv->shutdown(cdev);
__disable_cmf(cdev);
}
static struct bus_type ccw_bus_type = {
.name = "ccw",
.match = ccw_bus_match,
.uevent = ccw_uevent,
.probe = ccw_device_probe,
.remove = ccw_device_remove,
.shutdown = ccw_device_shutdown,
};
/**
* ccw_driver_register() - register a ccw driver
* @cdriver: driver to be registered
*
* This function is mainly a wrapper around driver_register().
* Returns:
* %0 on success and a negative error value on failure.
*/
int ccw_driver_register(struct ccw_driver *cdriver)
{
struct device_driver *drv = &cdriver->driver;
drv->bus = &ccw_bus_type;
return driver_register(drv);
}
/**
* ccw_driver_unregister() - deregister a ccw driver
* @cdriver: driver to be deregistered
*
* This function is mainly a wrapper around driver_unregister().
*/
void ccw_driver_unregister(struct ccw_driver *cdriver)
{
driver_unregister(&cdriver->driver);
}
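/*
 * Registration sketch (illustrative): a minimal ccw driver built on the
 * two wrappers above. All names, the device id table and the callbacks
 * here are hypothetical placeholders.
 */
#if 0
static int example_probe(struct ccw_device *cdev)
{
	return 0;	/* accept the device */
}

static struct ccw_device_id example_ids[] = {
	{ CCW_DEVICE(0x3990, 0) },	/* hypothetical control unit type */
	{ /* end of list */ },
};

static struct ccw_driver example_driver = {
	.driver = {
		.name	= "example_ccw",
		.owner	= THIS_MODULE,
	},
	.ids	= example_ids,
	.probe	= example_probe,
};

static int __init example_init(void)
{
	return ccw_driver_register(&example_driver);
}
#endif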
static void ccw_device_todo(struct work_struct *work)
{
struct ccw_device_private *priv;
struct ccw_device *cdev;
struct subchannel *sch;
enum cdev_todo todo;
priv = container_of(work, struct ccw_device_private, todo_work);
cdev = priv->cdev;
sch = to_subchannel(cdev->dev.parent);
/* Find out todo. */
spin_lock_irq(cdev->ccwlock);
todo = priv->todo;
priv->todo = CDEV_TODO_NOTHING;
CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
priv->dev_id.ssid, priv->dev_id.devno, todo);
spin_unlock_irq(cdev->ccwlock);
/* Perform todo. */
switch (todo) {
case CDEV_TODO_ENABLE_CMF:
cmf_reenable(cdev);
break;
case CDEV_TODO_REBIND:
ccw_device_do_unbind_bind(cdev);
break;
case CDEV_TODO_REGISTER:
io_subchannel_register(cdev);
break;
case CDEV_TODO_UNREG_EVAL:
if (!sch_is_pseudo_sch(sch))
css_schedule_eval(sch->schid);
fallthrough;
case CDEV_TODO_UNREG:
spin_lock_irq(sch->lock);
sch_set_cdev(sch, NULL);
spin_unlock_irq(sch->lock);
ccw_device_unregister(cdev);
break;
default:
break;
}
/* Release workqueue ref. */
put_device(&cdev->dev);
}
/**
* ccw_device_sched_todo - schedule ccw device operation
* @cdev: ccw device
* @todo: todo
*
* Schedule the operation identified by @todo to be performed on the slow path
* workqueue. Do nothing if another operation with higher priority is already
* scheduled. Needs to be called with ccwdev lock held.
*/
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
{
CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
todo);
if (cdev->private->todo >= todo)
return;
cdev->private->todo = todo;
/* Get workqueue ref. */
if (!get_device(&cdev->dev))
return;
if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
/* Already queued, release workqueue ref. */
put_device(&cdev->dev);
}
}
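/*
 * Usage sketch (illustrative): callers hold the ccwdev lock around the
 * call, and the numeric order of enum cdev_todo encodes priority, so a
 * lower-priority request never downgrades an already pending one.
 */
#if 0
	spin_lock_irq(cdev->ccwlock);
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	spin_unlock_irq(cdev->ccwlock);
#endif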
/**
* ccw_device_siosl() - initiate logging
* @cdev: ccw device
*
* This function is used to invoke model-dependent logging within the channel
* subsystem.
*/
int ccw_device_siosl(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
return chsc_siosl(sch->schid);
}
EXPORT_SYMBOL_GPL(ccw_device_siosl);
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);
| linux-master | drivers/s390/cio/device.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2000, 2009
* Author(s): Utz Bacher <[email protected]>
* Cornelia Huck <[email protected]>
* Jan Glauber <[email protected]>
*/
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/atomic.h>
#include <linux/rculist.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
#include <asm/isc.h>
#include <asm/tpi.h>
#include "cio.h"
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"
/*
 * Restriction: only 63 iqdio subchannels can have their own indicator;
 * after that, subsequent subchannels share one indicator.
*/
#define TIQDIO_NR_NONSHARED_IND 63
#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
#define TIQDIO_SHARED_IND 63
/* device state change indicators */
struct indicator_t {
u32 ind; /* u32 because of compare-and-swap performance */
atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};
/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
static DEFINE_MUTEX(tiq_list_lock);
static struct indicator_t *q_indicators;
u64 last_ai_time;
/* returns addr for the device state change indicator */
static u32 *get_indicator(void)
{
int i;
for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
if (!atomic_cmpxchg(&q_indicators[i].count, 0, 1))
return &q_indicators[i].ind;
/* use the shared indicator */
atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
return &q_indicators[TIQDIO_SHARED_IND].ind;
}
static void put_indicator(u32 *addr)
{
struct indicator_t *ind = container_of(addr, struct indicator_t, ind);
if (!addr)
return;
atomic_dec(&ind->count);
}
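/*
 * Pairing sketch (illustrative): each thinint queue takes one indicator
 * on establish and returns it on shutdown. Once all non-shared slots
 * are taken, get_indicator() falls back to the shared slot and only
 * bumps its use count, so it never fails.
 */
#if 0
	u32 *dsci = get_indicator();
	/* ... point the subchannel's indicator at *dsci ... */
	put_indicator(dsci);
#endif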
static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
{
return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}
int test_nonshared_ind(struct qdio_irq *irq_ptr)
{
if (!is_thinint_irq(irq_ptr))
return 0;
if (references_shared_dsci(irq_ptr))
return 0;
if (*irq_ptr->dsci)
return 1;
else
return 0;
}
static inline u32 clear_shared_ind(void)
{
if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count))
return 0;
return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
}
/**
* tiqdio_thinint_handler - thin interrupt handler for qdio
* @airq: pointer to adapter interrupt descriptor
* @tpi_info: interrupt information (e.g. floating vs directed -- unused)
*/
static void tiqdio_thinint_handler(struct airq_struct *airq,
struct tpi_info *tpi_info)
{
u64 irq_time = S390_lowcore.int_clock;
u32 si_used = clear_shared_ind();
struct qdio_irq *irq;
last_ai_time = irq_time;
inc_irq_stat(IRQIO_QAI);
/* protect tiq_list entries, only changed in activate or shutdown */
rcu_read_lock();
list_for_each_entry_rcu(irq, &tiq_list, entry) {
/* only process queues from changed sets */
if (unlikely(references_shared_dsci(irq))) {
if (!si_used)
continue;
} else {
if (!*irq->dsci)
continue;
xchg(irq->dsci, 0);
}
qdio_deliver_irq(irq);
irq->last_data_irq_time = irq_time;
QDIO_PERF_STAT_INC(irq, adapter_int);
}
rcu_read_unlock();
}
static struct airq_struct tiqdio_airq = {
.handler = tiqdio_thinint_handler,
.isc = QDIO_AIRQ_ISC,
};
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
u64 summary_indicator_addr, subchannel_indicator_addr;
int rc;
if (reset) {
summary_indicator_addr = 0;
subchannel_indicator_addr = 0;
} else {
summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr);
subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci);
}
rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
subchannel_indicator_addr, tiqdio_airq.isc);
if (rc) {
DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
scssc->response.code);
goto out;
}
DBF_EVENT("setscind");
DBF_HEX(&summary_indicator_addr, sizeof(summary_indicator_addr));
DBF_HEX(&subchannel_indicator_addr, sizeof(subchannel_indicator_addr));
out:
return rc;
}
int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
int rc;
if (!is_thinint_irq(irq_ptr))
return 0;
irq_ptr->dsci = get_indicator();
DBF_HEX(&irq_ptr->dsci, sizeof(void *));
rc = set_subchannel_ind(irq_ptr, 0);
if (rc) {
put_indicator(irq_ptr->dsci);
return rc;
}
mutex_lock(&tiq_list_lock);
list_add_rcu(&irq_ptr->entry, &tiq_list);
mutex_unlock(&tiq_list_lock);
return 0;
}
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
{
if (!is_thinint_irq(irq_ptr))
return;
mutex_lock(&tiq_list_lock);
list_del_rcu(&irq_ptr->entry);
mutex_unlock(&tiq_list_lock);
synchronize_rcu();
/* reset adapter interrupt indicators */
set_subchannel_ind(irq_ptr, 1);
put_indicator(irq_ptr->dsci);
}
int __init qdio_thinint_init(void)
{
int rc;
q_indicators = kcalloc(TIQDIO_NR_INDICATORS, sizeof(struct indicator_t),
GFP_KERNEL);
if (!q_indicators)
return -ENOMEM;
rc = register_adapter_interrupt(&tiqdio_airq);
if (rc) {
DBF_EVENT("RTI:%x", rc);
kfree(q_indicators);
return rc;
}
return 0;
}
void __exit qdio_thinint_exit(void)
{
WARN_ON(!list_empty(&tiq_list));
unregister_adapter_interrupt(&tiqdio_airq);
kfree(q_indicators);
}
| linux-master | drivers/s390/cio/qdio_thinint.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for adapter interruptions
*
* Copyright IBM Corp. 1999, 2007
* Author(s): Ingo Adlung <[email protected]>
* Cornelia Huck <[email protected]>
* Arnd Bergmann <[email protected]>
* Peter Oberparleiter <[email protected]>
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <asm/airq.h>
#include <asm/isc.h>
#include <asm/cio.h>
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
static DEFINE_SPINLOCK(airq_lists_lock);
static struct hlist_head airq_lists[MAX_ISC+1];
static struct dma_pool *airq_iv_cache;
/**
* register_adapter_interrupt() - register adapter interrupt handler
* @airq: pointer to adapter interrupt descriptor
*
 * Returns 0 on success, -EINVAL for an invalid descriptor, or -ENOMEM
 * if allocating the local summary indicator fails.
*/
int register_adapter_interrupt(struct airq_struct *airq)
{
char dbf_txt[32];
if (!airq->handler || airq->isc > MAX_ISC)
return -EINVAL;
if (!airq->lsi_ptr) {
airq->lsi_ptr = cio_dma_zalloc(1);
if (!airq->lsi_ptr)
return -ENOMEM;
airq->flags |= AIRQ_PTR_ALLOCATED;
}
snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%p", airq);
CIO_TRACE_EVENT(4, dbf_txt);
isc_register(airq->isc);
spin_lock(&airq_lists_lock);
hlist_add_head_rcu(&airq->list, &airq_lists[airq->isc]);
spin_unlock(&airq_lists_lock);
return 0;
}
EXPORT_SYMBOL(register_adapter_interrupt);
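/*
 * Usage sketch (illustrative, not part of this file): a typical user
 * fills in a handler and an ISC and registers the descriptor; lsi_ptr
 * is allocated on its behalf when left NULL. The handler name and the
 * ISC choice below are hypothetical.
 */
#if 0
static void example_handler(struct airq_struct *airq,
			    struct tpi_info *tpi_info)
{
	/* *airq->lsi_ptr was non-zero when this handler was invoked */
}

static struct airq_struct example_airq = {
	.handler = example_handler,
	.isc	 = IO_SCH_ISC,	/* hypothetical ISC choice */
};

static int example_init(void)
{
	return register_adapter_interrupt(&example_airq);
}
#endif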
/**
 * unregister_adapter_interrupt() - unregister adapter interrupt handler
* @airq: pointer to adapter interrupt descriptor
*/
void unregister_adapter_interrupt(struct airq_struct *airq)
{
char dbf_txt[32];
if (hlist_unhashed(&airq->list))
return;
snprintf(dbf_txt, sizeof(dbf_txt), "urairq:%p", airq);
CIO_TRACE_EVENT(4, dbf_txt);
spin_lock(&airq_lists_lock);
hlist_del_rcu(&airq->list);
spin_unlock(&airq_lists_lock);
synchronize_rcu();
isc_unregister(airq->isc);
if (airq->flags & AIRQ_PTR_ALLOCATED) {
cio_dma_free(airq->lsi_ptr, 1);
airq->lsi_ptr = NULL;
airq->flags &= ~AIRQ_PTR_ALLOCATED;
}
}
EXPORT_SYMBOL(unregister_adapter_interrupt);
static irqreturn_t do_airq_interrupt(int irq, void *dummy)
{
struct tpi_info *tpi_info;
struct airq_struct *airq;
struct hlist_head *head;
set_cpu_flag(CIF_NOHZ_DELAY);
tpi_info = &get_irq_regs()->tpi_info;
trace_s390_cio_adapter_int(tpi_info);
head = &airq_lists[tpi_info->isc];
rcu_read_lock();
hlist_for_each_entry_rcu(airq, head, list)
if (*airq->lsi_ptr != 0)
airq->handler(airq, tpi_info);
rcu_read_unlock();
return IRQ_HANDLED;
}
void __init init_airq_interrupts(void)
{
irq_set_chip_and_handler(THIN_INTERRUPT,
&dummy_irq_chip, handle_percpu_irq);
if (request_irq(THIN_INTERRUPT, do_airq_interrupt, 0, "AIO", NULL))
panic("Failed to register AIO interrupt\n");
}
static inline unsigned long iv_size(unsigned long bits)
{
return BITS_TO_LONGS(bits) * sizeof(unsigned long);
}
/**
* airq_iv_create - create an interrupt vector
* @bits: number of bits in the interrupt vector
* @flags: allocation flags
* @vec: pointer to pinned guest memory if AIRQ_IV_GUESTVEC
*
 * Returns a pointer to an interrupt vector structure, or NULL on failure
*/
struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags,
unsigned long *vec)
{
struct airq_iv *iv;
unsigned long size;
iv = kzalloc(sizeof(*iv), GFP_KERNEL);
if (!iv)
goto out;
iv->bits = bits;
iv->flags = flags;
size = iv_size(bits);
if (flags & AIRQ_IV_CACHELINE) {
if ((cache_line_size() * BITS_PER_BYTE) < bits
|| !airq_iv_cache)
goto out_free;
iv->vector = dma_pool_zalloc(airq_iv_cache, GFP_KERNEL,
&iv->vector_dma);
if (!iv->vector)
goto out_free;
} else if (flags & AIRQ_IV_GUESTVEC) {
iv->vector = vec;
} else {
iv->vector = cio_dma_zalloc(size);
if (!iv->vector)
goto out_free;
}
if (flags & AIRQ_IV_ALLOC) {
iv->avail = kmalloc(size, GFP_KERNEL);
if (!iv->avail)
goto out_free;
memset(iv->avail, 0xff, size);
iv->end = 0;
} else
iv->end = bits;
if (flags & AIRQ_IV_BITLOCK) {
iv->bitlock = kzalloc(size, GFP_KERNEL);
if (!iv->bitlock)
goto out_free;
}
if (flags & AIRQ_IV_PTR) {
size = bits * sizeof(unsigned long);
iv->ptr = kzalloc(size, GFP_KERNEL);
if (!iv->ptr)
goto out_free;
}
if (flags & AIRQ_IV_DATA) {
size = bits * sizeof(unsigned int);
iv->data = kzalloc(size, GFP_KERNEL);
if (!iv->data)
goto out_free;
}
spin_lock_init(&iv->lock);
return iv;
out_free:
kfree(iv->ptr);
kfree(iv->bitlock);
kfree(iv->avail);
if (iv->flags & AIRQ_IV_CACHELINE && iv->vector)
dma_pool_free(airq_iv_cache, iv->vector, iv->vector_dma);
else if (!(iv->flags & AIRQ_IV_GUESTVEC))
cio_dma_free(iv->vector, size);
kfree(iv);
out:
return NULL;
}
EXPORT_SYMBOL(airq_iv_create);
/**
* airq_iv_release - release an interrupt vector
* @iv: pointer to interrupt vector structure
*/
void airq_iv_release(struct airq_iv *iv)
{
kfree(iv->data);
kfree(iv->ptr);
kfree(iv->bitlock);
if (iv->flags & AIRQ_IV_CACHELINE)
dma_pool_free(airq_iv_cache, iv->vector, iv->vector_dma);
else if (!(iv->flags & AIRQ_IV_GUESTVEC))
cio_dma_free(iv->vector, iv_size(iv->bits));
kfree(iv->avail);
kfree(iv);
}
EXPORT_SYMBOL(airq_iv_release);
/**
* airq_iv_alloc - allocate irq bits from an interrupt vector
* @iv: pointer to an interrupt vector structure
* @num: number of consecutive irq bits to allocate
*
* Returns the bit number of the first irq in the allocated block of irqs,
* or -1UL if no bit is available or the AIRQ_IV_ALLOC flag has not been
* specified
*/
unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num)
{
unsigned long bit, i, flags;
if (!iv->avail || num == 0)
return -1UL;
spin_lock_irqsave(&iv->lock, flags);
bit = find_first_bit_inv(iv->avail, iv->bits);
while (bit + num <= iv->bits) {
for (i = 1; i < num; i++)
if (!test_bit_inv(bit + i, iv->avail))
break;
if (i >= num) {
/* Found a suitable block of irqs */
for (i = 0; i < num; i++)
clear_bit_inv(bit + i, iv->avail);
if (bit + num >= iv->end)
iv->end = bit + num + 1;
break;
}
bit = find_next_bit_inv(iv->avail, iv->bits, bit + i + 1);
}
if (bit + num > iv->bits)
bit = -1UL;
spin_unlock_irqrestore(&iv->lock, flags);
return bit;
}
EXPORT_SYMBOL(airq_iv_alloc);
/**
* airq_iv_free - free irq bits of an interrupt vector
* @iv: pointer to interrupt vector structure
* @bit: number of the first irq bit to free
* @num: number of consecutive irq bits to free
*/
void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num)
{
unsigned long i, flags;
if (!iv->avail || num == 0)
return;
spin_lock_irqsave(&iv->lock, flags);
for (i = 0; i < num; i++) {
/* Clear (possibly left over) interrupt bit */
clear_bit_inv(bit + i, iv->vector);
/* Make the bit positions available again */
set_bit_inv(bit + i, iv->avail);
}
if (bit + num >= iv->end) {
/* Find new end of bit-field */
while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail))
iv->end--;
}
spin_unlock_irqrestore(&iv->lock, flags);
}
EXPORT_SYMBOL(airq_iv_free);
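/*
 * Lifecycle sketch (illustrative): creating a managed vector, taking a
 * block of bits and tearing everything down again. The vector size and
 * the flags chosen here are hypothetical.
 */
#if 0
	struct airq_iv *iv;
	unsigned long bit;

	iv = airq_iv_create(256, AIRQ_IV_ALLOC, NULL);
	if (!iv)
		return -ENOMEM;
	bit = airq_iv_alloc(iv, 8);	/* 8 consecutive irq bits */
	if (bit != -1UL)
		airq_iv_free(iv, bit, 8);
	airq_iv_release(iv);
#endif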
/**
* airq_iv_scan - scan interrupt vector for non-zero bits
* @iv: pointer to interrupt vector structure
* @start: bit number to start the search
* @end: bit number to end the search
*
* Returns the bit number of the next non-zero interrupt bit, or
 * -1UL if the scan completed without finding any more non-zero bits.
*/
unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
unsigned long end)
{
unsigned long bit;
	/* Find the next non-zero bit starting from @start. */
bit = find_next_bit_inv(iv->vector, end, start);
if (bit >= end)
return -1UL;
clear_bit_inv(bit, iv->vector);
return bit;
}
EXPORT_SYMBOL(airq_iv_scan);
int __init airq_init(void)
{
airq_iv_cache = dma_pool_create("airq_iv_cache", cio_get_dma_css_dev(),
cache_line_size(),
cache_line_size(), PAGE_SIZE);
if (!airq_iv_cache)
return -ENOMEM;
return 0;
}
| linux-master | drivers/s390/cio/airq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* VFIO based Physical Subchannel device driver
*
* Copyright IBM Corp. 2017
* Copyright Red Hat, Inc. 2019
*
* Author(s): Dong Jia Shi <[email protected]>
* Xiao Feng Ren <[email protected]>
* Cornelia Huck <[email protected]>
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mdev.h>
#include <asm/isc.h>
#include "chp.h"
#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"
struct workqueue_struct *vfio_ccw_work_q;
struct kmem_cache *vfio_ccw_io_region;
struct kmem_cache *vfio_ccw_cmd_region;
struct kmem_cache *vfio_ccw_schib_region;
struct kmem_cache *vfio_ccw_crw_region;
debug_info_t *vfio_ccw_debug_msg_id;
debug_info_t *vfio_ccw_debug_trace_id;
/*
* Helpers
*/
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);
DECLARE_COMPLETION_ONSTACK(completion);
int iretry, ret = 0;
/*
	 * Probably an impossible situation after being called through
	 * FSM callbacks. But in the event it does happen, issue a
	 * warning and return as if things were fine.
*/
if (WARN_ON(!private))
return 0;
iretry = 255;
do {
ret = cio_cancel_halt_clear(sch, &iretry);
if (ret == -EIO) {
pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
sch->schid.ssid, sch->schid.sch_no);
break;
}
/*
* Flush all I/O and wait for
* cancel/halt/clear completion.
*/
private->completion = &completion;
spin_unlock_irq(sch->lock);
if (ret == -EBUSY)
wait_for_completion_timeout(&completion, 3*HZ);
private->completion = NULL;
flush_workqueue(vfio_ccw_work_q);
spin_lock_irq(sch->lock);
ret = cio_disable_subchannel(sch);
} while (ret == -EBUSY);
return ret;
}
void vfio_ccw_sch_io_todo(struct work_struct *work)
{
struct vfio_ccw_private *private;
struct irb *irb;
bool is_final;
bool cp_is_finished = false;
private = container_of(work, struct vfio_ccw_private, io_work);
irb = &private->irb;
is_final = !(scsw_actl(&irb->scsw) &
(SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
if (scsw_is_solicited(&irb->scsw)) {
cp_update_scsw(&private->cp, &irb->scsw);
if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) {
cp_free(&private->cp);
cp_is_finished = true;
}
}
mutex_lock(&private->io_mutex);
memcpy(private->io_region->irb_area, irb, sizeof(*irb));
mutex_unlock(&private->io_mutex);
/*
* Reset to IDLE only if processing of a channel program
* has finished. Do not overwrite a possible processing
* state if the interrupt was unsolicited, or if the final
* interrupt was for HSCH or CSCH.
*/
if (cp_is_finished)
private->state = VFIO_CCW_STATE_IDLE;
if (private->io_trigger)
eventfd_signal(private->io_trigger, 1);
}
void vfio_ccw_crw_todo(struct work_struct *work)
{
struct vfio_ccw_private *private;
private = container_of(work, struct vfio_ccw_private, crw_work);
if (!list_empty(&private->crw) && private->crw_trigger)
eventfd_signal(private->crw_trigger, 1);
}
/*
* Css driver callbacks
*/
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);
/*
* The subchannel should still be disabled at this point,
* so an interrupt would be quite surprising. As with an
* interrupt while the FSM is closed, let's attempt to
* disable the subchannel again.
*/
if (!private) {
VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: unexpected interrupt\n",
sch->schid.cssid, sch->schid.ssid,
sch->schid.sch_no);
cio_disable_subchannel(sch);
return;
}
inc_irq_stat(IRQIO_CIO);
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}
static void vfio_ccw_free_parent(struct device *dev)
{
struct vfio_ccw_parent *parent = container_of(dev, struct vfio_ccw_parent, dev);
kfree(parent);
}
static int vfio_ccw_sch_probe(struct subchannel *sch)
{
struct pmcw *pmcw = &sch->schib.pmcw;
struct vfio_ccw_parent *parent;
int ret = -ENOMEM;
if (pmcw->qf) {
dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
dev_name(&sch->dev));
return -ENODEV;
}
parent = kzalloc(struct_size(parent, mdev_types, 1), GFP_KERNEL);
if (!parent)
return -ENOMEM;
dev_set_name(&parent->dev, "parent");
parent->dev.parent = &sch->dev;
parent->dev.release = &vfio_ccw_free_parent;
ret = device_register(&parent->dev);
if (ret)
goto out_free;
dev_set_drvdata(&sch->dev, parent);
parent->mdev_type.sysfs_name = "io";
parent->mdev_type.pretty_name = "I/O subchannel (Non-QDIO)";
parent->mdev_types[0] = &parent->mdev_type;
ret = mdev_register_parent(&parent->parent, &sch->dev,
&vfio_ccw_mdev_driver,
parent->mdev_types, 1);
if (ret)
goto out_unreg;
VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
sch->schid.cssid, sch->schid.ssid,
sch->schid.sch_no);
return 0;
out_unreg:
device_del(&parent->dev);
out_free:
put_device(&parent->dev);
dev_set_drvdata(&sch->dev, NULL);
return ret;
}
static void vfio_ccw_sch_remove(struct subchannel *sch)
{
struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
mdev_unregister_parent(&parent->parent);
device_unregister(&parent->dev);
dev_set_drvdata(&sch->dev, NULL);
VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
sch->schid.cssid, sch->schid.ssid,
sch->schid.sch_no);
}
static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);
if (!private)
return;
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}
/**
* vfio_ccw_sch_event - process subchannel event
* @sch: subchannel
* @process: non-zero if function is called in process context
*
* An unspecified event occurred for this subchannel. Adjust data according
* to the current operational state of the subchannel. Return zero when the
* event has been handled sufficiently or -EAGAIN when this function should
* be called again in process context.
*/
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);
unsigned long flags;
int rc = -EAGAIN;
spin_lock_irqsave(sch->lock, flags);
if (!device_is_registered(&sch->dev))
goto out_unlock;
if (work_pending(&sch->todo_work))
goto out_unlock;
rc = 0;
if (cio_update_schib(sch)) {
if (private)
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}
out_unlock:
spin_unlock_irqrestore(sch->lock, flags);
return rc;
}
static void vfio_ccw_queue_crw(struct vfio_ccw_private *private,
unsigned int rsc,
unsigned int erc,
unsigned int rsid)
{
struct vfio_ccw_crw *crw;
/*
* If unable to allocate a CRW, just drop the event and
* carry on. The guest will either see a later one or
* learn when it issues its own store subchannel.
*/
crw = kzalloc(sizeof(*crw), GFP_ATOMIC);
if (!crw)
return;
/*
* Build the CRW based on the inputs given to us.
*/
crw->crw.rsc = rsc;
crw->crw.erc = erc;
crw->crw.rsid = rsid;
list_add_tail(&crw->next, &private->crw);
queue_work(vfio_ccw_work_q, &private->crw_work);
}
static int vfio_ccw_chp_event(struct subchannel *sch,
struct chp_link *link, int event)
{
struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);
int mask = chp_ssd_get_mask(&sch->ssd_info, link);
int retry = 255;
if (!private || !mask)
return 0;
trace_vfio_ccw_chp_event(sch->schid, mask, event);
VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: mask=0x%x event=%d\n",
sch->schid.cssid,
sch->schid.ssid, sch->schid.sch_no,
mask, event);
if (cio_update_schib(sch))
return -ENODEV;
switch (event) {
case CHP_VARY_OFF:
/* Path logically turned off */
sch->opm &= ~mask;
sch->lpm &= ~mask;
if (sch->schib.pmcw.lpum & mask)
cio_cancel_halt_clear(sch, &retry);
break;
case CHP_OFFLINE:
/* Path is gone */
if (sch->schib.pmcw.lpum & mask)
cio_cancel_halt_clear(sch, &retry);
vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN,
link->chpid.id);
break;
case CHP_VARY_ON:
/* Path logically turned on */
sch->opm |= mask;
sch->lpm |= mask;
break;
case CHP_ONLINE:
/* Path became available */
sch->lpm |= mask & sch->opm;
vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT,
link->chpid.id);
break;
}
return 0;
}
static struct css_device_id vfio_ccw_sch_ids[] = {
{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);
static struct css_driver vfio_ccw_sch_driver = {
.drv = {
.name = "vfio_ccw",
.owner = THIS_MODULE,
},
.subchannel_type = vfio_ccw_sch_ids,
.irq = vfio_ccw_sch_irq,
.probe = vfio_ccw_sch_probe,
.remove = vfio_ccw_sch_remove,
.shutdown = vfio_ccw_sch_shutdown,
.sch_event = vfio_ccw_sch_event,
.chp_event = vfio_ccw_chp_event,
};
static int __init vfio_ccw_debug_init(void)
{
vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1,
11 * sizeof(long));
if (!vfio_ccw_debug_msg_id)
goto out_unregister;
debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view);
debug_set_level(vfio_ccw_debug_msg_id, 2);
vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16);
if (!vfio_ccw_debug_trace_id)
goto out_unregister;
debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view);
debug_set_level(vfio_ccw_debug_trace_id, 2);
return 0;
out_unregister:
debug_unregister(vfio_ccw_debug_msg_id);
debug_unregister(vfio_ccw_debug_trace_id);
return -1;
}
static void vfio_ccw_debug_exit(void)
{
debug_unregister(vfio_ccw_debug_msg_id);
debug_unregister(vfio_ccw_debug_trace_id);
}
static void vfio_ccw_destroy_regions(void)
{
kmem_cache_destroy(vfio_ccw_crw_region);
kmem_cache_destroy(vfio_ccw_schib_region);
kmem_cache_destroy(vfio_ccw_cmd_region);
kmem_cache_destroy(vfio_ccw_io_region);
}
static int __init vfio_ccw_sch_init(void)
{
int ret;
ret = vfio_ccw_debug_init();
if (ret)
return ret;
vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
if (!vfio_ccw_work_q) {
ret = -ENOMEM;
goto out_regions;
}
vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
sizeof(struct ccw_io_region), 0,
SLAB_ACCOUNT, 0,
sizeof(struct ccw_io_region), NULL);
if (!vfio_ccw_io_region) {
ret = -ENOMEM;
goto out_regions;
}
vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
sizeof(struct ccw_cmd_region), 0,
SLAB_ACCOUNT, 0,
sizeof(struct ccw_cmd_region), NULL);
if (!vfio_ccw_cmd_region) {
ret = -ENOMEM;
goto out_regions;
}
vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region",
sizeof(struct ccw_schib_region), 0,
SLAB_ACCOUNT, 0,
sizeof(struct ccw_schib_region), NULL);
if (!vfio_ccw_schib_region) {
ret = -ENOMEM;
goto out_regions;
}
vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region",
sizeof(struct ccw_crw_region), 0,
SLAB_ACCOUNT, 0,
sizeof(struct ccw_crw_region), NULL);
if (!vfio_ccw_crw_region) {
ret = -ENOMEM;
goto out_regions;
}
ret = mdev_register_driver(&vfio_ccw_mdev_driver);
if (ret)
goto out_regions;
isc_register(VFIO_CCW_ISC);
ret = css_driver_register(&vfio_ccw_sch_driver);
if (ret) {
isc_unregister(VFIO_CCW_ISC);
goto out_driver;
}
return ret;
out_driver:
mdev_unregister_driver(&vfio_ccw_mdev_driver);
out_regions:
vfio_ccw_destroy_regions();
destroy_workqueue(vfio_ccw_work_q);
vfio_ccw_debug_exit();
return ret;
}
static void __exit vfio_ccw_sch_exit(void)
{
css_driver_unregister(&vfio_ccw_sch_driver);
mdev_unregister_driver(&vfio_ccw_mdev_driver);
isc_unregister(VFIO_CCW_ISC);
vfio_ccw_destroy_regions();
destroy_workqueue(vfio_ccw_work_q);
vfio_ccw_debug_exit();
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/s390/cio/vfio_ccw_drv.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2002
* Author(s): Cornelia Huck ([email protected])
* Martin Schwidefsky ([email protected])
*
* Status accumulation and basic sense functions.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"
/*
* Check for any kind of channel or interface control check but don't
* issue the message for the console device
*/
static void
ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
char dbf_text[15];
if (!scsw_is_valid_cstat(&irb->scsw) ||
!(scsw_cstat(&irb->scsw) & (SCHN_STAT_CHN_DATA_CHK |
SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK)))
return;
CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check "
"received"
" ... device %04x on subchannel 0.%x.%04x, dev_stat "
": %02X sch_stat : %02X\n",
cdev->private->dev_id.devno, sch->schid.ssid,
sch->schid.sch_no,
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw));
sprintf(dbf_text, "chk%x", sch->schid.sch_no);
CIO_TRACE_EVENT(0, dbf_text);
CIO_HEX_EVENT(0, irb, sizeof(struct irb));
}
/*
* Some paths became not operational (pno bit in scsw is set).
*/
static void
ccw_device_path_notoper(struct ccw_device *cdev)
{
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
if (cio_update_schib(sch))
goto doverify;
CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
"not operational \n", __func__,
sch->schid.ssid, sch->schid.sch_no,
sch->schib.pmcw.pnom);
sch->lpm &= ~sch->schib.pmcw.pnom;
doverify:
cdev->private->flags.doverify = 1;
}
/*
* Copy valid bits from the extended control word to device irb.
*/
static void
ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb)
{
/*
	 * Copy the extended control bit if it is valid... yes, there
	 * are conditions that have to be met for the extended control
	 * bit to have meaning. Sick.
*/
cdev->private->dma_area->irb.scsw.cmd.ectl = 0;
if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
!(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
cdev->private->dma_area->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
/* Check if extended control word is valid. */
if (!cdev->private->dma_area->irb.scsw.cmd.ectl)
return;
/* Copy concurrent sense / model dependent information. */
memcpy(&cdev->private->dma_area->irb.ecw, irb->ecw, sizeof(irb->ecw));
}
/*
* Check if extended status word is valid.
*/
static int
ccw_device_accumulate_esw_valid(struct irb *irb)
{
if (!irb->scsw.cmd.eswf &&
(irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND))
return 0;
if (irb->scsw.cmd.stctl ==
(SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) &&
!(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
return 0;
return 1;
}
/*
* Copy valid bits from the extended status word to device irb.
*/
static void
ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb)
{
struct irb *cdev_irb;
struct sublog *cdev_sublog, *sublog;
if (!ccw_device_accumulate_esw_valid(irb))
return;
cdev_irb = &cdev->private->dma_area->irb;
/* Copy last path used mask. */
cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
/* Copy subchannel logout information if esw is of format 0. */
if (irb->scsw.cmd.eswf) {
cdev_sublog = &cdev_irb->esw.esw0.sublog;
sublog = &irb->esw.esw0.sublog;
/* Copy extended status flags. */
cdev_sublog->esf = sublog->esf;
/*
* Copy fields that have a meaning for channel data check
* channel control check and interface control check.
*/
if (irb->scsw.cmd.cstat & (SCHN_STAT_CHN_DATA_CHK |
SCHN_STAT_CHN_CTRL_CHK |
SCHN_STAT_INTF_CTRL_CHK)) {
/* Copy ancillary report bit. */
cdev_sublog->arep = sublog->arep;
/* Copy field-validity-flags. */
cdev_sublog->fvf = sublog->fvf;
/* Copy storage access code. */
cdev_sublog->sacc = sublog->sacc;
/* Copy termination code. */
cdev_sublog->termc = sublog->termc;
/* Copy sequence code. */
cdev_sublog->seqc = sublog->seqc;
}
/* Copy device status check. */
cdev_sublog->devsc = sublog->devsc;
/* Copy secondary error. */
cdev_sublog->serr = sublog->serr;
/* Copy i/o-error alert. */
cdev_sublog->ioerr = sublog->ioerr;
/* Copy channel path timeout bit. */
if (irb->scsw.cmd.cstat & SCHN_STAT_INTF_CTRL_CHK)
cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt;
/* Copy failing storage address validity flag. */
cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf;
if (cdev_irb->esw.esw0.erw.fsavf) {
/* ... and copy the failing storage address. */
memcpy(cdev_irb->esw.esw0.faddr, irb->esw.esw0.faddr,
sizeof (irb->esw.esw0.faddr));
/* ... and copy the failing storage address format. */
cdev_irb->esw.esw0.erw.fsaf = irb->esw.esw0.erw.fsaf;
}
/* Copy secondary ccw address validity bit. */
cdev_irb->esw.esw0.erw.scavf = irb->esw.esw0.erw.scavf;
if (irb->esw.esw0.erw.scavf)
/* ... and copy the secondary ccw address. */
cdev_irb->esw.esw0.saddr = irb->esw.esw0.saddr;
}
/* FIXME: DCTI for format 2? */
/* Copy authorization bit. */
cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth;
/* Copy path verification required flag. */
cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf;
if (irb->esw.esw0.erw.pvrf)
cdev->private->flags.doverify = 1;
/* Copy concurrent sense bit. */
cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons;
if (irb->esw.esw0.erw.cons)
cdev_irb->esw.esw0.erw.scnt = irb->esw.esw0.erw.scnt;
}
/*
* Accumulate status from irb to devstat.
*/
void
ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb)
{
struct irb *cdev_irb;
/*
* Check if the status pending bit is set in stctl.
	 * If not, the remaining bits have no meaning and we must ignore them.
	 * The esw is not meaningful either...
*/
if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
return;
/* Check for channel checks and interface control checks. */
ccw_device_msg_control_check(cdev, irb);
/* Check for path not operational. */
if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
ccw_device_path_notoper(cdev);
/* No irb accumulation for transport mode irbs. */
if (scsw_is_tm(&irb->scsw)) {
memcpy(&cdev->private->dma_area->irb, irb, sizeof(struct irb));
return;
}
/*
* Don't accumulate unsolicited interrupts.
*/
if (!scsw_is_solicited(&irb->scsw))
return;
cdev_irb = &cdev->private->dma_area->irb;
/*
* If the clear function had been performed, all formerly pending
* status at the subchannel has been cleared and we must not pass
* intermediate accumulated status to the device driver.
*/
if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
/* Copy bits which are valid only for the start function. */
if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
/* Copy key. */
cdev_irb->scsw.cmd.key = irb->scsw.cmd.key;
/* Copy suspend control bit. */
cdev_irb->scsw.cmd.sctl = irb->scsw.cmd.sctl;
/* Accumulate deferred condition code. */
cdev_irb->scsw.cmd.cc |= irb->scsw.cmd.cc;
/* Copy ccw format bit. */
cdev_irb->scsw.cmd.fmt = irb->scsw.cmd.fmt;
/* Copy prefetch bit. */
cdev_irb->scsw.cmd.pfch = irb->scsw.cmd.pfch;
/* Copy initial-status-interruption-control. */
cdev_irb->scsw.cmd.isic = irb->scsw.cmd.isic;
/* Copy address limit checking control. */
cdev_irb->scsw.cmd.alcc = irb->scsw.cmd.alcc;
/* Copy suppress suspend bit. */
cdev_irb->scsw.cmd.ssi = irb->scsw.cmd.ssi;
}
/* Take care of the extended control bit and extended control word. */
ccw_device_accumulate_ecw(cdev, irb);
/* Accumulate function control. */
cdev_irb->scsw.cmd.fctl |= irb->scsw.cmd.fctl;
/* Copy activity control. */
cdev_irb->scsw.cmd.actl = irb->scsw.cmd.actl;
/* Accumulate status control. */
cdev_irb->scsw.cmd.stctl |= irb->scsw.cmd.stctl;
/*
* Copy ccw address if it is valid. This is a bit simplified
* but should be close enough for all practical purposes.
*/
if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) ||
((irb->scsw.cmd.stctl ==
(SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) &&
(irb->scsw.cmd.actl & SCSW_ACTL_DEVACT) &&
(irb->scsw.cmd.actl & SCSW_ACTL_SCHACT)) ||
(irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED))
cdev_irb->scsw.cmd.cpa = irb->scsw.cmd.cpa;
/* Accumulate device status, but not the device busy flag. */
cdev_irb->scsw.cmd.dstat &= ~DEV_STAT_BUSY;
/* dstat is not always valid. */
if (irb->scsw.cmd.stctl &
(SCSW_STCTL_PRIM_STATUS | SCSW_STCTL_SEC_STATUS
| SCSW_STCTL_INTER_STATUS | SCSW_STCTL_ALERT_STATUS))
cdev_irb->scsw.cmd.dstat |= irb->scsw.cmd.dstat;
/* Accumulate subchannel status. */
cdev_irb->scsw.cmd.cstat |= irb->scsw.cmd.cstat;
/* Copy residual count if it is valid. */
if ((irb->scsw.cmd.stctl & SCSW_STCTL_PRIM_STATUS) &&
(irb->scsw.cmd.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN))
== 0)
cdev_irb->scsw.cmd.count = irb->scsw.cmd.count;
/* Take care of bits in the extended status word. */
ccw_device_accumulate_esw(cdev, irb);
/*
* Check whether we must issue a SENSE CCW ourselves if there is no
* concurrent sense facility installed for the subchannel.
* No sense is required if no delayed sense is pending
* and we did not get a unit check without sense information.
*
* Note: We should check for ioinfo[irq]->flags.consns but VM
* violates the ESA/390 architecture and doesn't present an
* operand exception for virtual devices without concurrent
* sense facility available/supported when enabling the
* concurrent sense facility.
*/
if ((cdev_irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
!(cdev_irb->esw.esw0.erw.cons))
cdev->private->flags.dosense = 1;
}
/*
* Do a basic sense.
*/
int
ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
{
struct subchannel *sch;
struct ccw1 *sense_ccw;
int rc;
sch = to_subchannel(cdev->dev.parent);
	/* A sense is required, can we do it now? */
if (scsw_actl(&irb->scsw) & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT))
/*
		 * We received a unit check but we have no final
* status yet, therefore we must delay the SENSE
* processing. We must not report this intermediate
* status to the device interrupt handler.
*/
return -EBUSY;
/*
* We have ending status but no sense information. Do a basic sense.
*/
sense_ccw = &to_io_private(sch)->dma_area->sense_ccw;
sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
sense_ccw->cda = virt_to_phys(cdev->private->dma_area->irb.ecw);
sense_ccw->count = SENSE_MAX_COUNT;
sense_ccw->flags = CCW_FLAG_SLI;
rc = cio_start(sch, sense_ccw, 0xff);
if (rc == -ENODEV || rc == -EACCES)
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
return rc;
}
/*
* Add information from basic sense to devstat.
*/
void
ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb)
{
/*
* Check if the status pending bit is set in stctl.
	 * If not, the remaining bits have no meaning and we must ignore them.
	 * The esw is not meaningful either.
*/
if (!(scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))
return;
/* Check for channel checks and interface control checks. */
ccw_device_msg_control_check(cdev, irb);
/* Check for path not operational. */
if (scsw_is_valid_pno(&irb->scsw) && scsw_pno(&irb->scsw))
ccw_device_path_notoper(cdev);
if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
(irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
cdev->private->flags.dosense = 0;
}
/* Check if path verification is required. */
if (ccw_device_accumulate_esw_valid(irb) &&
irb->esw.esw0.erw.pvrf)
cdev->private->flags.doverify = 1;
}
/*
* This function accumulates the status into the private devstat and
* starts a basic sense if one is needed.
*/
int
ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb)
{
ccw_device_accumulate_irb(cdev, irb);
if ((irb->scsw.cmd.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0)
return -EBUSY;
/* Check for basic sense. */
if (cdev->private->flags.dosense &&
!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
cdev->private->flags.dosense = 0;
return 0;
}
if (cdev->private->flags.dosense) {
ccw_device_do_sense(cdev, irb);
return -EBUSY;
}
return 0;
}
| linux-master | drivers/s390/cio/device_status.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 1999, 2010
* Author(s): Cornelia Huck ([email protected])
* Arnd Bergmann ([email protected])
* Peter Oberparleiter <[email protected]>
*/
#include <linux/bug.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/chpid.h>
#include <asm/sclp.h>
#include <asm/crw.h>
#include "cio.h"
#include "css.h"
#include "ioasm.h"
#include "cio_debug.h"
#include "chp.h"
#define to_channelpath(device) container_of(device, struct channel_path, dev)
#define CHP_INFO_UPDATE_INTERVAL 1*HZ
enum cfg_task_t {
cfg_none,
cfg_configure,
cfg_deconfigure
};
/* Map for pending configure tasks. */
static enum cfg_task_t chp_cfg_task[__MAX_CSSID + 1][__MAX_CHPID + 1];
static DEFINE_SPINLOCK(cfg_lock);
/* Map for channel-path status. */
static struct sclp_chp_info chp_info;
static DEFINE_MUTEX(info_lock);
/* Time after which channel-path status may be outdated. */
static unsigned long chp_info_expires;
static struct work_struct cfg_work;
/* Wait queue for configure completion events. */
static DECLARE_WAIT_QUEUE_HEAD(cfg_wait_queue);
/* Set vary state for given chpid. */
static void set_chp_logically_online(struct chp_id chpid, int onoff)
{
chpid_to_chp(chpid)->state = onoff;
}
/* On success return 0 if channel-path is varied offline, 1 if it is varied
* online. Return -ENODEV if channel-path is not registered. */
int chp_get_status(struct chp_id chpid)
{
return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
}
/**
* chp_get_sch_opm - return opm for subchannel
* @sch: subchannel
*
* Calculate and return the operational path mask (opm) based on the chpids
* used by the subchannel and the status of the associated channel-paths.
*/
u8 chp_get_sch_opm(struct subchannel *sch)
{
struct chp_id chpid;
int opm;
int i;
opm = 0;
chp_id_init(&chpid);
for (i = 0; i < 8; i++) {
opm <<= 1;
chpid.id = sch->schib.pmcw.chpid[i];
if (chp_get_status(chpid) != 0)
opm |= 1;
}
return opm;
}
EXPORT_SYMBOL_GPL(chp_get_sch_opm);
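/*
 * Worked example (illustrative only): if all eight chpid slots of a
 * subchannel are registered and only the chpid in position 1 is varied
 * online, the loop above accumulates opm == 0x40, i.e. the bit for the
 * second position counted from the most significant bit.
 */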
/**
* chp_is_registered - check if a channel-path is registered
* @chpid: channel-path ID
*
* Return non-zero if a channel-path with the given chpid is registered,
* zero otherwise.
*/
int chp_is_registered(struct chp_id chpid)
{
return chpid_to_chp(chpid) != NULL;
}
/*
* Function: s390_vary_chpid
* Varies the specified chpid online or offline
*/
static int s390_vary_chpid(struct chp_id chpid, int on)
{
char dbf_text[15];
int status;
sprintf(dbf_text, on?"varyon%x.%02x":"varyoff%x.%02x", chpid.cssid,
chpid.id);
CIO_TRACE_EVENT(2, dbf_text);
status = chp_get_status(chpid);
if (!on && !status)
return 0;
set_chp_logically_online(chpid, on);
chsc_chp_vary(chpid, on);
return 0;
}
/*
* Channel measurement related functions
*/
static ssize_t chp_measurement_chars_read(struct file *filp,
struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct channel_path *chp;
struct device *device;
device = kobj_to_dev(kobj);
chp = to_channelpath(device);
if (chp->cmg == -1)
return 0;
return memory_read_from_buffer(buf, count, &off, &chp->cmg_chars,
sizeof(chp->cmg_chars));
}
static const struct bin_attribute chp_measurement_chars_attr = {
.attr = {
.name = "measurement_chars",
.mode = S_IRUSR,
},
.size = sizeof(struct cmg_chars),
.read = chp_measurement_chars_read,
};
static void chp_measurement_copy_block(struct cmg_entry *buf,
struct channel_subsystem *css,
struct chp_id chpid)
{
void *area;
struct cmg_entry *entry, reference_buf;
int idx;
if (chpid.id < 128) {
area = css->cub_addr1;
idx = chpid.id;
} else {
area = css->cub_addr2;
idx = chpid.id - 128;
}
entry = area + (idx * sizeof(struct cmg_entry));
do {
memcpy(buf, entry, sizeof(*entry));
memcpy(&reference_buf, entry, sizeof(*entry));
} while (reference_buf.values[0] != buf->values[0]);
}
static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct channel_path *chp;
struct channel_subsystem *css;
struct device *device;
unsigned int size;
device = kobj_to_dev(kobj);
chp = to_channelpath(device);
css = to_css(chp->dev.parent);
size = sizeof(struct cmg_entry);
/* Only allow single reads. */
if (off || count < size)
return 0;
chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->chpid);
count = size;
return count;
}
static const struct bin_attribute chp_measurement_attr = {
.attr = {
.name = "measurement",
.mode = S_IRUSR,
},
.size = sizeof(struct cmg_entry),
.read = chp_measurement_read,
};
void chp_remove_cmg_attr(struct channel_path *chp)
{
device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
device_remove_bin_file(&chp->dev, &chp_measurement_attr);
}
int chp_add_cmg_attr(struct channel_path *chp)
{
int ret;
ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
if (ret)
return ret;
ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
if (ret)
device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
return ret;
}
/*
* Files for the channel path entries.
*/
static ssize_t chp_status_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct channel_path *chp = to_channelpath(dev);
int status;
mutex_lock(&chp->lock);
status = chp->state;
mutex_unlock(&chp->lock);
return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n");
}
static ssize_t chp_status_write(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct channel_path *cp = to_channelpath(dev);
char cmd[10];
int num_args;
int error;
num_args = sscanf(buf, "%5s", cmd);
if (!num_args)
return count;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
if (!strncasecmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
mutex_lock(&cp->lock);
error = s390_vary_chpid(cp->chpid, 1);
mutex_unlock(&cp->lock);
} else if (!strncasecmp(cmd, "off", 3) || !strcmp(cmd, "0")) {
mutex_lock(&cp->lock);
error = s390_vary_chpid(cp->chpid, 0);
mutex_unlock(&cp->lock);
} else
error = -EINVAL;
return error < 0 ? error : count;
}
static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
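/*
 * Example usage from user space (sysfs path is illustrative):
 *   echo on  > /sys/devices/css0/chp0.4e/status
 *   echo off > /sys/devices/css0/chp0.4e/status
 */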
static ssize_t chp_configure_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct channel_path *cp;
int status;
cp = to_channelpath(dev);
status = chp_info_get_status(cp->chpid);
if (status < 0)
return status;
return sysfs_emit(buf, "%d\n", status);
}
static int cfg_wait_idle(void);
static ssize_t chp_configure_write(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct channel_path *cp;
int val;
char delim;
if (sscanf(buf, "%d %c", &val, &delim) != 1)
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
cp = to_channelpath(dev);
chp_cfg_schedule(cp->chpid, val);
cfg_wait_idle();
return count;
}
static DEVICE_ATTR(configure, 0644, chp_configure_show, chp_configure_write);
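/*
 * Example usage (illustrative): writing "1" schedules a configure
 * request, "0" a deconfigure request, e.g.
 *   echo 1 > /sys/devices/css0/chp0.4e/configure
 * The write blocks in cfg_wait_idle() until the request has been
 * processed.
 */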
static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct channel_path *chp = to_channelpath(dev);
u8 type;
mutex_lock(&chp->lock);
type = chp->desc.desc;
mutex_unlock(&chp->lock);
return sprintf(buf, "%x\n", type);
}
static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
static ssize_t chp_cmg_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct channel_path *chp = to_channelpath(dev);
if (!chp)
return 0;
if (chp->cmg == -1) /* channel measurements not available */
return sprintf(buf, "unknown\n");
return sprintf(buf, "%x\n", chp->cmg);
}
static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
static ssize_t chp_shared_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct channel_path *chp = to_channelpath(dev);
if (!chp)
return 0;
if (chp->shared == -1) /* channel measurements not available */
return sprintf(buf, "unknown\n");
return sprintf(buf, "%x\n", chp->shared);
}
static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct channel_path *chp = to_channelpath(dev);
ssize_t rc;
mutex_lock(&chp->lock);
if (chp->desc_fmt1.flags & 0x10)
rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid);
else
rc = 0;
mutex_unlock(&chp->lock);
return rc;
}
static DEVICE_ATTR(chid, 0444, chp_chid_show, NULL);
static ssize_t chp_chid_external_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct channel_path *chp = to_channelpath(dev);
ssize_t rc;
mutex_lock(&chp->lock);
if (chp->desc_fmt1.flags & 0x10)
rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0);
else
rc = 0;
mutex_unlock(&chp->lock);
return rc;
}
static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL);
static ssize_t chp_esc_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct channel_path *chp = to_channelpath(dev);
ssize_t rc;
mutex_lock(&chp->lock);
rc = sprintf(buf, "%x\n", chp->desc_fmt1.esc);
mutex_unlock(&chp->lock);
return rc;
}
static DEVICE_ATTR(esc, 0444, chp_esc_show, NULL);
static ssize_t util_string_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct channel_path *chp = to_channelpath(kobj_to_dev(kobj));
ssize_t rc;
mutex_lock(&chp->lock);
rc = memory_read_from_buffer(buf, count, &off, chp->desc_fmt3.util_str,
sizeof(chp->desc_fmt3.util_str));
mutex_unlock(&chp->lock);
return rc;
}
static BIN_ATTR_RO(util_string,
sizeof(((struct channel_path_desc_fmt3 *)0)->util_str));
static struct bin_attribute *chp_bin_attrs[] = {
&bin_attr_util_string,
NULL,
};
static struct attribute *chp_attrs[] = {
&dev_attr_status.attr,
&dev_attr_configure.attr,
&dev_attr_type.attr,
&dev_attr_cmg.attr,
&dev_attr_shared.attr,
&dev_attr_chid.attr,
&dev_attr_chid_external.attr,
&dev_attr_esc.attr,
NULL,
};
static struct attribute_group chp_attr_group = {
.attrs = chp_attrs,
.bin_attrs = chp_bin_attrs,
};
static const struct attribute_group *chp_attr_groups[] = {
&chp_attr_group,
NULL,
};
static void chp_release(struct device *dev)
{
struct channel_path *cp;
cp = to_channelpath(dev);
kfree(cp);
}
/**
* chp_update_desc - update channel-path description
* @chp: channel-path
*
* Update the channel-path description of the specified channel-path
* including channel measurement related information.
* Return zero on success, non-zero otherwise.
*/
int chp_update_desc(struct channel_path *chp)
{
int rc;
rc = chsc_determine_fmt0_channel_path_desc(chp->chpid, &chp->desc);
if (rc)
return rc;
/*
* Fetching the following data is optional. Not all machines or
* hypervisors implement the required chsc commands.
*/
chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
chsc_determine_fmt3_channel_path_desc(chp->chpid, &chp->desc_fmt3);
chsc_get_channel_measurement_chars(chp);
return 0;
}
/**
* chp_new - register a new channel-path
* @chpid: channel-path ID
*
* Create and register data structure representing new channel-path. Return
* zero on success, non-zero otherwise.
*/
int chp_new(struct chp_id chpid)
{
struct channel_subsystem *css = css_by_id(chpid.cssid);
struct channel_path *chp;
int ret = 0;
mutex_lock(&css->mutex);
if (chp_is_registered(chpid))
goto out;
chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
if (!chp) {
ret = -ENOMEM;
goto out;
}
/* fill in status, etc. */
chp->chpid = chpid;
chp->state = 1;
chp->dev.parent = &css->device;
chp->dev.groups = chp_attr_groups;
chp->dev.release = chp_release;
mutex_init(&chp->lock);
/* Obtain channel path description and fill it in. */
ret = chp_update_desc(chp);
if (ret)
goto out_free;
if ((chp->desc.flags & 0x80) == 0) {
ret = -ENODEV;
goto out_free;
}
dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
/* make it known to the system */
ret = device_register(&chp->dev);
if (ret) {
CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
chpid.cssid, chpid.id, ret);
put_device(&chp->dev);
goto out;
}
if (css->cm_enabled) {
ret = chp_add_cmg_attr(chp);
if (ret) {
device_unregister(&chp->dev);
goto out;
}
}
css->chps[chpid.id] = chp;
goto out;
out_free:
kfree(chp);
out:
mutex_unlock(&css->mutex);
return ret;
}
/**
* chp_get_chp_desc - return newly allocated channel-path description
* @chpid: channel-path ID
*
* On success return a newly allocated copy of the channel-path description
* data associated with the given channel-path ID. Return %NULL on error.
*/
struct channel_path_desc_fmt0 *chp_get_chp_desc(struct chp_id chpid)
{
struct channel_path *chp;
struct channel_path_desc_fmt0 *desc;
chp = chpid_to_chp(chpid);
if (!chp)
return NULL;
desc = kmalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
return NULL;
mutex_lock(&chp->lock);
memcpy(desc, &chp->desc, sizeof(*desc));
mutex_unlock(&chp->lock);
return desc;
}
/**
* chp_process_crw - process channel-path status change
 * @crw0: channel report-word to handle
* @crw1: second channel-report word (always NULL)
* @overflow: crw overflow indication
*
* Handle channel-report-words indicating that the status of a channel-path
* has changed.
*/
static void chp_process_crw(struct crw *crw0, struct crw *crw1,
int overflow)
{
struct chp_id chpid;
if (overflow) {
css_schedule_eval_all();
return;
}
CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
crw0->erc, crw0->rsid);
/*
* Check for solicited machine checks. These are
* created by reset channel path and need not be
* handled here.
*/
if (crw0->slct) {
CIO_CRW_EVENT(2, "solicited machine check for "
"channel path %02X\n", crw0->rsid);
return;
}
chp_id_init(&chpid);
chpid.id = crw0->rsid;
switch (crw0->erc) {
	case CRW_ERC_IPARM: /* Path has become available. */
case CRW_ERC_INIT:
chp_new(chpid);
chsc_chp_online(chpid);
break;
	case CRW_ERC_PERRI: /* Path has become unavailable. */
case CRW_ERC_PERRN:
chsc_chp_offline(chpid);
break;
default:
CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
crw0->erc);
}
}
int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link)
{
int i;
int mask;
for (i = 0; i < 8; i++) {
mask = 0x80 >> i;
if (!(ssd->path_mask & mask))
continue;
if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid))
continue;
if ((ssd->fla_valid_mask & mask) &&
((ssd->fla[i] & link->fla_mask) != link->fla))
continue;
return mask;
}
return 0;
}
EXPORT_SYMBOL_GPL(chp_ssd_get_mask);
static inline int info_bit_num(struct chp_id id)
{
return id.id + id.cssid * (__MAX_CHPID + 1);
}
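/*
 * Illustrative example (assuming __MAX_CHPID == 255): chpid 0.4e maps
 * to bit 0x4e and chpid 1.02 to bit 0x102; each css contributes a
 * contiguous block of 256 bits.
 */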
/* Force chp_info refresh on next call to info_validate(). */
static void info_expire(void)
{
mutex_lock(&info_lock);
chp_info_expires = jiffies - 1;
mutex_unlock(&info_lock);
}
/* Ensure that chp_info is up-to-date. */
static int info_update(void)
{
int rc;
mutex_lock(&info_lock);
rc = 0;
if (time_after(jiffies, chp_info_expires)) {
/* Data is too old, update. */
rc = sclp_chp_read_info(&chp_info);
		chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL;
}
mutex_unlock(&info_lock);
return rc;
}
/**
* chp_info_get_status - retrieve configure status of a channel-path
* @chpid: channel-path ID
*
* On success, return 0 for standby, 1 for configured, 2 for reserved,
* 3 for not recognized. Return negative error code on error.
*/
int chp_info_get_status(struct chp_id chpid)
{
int rc;
int bit;
rc = info_update();
if (rc)
return rc;
bit = info_bit_num(chpid);
mutex_lock(&info_lock);
if (!chp_test_bit(chp_info.recognized, bit))
rc = CHP_STATUS_NOT_RECOGNIZED;
else if (chp_test_bit(chp_info.configured, bit))
rc = CHP_STATUS_CONFIGURED;
else if (chp_test_bit(chp_info.standby, bit))
rc = CHP_STATUS_STANDBY;
else
rc = CHP_STATUS_RESERVED;
mutex_unlock(&info_lock);
return rc;
}
/* Return configure task for chpid. */
static enum cfg_task_t cfg_get_task(struct chp_id chpid)
{
return chp_cfg_task[chpid.cssid][chpid.id];
}
/* Set configure task for chpid. */
static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg)
{
chp_cfg_task[chpid.cssid][chpid.id] = cfg;
}
/* Fetch the first configure task. Set chpid accordingly. */
static enum cfg_task_t chp_cfg_fetch_task(struct chp_id *chpid)
{
enum cfg_task_t t = cfg_none;
chp_id_for_each(chpid) {
t = cfg_get_task(*chpid);
if (t != cfg_none)
break;
}
return t;
}
/* Perform one configure/deconfigure request. Reschedule the work function
 * until the last request has been processed. */
static void cfg_func(struct work_struct *work)
{
struct chp_id chpid;
enum cfg_task_t t;
int rc;
spin_lock(&cfg_lock);
t = chp_cfg_fetch_task(&chpid);
spin_unlock(&cfg_lock);
switch (t) {
case cfg_configure:
rc = sclp_chp_configure(chpid);
if (rc)
CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)="
"%d\n", chpid.cssid, chpid.id, rc);
else {
info_expire();
chsc_chp_online(chpid);
}
break;
case cfg_deconfigure:
rc = sclp_chp_deconfigure(chpid);
if (rc)
CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)="
"%d\n", chpid.cssid, chpid.id, rc);
else {
info_expire();
chsc_chp_offline(chpid);
}
break;
case cfg_none:
/* Get updated information after last change. */
info_update();
wake_up_interruptible(&cfg_wait_queue);
return;
}
spin_lock(&cfg_lock);
if (t == cfg_get_task(chpid))
cfg_set_task(chpid, cfg_none);
spin_unlock(&cfg_lock);
schedule_work(&cfg_work);
}
/**
* chp_cfg_schedule - schedule chpid configuration request
* @chpid: channel-path ID
* @configure: Non-zero for configure, zero for deconfigure
*
* Schedule a channel-path configuration/deconfiguration request.
*/
void chp_cfg_schedule(struct chp_id chpid, int configure)
{
CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id,
configure);
spin_lock(&cfg_lock);
cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
spin_unlock(&cfg_lock);
schedule_work(&cfg_work);
}
/**
* chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request
* @chpid: channel-path ID
*
* Cancel an active channel-path deconfiguration request if it has not yet
* been performed.
*/
void chp_cfg_cancel_deconfigure(struct chp_id chpid)
{
CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id);
spin_lock(&cfg_lock);
if (cfg_get_task(chpid) == cfg_deconfigure)
cfg_set_task(chpid, cfg_none);
spin_unlock(&cfg_lock);
}
static bool cfg_idle(void)
{
struct chp_id chpid;
enum cfg_task_t t;
spin_lock(&cfg_lock);
t = chp_cfg_fetch_task(&chpid);
spin_unlock(&cfg_lock);
return t == cfg_none;
}
static int cfg_wait_idle(void)
{
if (wait_event_interruptible(cfg_wait_queue, cfg_idle()))
return -ERESTARTSYS;
return 0;
}
static int __init chp_init(void)
{
struct chp_id chpid;
int state, ret;
ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
if (ret)
return ret;
INIT_WORK(&cfg_work, cfg_func);
if (info_update())
return 0;
/* Register available channel-paths. */
chp_id_for_each(&chpid) {
state = chp_info_get_status(chpid);
if (state == CHP_STATUS_CONFIGURED ||
state == CHP_STATUS_STANDBY)
chp_new(chpid);
}
return 0;
}
subsys_initcall(chp_init);
| linux-master | drivers/s390/cio/chp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Handling of internal CCW device requests.
*
* Copyright IBM Corp. 2009, 2011
* Author(s): Peter Oberparleiter <[email protected]>
*/
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/types.h>
#include <linux/err.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include "io_sch.h"
#include "cio.h"
#include "device.h"
#include "cio_debug.h"
/**
* lpm_adjust - adjust path mask
* @lpm: path mask to adjust
* @mask: mask of available paths
*
* Shift @lpm right until @lpm and @mask have at least one bit in common or
* until @lpm is zero. Return the resulting lpm.
*/
int lpm_adjust(int lpm, int mask)
{
while (lpm && ((lpm & mask) == 0))
lpm >>= 1;
return lpm;
}
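/*
 * Illustrative example (not part of the original source): with
 * lpm == 0x80 (first path) and mask == 0x0c (two available paths),
 * the loop shifts 0x80 -> 0x40 -> 0x20 -> 0x10 -> 0x08 and returns
 * 0x08, the first path that is both available and at or below the
 * original one.
 */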
/*
* Adjust path mask to use next path and reset retry count. Return resulting
* path mask.
*/
static u16 ccwreq_next_path(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
if (!req->singlepath) {
req->mask = 0;
goto out;
}
req->retries = req->maxretries;
req->mask = lpm_adjust(req->mask >> 1, req->lpm);
out:
return req->mask;
}
/*
* Clean up device state and report to callback.
*/
static void ccwreq_stop(struct ccw_device *cdev, int rc)
{
struct ccw_request *req = &cdev->private->req;
if (req->done)
return;
req->done = 1;
ccw_device_set_timeout(cdev, 0);
memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
if (rc && rc != -ENODEV && req->drc)
rc = req->drc;
req->callback(cdev, req->data, rc);
}
/*
* (Re-)Start the operation until retries and paths are exhausted.
*/
static void ccwreq_do(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw1 *cp = req->cp;
int rc = -EACCES;
while (req->mask) {
if (req->retries-- == 0) {
/* Retries exhausted, try next path. */
ccwreq_next_path(cdev);
continue;
}
/* Perform start function. */
memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
rc = cio_start(sch, cp, (u8) req->mask);
if (rc == 0) {
/* I/O started successfully. */
ccw_device_set_timeout(cdev, req->timeout);
return;
}
if (rc == -ENODEV) {
/* Permanent device error. */
break;
}
if (rc == -EACCES) {
			/* Permanent path error. */
ccwreq_next_path(cdev);
continue;
}
/* Temporary improper status. */
rc = cio_clear(sch);
if (rc)
break;
return;
}
ccwreq_stop(cdev, rc);
}
/**
* ccw_request_start - perform I/O request
* @cdev: ccw device
*
* Perform the I/O request specified by cdev->req.
*/
void ccw_request_start(struct ccw_device *cdev)
{
struct ccw_request *req = &cdev->private->req;
if (req->singlepath) {
/* Try all paths twice to counter link flapping. */
req->mask = 0x8080;
} else
req->mask = req->lpm;
req->retries = req->maxretries;
req->mask = lpm_adjust(req->mask, req->lpm);
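	/*
	 * Note on the 0x8080 double mask above: ccwreq_next_path() keeps
	 * shifting the mask right, so in singlepath mode every path is
	 * offered once via the high byte and a second time via the low
	 * byte before the mask reaches zero.
	 */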
req->drc = 0;
req->done = 0;
req->cancel = 0;
if (!req->mask)
goto out_nopath;
ccwreq_do(cdev);
return;
out_nopath:
ccwreq_stop(cdev, -EACCES);
}
/**
* ccw_request_cancel - cancel running I/O request
* @cdev: ccw device
*
* Cancel the I/O request specified by cdev->req. Return non-zero if request
* has already finished, zero otherwise.
*/
int ccw_request_cancel(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
int rc;
if (req->done)
return 1;
req->cancel = 1;
rc = cio_clear(sch);
if (rc)
ccwreq_stop(cdev, rc);
return 0;
}
/*
* Return the status of the internal I/O started on the specified ccw device.
* Perform BASIC SENSE if required.
*/
static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
{
struct irb *irb = &cdev->private->dma_area->irb;
struct cmd_scsw *scsw = &irb->scsw.cmd;
enum uc_todo todo;
/* Perform BASIC SENSE if needed. */
if (ccw_device_accumulate_and_sense(cdev, lcirb))
return IO_RUNNING;
/* Check for halt/clear interrupt. */
if (scsw->fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC))
return IO_KILLED;
/* Check for path error. */
if (scsw->cc == 3 || scsw->pno)
return IO_PATH_ERROR;
/* Handle BASIC SENSE data. */
if (irb->esw.esw0.erw.cons) {
CIO_TRACE_EVENT(2, "sensedata");
CIO_HEX_EVENT(2, &cdev->private->dev_id,
sizeof(struct ccw_dev_id));
CIO_HEX_EVENT(2, &cdev->private->dma_area->irb.ecw,
SENSE_MAX_COUNT);
/* Check for command reject. */
if (irb->ecw[0] & SNS0_CMD_REJECT)
return IO_REJECTED;
/* Ask the driver what to do */
if (cdev->drv && cdev->drv->uc_handler) {
todo = cdev->drv->uc_handler(cdev, lcirb);
CIO_TRACE_EVENT(2, "uc_response");
CIO_HEX_EVENT(2, &todo, sizeof(todo));
switch (todo) {
case UC_TODO_RETRY:
return IO_STATUS_ERROR;
case UC_TODO_RETRY_ON_NEW_PATH:
return IO_PATH_ERROR;
case UC_TODO_STOP:
return IO_REJECTED;
default:
return IO_STATUS_ERROR;
}
}
/* Assume that unexpected SENSE data implies an error. */
return IO_STATUS_ERROR;
}
/* Check for channel errors. */
if (scsw->cstat != 0)
return IO_STATUS_ERROR;
/* Check for device errors. */
if (scsw->dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
return IO_STATUS_ERROR;
/* Check for final state. */
if (!(scsw->dstat & DEV_STAT_DEV_END))
return IO_RUNNING;
/* Check for other improper status. */
if (scsw->cc == 1 && (scsw->stctl & SCSW_STCTL_ALERT_STATUS))
return IO_STATUS_ERROR;
return IO_DONE;
}
/*
* Log ccw request status.
*/
static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
{
struct ccw_request *req = &cdev->private->req;
struct {
struct ccw_dev_id dev_id;
u16 retries;
u8 lpm;
u8 status;
} __attribute__ ((packed)) data;
data.dev_id = cdev->private->dev_id;
data.retries = req->retries;
data.lpm = (u8) req->mask;
data.status = (u8) status;
CIO_TRACE_EVENT(2, "reqstat");
CIO_HEX_EVENT(2, &data, sizeof(data));
}
/**
* ccw_request_handler - interrupt handler for I/O request procedure.
* @cdev: ccw device
*
* Handle interrupt during I/O request procedure.
*/
void ccw_request_handler(struct ccw_device *cdev)
{
struct irb *irb = this_cpu_ptr(&cio_irb);
struct ccw_request *req = &cdev->private->req;
enum io_status status;
int rc = -EOPNOTSUPP;
/* Check status of I/O request. */
status = ccwreq_status(cdev, irb);
if (req->filter)
status = req->filter(cdev, req->data, irb, status);
if (status != IO_RUNNING)
ccw_device_set_timeout(cdev, 0);
if (status != IO_DONE && status != IO_RUNNING)
ccwreq_log_status(cdev, status);
switch (status) {
case IO_DONE:
break;
case IO_RUNNING:
return;
case IO_REJECTED:
goto err;
case IO_PATH_ERROR:
goto out_next_path;
case IO_STATUS_ERROR:
goto out_restart;
case IO_KILLED:
/* Check if request was cancelled on purpose. */
if (req->cancel) {
rc = -EIO;
goto err;
}
goto out_restart;
}
/* Check back with request initiator. */
if (!req->check)
goto out;
switch (req->check(cdev, req->data)) {
case 0:
break;
case -EAGAIN:
goto out_restart;
case -EACCES:
goto out_next_path;
default:
goto err;
}
out:
ccwreq_stop(cdev, 0);
return;
out_next_path:
/* Try next path and restart I/O. */
if (!ccwreq_next_path(cdev)) {
rc = -EACCES;
goto err;
}
out_restart:
/* Restart. */
ccwreq_do(cdev);
return;
err:
ccwreq_stop(cdev, rc);
}
/**
* ccw_request_timeout - timeout handler for I/O request procedure
* @cdev: ccw device
*
* Handle timeout during I/O request procedure.
*/
void ccw_request_timeout(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct ccw_request *req = &cdev->private->req;
int rc = -ENODEV, chp;
if (cio_update_schib(sch))
goto err;
for (chp = 0; chp < 8; chp++) {
if ((0x80 >> chp) & sch->schib.pmcw.lpum)
pr_warn("%s: No interrupt was received within %lus (CS=%02x, DS=%02x, CHPID=%x.%02x)\n",
dev_name(&cdev->dev), req->timeout / HZ,
scsw_cstat(&sch->schib.scsw),
scsw_dstat(&sch->schib.scsw),
sch->schid.cssid,
sch->schib.pmcw.chpid[chp]);
}
if (!ccwreq_next_path(cdev)) {
/* set the final return code for this request */
req->drc = -ETIME;
}
rc = cio_clear(sch);
if (rc)
goto err;
return;
err:
ccwreq_stop(cdev, rc);
}
/**
* ccw_request_notoper - notoper handler for I/O request procedure
* @cdev: ccw device
*
* Handle notoper during I/O request procedure.
*/
void ccw_request_notoper(struct ccw_device *cdev)
{
ccwreq_stop(cdev, -ENODEV);
}
| linux-master | drivers/s390/cio/ccwreq.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Linux on zSeries Channel Measurement Facility support
*
* Copyright IBM Corp. 2000, 2006
*
* Authors: Arnd Bergmann <[email protected]>
* Cornelia Huck <[email protected]>
*
* original idea from Natarajan Krishnaswami <[email protected]>
*/
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/memblock.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/timex.h> /* get_tod_clock() */
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/cmb.h>
#include <asm/div64.h>
#include "cio.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "chsc.h"
/*
* parameter to enable cmf during boot, possible uses are:
* "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be
* used on any subchannel
* "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
 *		  <num> subchannels, where <num> is an integer
* between 1 and 65535, default is 1024
*/
#define ARGSTRING "s390cmf"
/* indices for READCMB */
enum cmb_index {
avg_utilization = -1,
	/* basic and extended format: */
cmb_ssch_rsch_count = 0,
cmb_sample_count,
cmb_device_connect_time,
cmb_function_pending_time,
cmb_device_disconnect_time,
cmb_control_unit_queuing_time,
cmb_device_active_only_time,
/* extended format only: */
cmb_device_busy_time,
cmb_initial_command_response_time,
};
/**
* enum cmb_format - types of supported measurement block formats
*
* @CMF_BASIC: traditional channel measurement blocks supported
* by all machines that we run on
* @CMF_EXTENDED: improved format that was introduced with the z990
* machine
* @CMF_AUTODETECT: default: use extended format when running on a machine
* supporting extended format, otherwise fall back to
* basic format
*/
enum cmb_format {
CMF_BASIC,
CMF_EXTENDED,
CMF_AUTODETECT = -1,
};
/*
* format - actual format for all measurement blocks
*
* The format module parameter can be set to a value of 0 (zero)
* or 1, indicating basic or extended format as described for
* enum cmb_format.
*/
static int format = CMF_AUTODETECT;
module_param(format, bint, 0444);
/**
* struct cmb_operations - functions to use depending on cmb_format
*
* Most of these functions operate on a struct ccw_device. There is only
* one instance of struct cmb_operations because the format of the measurement
* data is guaranteed to be the same for every ccw_device.
*
* @alloc: allocate memory for a channel measurement block,
* either with the help of a special pool or with kmalloc
* @free: free memory allocated with @alloc
* @set: enable or disable measurement
* @read: read a measurement entry at an index
* @readall: read a measurement block in a common format
* @reset: clear the data in the associated measurement block and
* reset its time stamp
*/
struct cmb_operations {
int (*alloc) (struct ccw_device *);
void (*free) (struct ccw_device *);
int (*set) (struct ccw_device *, u32);
u64 (*read) (struct ccw_device *, int);
int (*readall)(struct ccw_device *, struct cmbdata *);
void (*reset) (struct ccw_device *);
/* private: */
struct attribute_group *attr_group;
};
static struct cmb_operations *cmbops;
struct cmb_data {
void *hw_block; /* Pointer to block updated by hardware */
void *last_block; /* Last changed block copied from hardware block */
int size; /* Size of hw_block and last_block */
unsigned long long last_update; /* when last_block was updated */
};
/*
* Our user interface is designed in terms of nanoseconds,
* while the hardware measures total times in its own
* unit.
*/
static inline u64 time_to_nsec(u32 value)
{
return ((u64)value) * 128000ull;
}
/*
* Users are usually interested in average times,
* not accumulated time.
* This also helps us with atomicity problems
 * when reading single values.
*/
static inline u64 time_to_avg_nsec(u32 value, u32 count)
{
u64 ret;
/* no samples yet, avoid division by 0 */
if (count == 0)
return 0;
/* value comes in units of 128 µsec */
ret = time_to_nsec(value);
do_div(ret, count);
return ret;
}
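/*
 * Worked example (illustrative): a raw counter value of 100 is
 * 100 * 128 usec = 12.8 msec in total, so with a sample_count of 50
 * time_to_avg_nsec(100, 50) returns 256000 nsec.
 */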
#define CMF_OFF 0
#define CMF_ON 2
/*
* Activate or deactivate the channel monitor. When area is NULL,
* the monitor is deactivated. The channel monitor needs to
* be active in order to measure subchannels, which also need
* to be enabled.
*/
static inline void cmf_activate(void *area, unsigned int onoff)
{
/* activate channel measurement */
asm volatile(
" lgr 1,%[r1]\n"
" lgr 2,%[mbo]\n"
" schm\n"
:
: [r1] "d" ((unsigned long)onoff), [mbo] "d" (area)
: "1", "2");
}
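/*
 * Note: as coded above, the mode word (CMF_OFF/CMF_ON) is loaded into
 * general register 1 and the measurement-block origin into register 2
 * before schm is issued; calling with a NULL area and CMF_OFF switches
 * the channel monitor off.
 */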
static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
unsigned long address)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
int ret;
sch->config.mme = mme;
sch->config.mbfc = mbfc;
/* address can be either a block address or a block index */
if (mbfc)
sch->config.mba = address;
else
sch->config.mbi = address;
ret = cio_commit_config(sch);
if (!mme && ret == -ENODEV) {
/*
* The task was to disable measurement block updates but
* the subchannel is already gone. Report success.
*/
ret = 0;
}
return ret;
}
struct set_schib_struct {
u32 mme;
int mbfc;
unsigned long address;
wait_queue_head_t wait;
int ret;
};
#define CMF_PENDING 1
#define SET_SCHIB_TIMEOUT (10 * HZ)
static int set_schib_wait(struct ccw_device *cdev, u32 mme,
int mbfc, unsigned long address)
{
struct set_schib_struct set_data;
int ret = -ENODEV;
spin_lock_irq(cdev->ccwlock);
if (!cdev->private->cmb)
goto out;
ret = set_schib(cdev, mme, mbfc, address);
if (ret != -EBUSY)
goto out;
/* if the device is not online, don't even try again */
if (cdev->private->state != DEV_STATE_ONLINE)
goto out;
init_waitqueue_head(&set_data.wait);
set_data.mme = mme;
set_data.mbfc = mbfc;
set_data.address = address;
set_data.ret = CMF_PENDING;
cdev->private->state = DEV_STATE_CMFCHANGE;
cdev->private->cmb_wait = &set_data;
spin_unlock_irq(cdev->ccwlock);
ret = wait_event_interruptible_timeout(set_data.wait,
set_data.ret != CMF_PENDING,
SET_SCHIB_TIMEOUT);
spin_lock_irq(cdev->ccwlock);
if (ret <= 0) {
if (set_data.ret == CMF_PENDING) {
set_data.ret = (ret == 0) ? -ETIME : ret;
if (cdev->private->state == DEV_STATE_CMFCHANGE)
cdev->private->state = DEV_STATE_ONLINE;
}
}
cdev->private->cmb_wait = NULL;
ret = set_data.ret;
out:
spin_unlock_irq(cdev->ccwlock);
return ret;
}
void retry_set_schib(struct ccw_device *cdev)
{
struct set_schib_struct *set_data = cdev->private->cmb_wait;
if (!set_data)
return;
set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc,
set_data->address);
wake_up(&set_data->wait);
}
static int cmf_copy_block(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
struct cmb_data *cmb_data;
void *hw_block;
if (cio_update_schib(sch))
return -ENODEV;
if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {
/* Don't copy if a start function is in progress. */
if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) &&
(scsw_actl(&sch->schib.scsw) &
(SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
(!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS)))
return -EBUSY;
}
cmb_data = cdev->private->cmb;
hw_block = cmb_data->hw_block;
memcpy(cmb_data->last_block, hw_block, cmb_data->size);
cmb_data->last_update = get_tod_clock();
return 0;
}
struct copy_block_struct {
wait_queue_head_t wait;
int ret;
};
static int cmf_cmb_copy_wait(struct ccw_device *cdev)
{
struct copy_block_struct copy_block;
int ret = -ENODEV;
spin_lock_irq(cdev->ccwlock);
if (!cdev->private->cmb)
goto out;
ret = cmf_copy_block(cdev);
if (ret != -EBUSY)
goto out;
if (cdev->private->state != DEV_STATE_ONLINE)
goto out;
	init_waitqueue_head(&copy_block.wait);
copy_block.ret = CMF_PENDING;
cdev->private->state = DEV_STATE_CMFUPDATE;
	cdev->private->cmb_wait = &copy_block;
spin_unlock_irq(cdev->ccwlock);
ret = wait_event_interruptible(copy_block.wait,
copy_block.ret != CMF_PENDING);
spin_lock_irq(cdev->ccwlock);
if (ret) {
if (copy_block.ret == CMF_PENDING) {
copy_block.ret = -ERESTARTSYS;
if (cdev->private->state == DEV_STATE_CMFUPDATE)
cdev->private->state = DEV_STATE_ONLINE;
}
}
cdev->private->cmb_wait = NULL;
ret = copy_block.ret;
out:
spin_unlock_irq(cdev->ccwlock);
return ret;
}
void cmf_retry_copy_block(struct ccw_device *cdev)
{
struct copy_block_struct *copy_block = cdev->private->cmb_wait;
if (!copy_block)
return;
copy_block->ret = cmf_copy_block(cdev);
	wake_up(&copy_block->wait);
}
static void cmf_generic_reset(struct ccw_device *cdev)
{
struct cmb_data *cmb_data;
spin_lock_irq(cdev->ccwlock);
cmb_data = cdev->private->cmb;
if (cmb_data) {
memset(cmb_data->last_block, 0, cmb_data->size);
/*
* Need to reset hw block as well to make the hardware start
* from 0 again.
*/
memset(cmb_data->hw_block, 0, cmb_data->size);
cmb_data->last_update = 0;
}
cdev->private->cmb_start_time = get_tod_clock();
spin_unlock_irq(cdev->ccwlock);
}
/**
* struct cmb_area - container for global cmb data
*
* @mem: pointer to CMBs (only in basic measurement mode)
* @list: contains a linked list of all subchannels
* @num_channels: number of channels to be measured
* @lock: protect concurrent access to @mem and @list
*/
struct cmb_area {
struct cmb *mem;
struct list_head list;
int num_channels;
spinlock_t lock;
};
static struct cmb_area cmb_area = {
.lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
.list = LIST_HEAD_INIT(cmb_area.list),
.num_channels = 1024,
};
/* ****** old style CMB handling ********/
/*
* Basic channel measurement blocks are allocated in one contiguous
* block of memory, which can not be moved as long as any channel
* is active. Therefore, a maximum number of subchannels needs to
* be defined somewhere. This is a module parameter, defaulting to
* a reasonable value of 1024, or 32 kb of memory.
* Current kernels don't allow kmalloc with more than 128kb, so the
* maximum is 4096.
*/
module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);
/**
* struct cmb - basic channel measurement block
* @ssch_rsch_count: number of ssch and rsch
* @sample_count: number of samples
* @device_connect_time: time of device connect
* @function_pending_time: time of function pending
* @device_disconnect_time: time of device disconnect
* @control_unit_queuing_time: time of control unit queuing
* @device_active_only_time: time of device active only
* @reserved: unused in basic measurement mode
*
* The measurement block as used by the hardware. The fields are described
* further in z/Architecture Principles of Operation, chapter 17.
*
* The cmb area made up from these blocks must be a contiguous array and may
* not be reallocated or freed.
* Only one cmb area can be present in the system.
*/
struct cmb {
u16 ssch_rsch_count;
u16 sample_count;
u32 device_connect_time;
u32 function_pending_time;
u32 device_disconnect_time;
u32 control_unit_queuing_time;
u32 device_active_only_time;
u32 reserved[2];
};
/*
* Insert a single device into the cmb_area list.
* Called with cmb_area.lock held from alloc_cmb.
*/
static int alloc_cmb_single(struct ccw_device *cdev,
struct cmb_data *cmb_data)
{
struct cmb *cmb;
struct ccw_device_private *node;
int ret;
spin_lock_irq(cdev->ccwlock);
if (!list_empty(&cdev->private->cmb_list)) {
ret = -EBUSY;
goto out;
}
/*
* Find first unused cmb in cmb_area.mem.
* This is a little tricky: cmb_area.list
	 * remains sorted by ->cmb->hw_block pointers.
*/
cmb = cmb_area.mem;
list_for_each_entry(node, &cmb_area.list, cmb_list) {
struct cmb_data *data;
data = node->cmb;
if ((struct cmb*)data->hw_block > cmb)
break;
cmb++;
}
if (cmb - cmb_area.mem >= cmb_area.num_channels) {
ret = -ENOMEM;
goto out;
}
/* insert new cmb */
list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
cmb_data->hw_block = cmb;
cdev->private->cmb = cmb_data;
ret = 0;
out:
spin_unlock_irq(cdev->ccwlock);
return ret;
}
static int alloc_cmb(struct ccw_device *cdev)
{
int ret;
struct cmb *mem;
ssize_t size;
struct cmb_data *cmb_data;
/* Allocate private cmb_data. */
cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
if (!cmb_data)
return -ENOMEM;
cmb_data->last_block = kzalloc(sizeof(struct cmb), GFP_KERNEL);
if (!cmb_data->last_block) {
kfree(cmb_data);
return -ENOMEM;
}
cmb_data->size = sizeof(struct cmb);
spin_lock(&cmb_area.lock);
if (!cmb_area.mem) {
/* there is no user yet, so we need a new area */
size = sizeof(struct cmb) * cmb_area.num_channels;
WARN_ON(!list_empty(&cmb_area.list));
spin_unlock(&cmb_area.lock);
mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA,
get_order(size));
spin_lock(&cmb_area.lock);
if (cmb_area.mem) {
/* ok, another thread was faster */
free_pages((unsigned long)mem, get_order(size));
} else if (!mem) {
/* no luck */
ret = -ENOMEM;
goto out;
} else {
/* everything ok */
memset(mem, 0, size);
cmb_area.mem = mem;
cmf_activate(cmb_area.mem, CMF_ON);
}
}
/* do the actual allocation */
ret = alloc_cmb_single(cdev, cmb_data);
out:
spin_unlock(&cmb_area.lock);
if (ret) {
kfree(cmb_data->last_block);
kfree(cmb_data);
}
return ret;
}
static void free_cmb(struct ccw_device *cdev)
{
struct ccw_device_private *priv;
struct cmb_data *cmb_data;
spin_lock(&cmb_area.lock);
spin_lock_irq(cdev->ccwlock);
priv = cdev->private;
cmb_data = priv->cmb;
priv->cmb = NULL;
if (cmb_data)
kfree(cmb_data->last_block);
kfree(cmb_data);
list_del_init(&priv->cmb_list);
if (list_empty(&cmb_area.list)) {
ssize_t size;
size = sizeof(struct cmb) * cmb_area.num_channels;
cmf_activate(NULL, CMF_OFF);
free_pages((unsigned long)cmb_area.mem, get_order(size));
cmb_area.mem = NULL;
}
spin_unlock_irq(cdev->ccwlock);
spin_unlock(&cmb_area.lock);
}
static int set_cmb(struct ccw_device *cdev, u32 mme)
{
u16 offset;
struct cmb_data *cmb_data;
unsigned long flags;
spin_lock_irqsave(cdev->ccwlock, flags);
if (!cdev->private->cmb) {
spin_unlock_irqrestore(cdev->ccwlock, flags);
return -EINVAL;
}
cmb_data = cdev->private->cmb;
offset = mme ? (struct cmb *)cmb_data->hw_block - cmb_area.mem : 0;
spin_unlock_irqrestore(cdev->ccwlock, flags);
return set_schib_wait(cdev, mme, 0, offset);
}
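/*
 * Note: in basic mode the hardware receives a block index (mbi)
 * relative to the global area rather than an address, hence the
 * element offset computed above and the mbfc == 0 argument.
 */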
/* calculate utilization in 0.1 percent units */
static u64 __cmb_utilization(u64 device_connect_time, u64 function_pending_time,
u64 device_disconnect_time, u64 start_time)
{
u64 utilization, elapsed_time;
utilization = time_to_nsec(device_connect_time +
function_pending_time +
device_disconnect_time);
elapsed_time = get_tod_clock() - start_time;
elapsed_time = tod_to_ns(elapsed_time);
elapsed_time /= 1000;
return elapsed_time ? (utilization / elapsed_time) : 0;
}
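/*
 * Worked example (illustrative): one second of combined connect,
 * pending and disconnect time within a ten second window yields
 * 1e9 / (1e10 / 1000) == 100, i.e. a utilization of 10.0%.
 */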
static u64 read_cmb(struct ccw_device *cdev, int index)
{
struct cmb_data *cmb_data;
unsigned long flags;
struct cmb *cmb;
u64 ret = 0;
u32 val;
spin_lock_irqsave(cdev->ccwlock, flags);
cmb_data = cdev->private->cmb;
if (!cmb_data)
goto out;
cmb = cmb_data->hw_block;
switch (index) {
case avg_utilization:
ret = __cmb_utilization(cmb->device_connect_time,
cmb->function_pending_time,
cmb->device_disconnect_time,
cdev->private->cmb_start_time);
goto out;
case cmb_ssch_rsch_count:
ret = cmb->ssch_rsch_count;
goto out;
case cmb_sample_count:
ret = cmb->sample_count;
goto out;
case cmb_device_connect_time:
val = cmb->device_connect_time;
break;
case cmb_function_pending_time:
val = cmb->function_pending_time;
break;
case cmb_device_disconnect_time:
val = cmb->device_disconnect_time;
break;
case cmb_control_unit_queuing_time:
val = cmb->control_unit_queuing_time;
break;
case cmb_device_active_only_time:
val = cmb->device_active_only_time;
break;
default:
goto out;
}
ret = time_to_avg_nsec(val, cmb->sample_count);
out:
spin_unlock_irqrestore(cdev->ccwlock, flags);
return ret;
}
static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data)
{
struct cmb *cmb;
struct cmb_data *cmb_data;
u64 time;
unsigned long flags;
int ret;
ret = cmf_cmb_copy_wait(cdev);
if (ret < 0)
return ret;
spin_lock_irqsave(cdev->ccwlock, flags);
cmb_data = cdev->private->cmb;
if (!cmb_data) {
ret = -ENODEV;
goto out;
}
if (cmb_data->last_update == 0) {
ret = -EAGAIN;
goto out;
}
cmb = cmb_data->last_block;
time = cmb_data->last_update - cdev->private->cmb_start_time;
memset(data, 0, sizeof(struct cmbdata));
/* we only know values before device_busy_time */
data->size = offsetof(struct cmbdata, device_busy_time);
data->elapsed_time = tod_to_ns(time);
/* copy data to new structure */
data->ssch_rsch_count = cmb->ssch_rsch_count;
data->sample_count = cmb->sample_count;
/* time fields are converted to nanoseconds while copying */
data->device_connect_time = time_to_nsec(cmb->device_connect_time);
data->function_pending_time = time_to_nsec(cmb->function_pending_time);
data->device_disconnect_time =
time_to_nsec(cmb->device_disconnect_time);
data->control_unit_queuing_time
= time_to_nsec(cmb->control_unit_queuing_time);
data->device_active_only_time
= time_to_nsec(cmb->device_active_only_time);
ret = 0;
out:
spin_unlock_irqrestore(cdev->ccwlock, flags);
return ret;
}
static void reset_cmb(struct ccw_device *cdev)
{
cmf_generic_reset(cdev);
}
static int cmf_enabled(struct ccw_device *cdev)
{
int enabled;
spin_lock_irq(cdev->ccwlock);
enabled = !!cdev->private->cmb;
spin_unlock_irq(cdev->ccwlock);
return enabled;
}
static struct attribute_group cmf_attr_group;
static struct cmb_operations cmbops_basic = {
.alloc = alloc_cmb,
.free = free_cmb,
.set = set_cmb,
.read = read_cmb,
.readall = readall_cmb,
.reset = reset_cmb,
.attr_group = &cmf_attr_group,
};
/* ******** extended cmb handling ********/
/**
* struct cmbe - extended channel measurement block
* @ssch_rsch_count: number of ssch and rsch
* @sample_count: number of samples
* @device_connect_time: time of device connect
* @function_pending_time: time of function pending
* @device_disconnect_time: time of device disconnect
* @control_unit_queuing_time: time of control unit queuing
* @device_active_only_time: time of device active only
* @device_busy_time: time of device busy
* @initial_command_response_time: initial command response time
* @reserved: unused
*
* The measurement block as used by the hardware. May be in any 64 bit physical
* location.
* The fields are described further in z/Architecture Principles of Operation,
* third edition, chapter 17.
*/
struct cmbe {
u32 ssch_rsch_count;
u32 sample_count;
u32 device_connect_time;
u32 function_pending_time;
u32 device_disconnect_time;
u32 control_unit_queuing_time;
u32 device_active_only_time;
u32 device_busy_time;
u32 initial_command_response_time;
u32 reserved[7];
} __packed __aligned(64);
static struct kmem_cache *cmbe_cache;
static int alloc_cmbe(struct ccw_device *cdev)
{
struct cmb_data *cmb_data;
struct cmbe *cmbe;
int ret = -ENOMEM;
cmbe = kmem_cache_zalloc(cmbe_cache, GFP_KERNEL);
if (!cmbe)
return ret;
cmb_data = kzalloc(sizeof(*cmb_data), GFP_KERNEL);
if (!cmb_data)
goto out_free;
cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL);
if (!cmb_data->last_block)
goto out_free;
cmb_data->size = sizeof(*cmbe);
cmb_data->hw_block = cmbe;
spin_lock(&cmb_area.lock);
spin_lock_irq(cdev->ccwlock);
if (cdev->private->cmb)
goto out_unlock;
cdev->private->cmb = cmb_data;
/* activate global measurement if this is the first channel */
if (list_empty(&cmb_area.list))
cmf_activate(NULL, CMF_ON);
list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
spin_unlock_irq(cdev->ccwlock);
spin_unlock(&cmb_area.lock);
return 0;
out_unlock:
spin_unlock_irq(cdev->ccwlock);
spin_unlock(&cmb_area.lock);
ret = -EBUSY;
out_free:
if (cmb_data)
kfree(cmb_data->last_block);
kfree(cmb_data);
kmem_cache_free(cmbe_cache, cmbe);
return ret;
}
static void free_cmbe(struct ccw_device *cdev)
{
struct cmb_data *cmb_data;
spin_lock(&cmb_area.lock);
spin_lock_irq(cdev->ccwlock);
cmb_data = cdev->private->cmb;
cdev->private->cmb = NULL;
if (cmb_data) {
kfree(cmb_data->last_block);
kmem_cache_free(cmbe_cache, cmb_data->hw_block);
}
kfree(cmb_data);
/* deactivate global measurement if this is the last channel */
list_del_init(&cdev->private->cmb_list);
if (list_empty(&cmb_area.list))
cmf_activate(NULL, CMF_OFF);
spin_unlock_irq(cdev->ccwlock);
spin_unlock(&cmb_area.lock);
}
static int set_cmbe(struct ccw_device *cdev, u32 mme)
{
unsigned long mba;
struct cmb_data *cmb_data;
unsigned long flags;
spin_lock_irqsave(cdev->ccwlock, flags);
if (!cdev->private->cmb) {
spin_unlock_irqrestore(cdev->ccwlock, flags);
return -EINVAL;
}
cmb_data = cdev->private->cmb;
mba = mme ? (unsigned long) cmb_data->hw_block : 0;
spin_unlock_irqrestore(cdev->ccwlock, flags);
return set_schib_wait(cdev, mme, 1, mba);
}
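/*
 * Note: extended mode passes the absolute measurement-block address
 * (mbfc == 1), so each device owns an independently allocated cmbe
 * and no global contiguous area is required.
 */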
static u64 read_cmbe(struct ccw_device *cdev, int index)
{
struct cmb_data *cmb_data;
unsigned long flags;
struct cmbe *cmb;
u64 ret = 0;
u32 val;
spin_lock_irqsave(cdev->ccwlock, flags);
cmb_data = cdev->private->cmb;
if (!cmb_data)
goto out;
cmb = cmb_data->hw_block;
switch (index) {
case avg_utilization:
ret = __cmb_utilization(cmb->device_connect_time,
cmb->function_pending_time,
cmb->device_disconnect_time,
cdev->private->cmb_start_time);
goto out;
case cmb_ssch_rsch_count:
ret = cmb->ssch_rsch_count;
goto out;
case cmb_sample_count:
ret = cmb->sample_count;
goto out;
case cmb_device_connect_time:
val = cmb->device_connect_time;
break;
case cmb_function_pending_time:
val = cmb->function_pending_time;
break;
case cmb_device_disconnect_time:
val = cmb->device_disconnect_time;
break;
case cmb_control_unit_queuing_time:
val = cmb->control_unit_queuing_time;
break;
case cmb_device_active_only_time:
val = cmb->device_active_only_time;
break;
case cmb_device_busy_time:
val = cmb->device_busy_time;
break;
case cmb_initial_command_response_time:
val = cmb->initial_command_response_time;
break;
default:
goto out;
}
ret = time_to_avg_nsec(val, cmb->sample_count);
out:
spin_unlock_irqrestore(cdev->ccwlock, flags);
return ret;
}
static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data)
{
struct cmbe *cmb;
struct cmb_data *cmb_data;
u64 time;
unsigned long flags;
int ret;
ret = cmf_cmb_copy_wait(cdev);
if (ret < 0)
return ret;
spin_lock_irqsave(cdev->ccwlock, flags);
cmb_data = cdev->private->cmb;
if (!cmb_data) {
ret = -ENODEV;
goto out;
}
if (cmb_data->last_update == 0) {
ret = -EAGAIN;
goto out;
}
time = cmb_data->last_update - cdev->private->cmb_start_time;
	memset(data, 0, sizeof(struct cmbdata));
/* we only know values before device_busy_time */
data->size = offsetof(struct cmbdata, device_busy_time);
data->elapsed_time = tod_to_ns(time);
cmb = cmb_data->last_block;
/* copy data to new structure */
data->ssch_rsch_count = cmb->ssch_rsch_count;
data->sample_count = cmb->sample_count;
/* time fields are converted to nanoseconds while copying */
data->device_connect_time = time_to_nsec(cmb->device_connect_time);
data->function_pending_time = time_to_nsec(cmb->function_pending_time);
data->device_disconnect_time =
time_to_nsec(cmb->device_disconnect_time);
data->control_unit_queuing_time
= time_to_nsec(cmb->control_unit_queuing_time);
data->device_active_only_time
= time_to_nsec(cmb->device_active_only_time);
data->device_busy_time = time_to_nsec(cmb->device_busy_time);
data->initial_command_response_time
= time_to_nsec(cmb->initial_command_response_time);
ret = 0;
out:
spin_unlock_irqrestore(cdev->ccwlock, flags);
return ret;
}
static void reset_cmbe(struct ccw_device *cdev)
{
cmf_generic_reset(cdev);
}
static struct attribute_group cmf_attr_group_ext;
static struct cmb_operations cmbops_extended = {
.alloc = alloc_cmbe,
.free = free_cmbe,
.set = set_cmbe,
.read = read_cmbe,
.readall = readall_cmbe,
.reset = reset_cmbe,
.attr_group = &cmf_attr_group_ext,
};
static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
{
return sprintf(buf, "%lld\n",
(unsigned long long) cmf_read(to_ccwdev(dev), idx));
}
static ssize_t cmb_show_avg_sample_interval(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ccw_device *cdev = to_ccwdev(dev);
unsigned long count;
long interval;
count = cmf_read(cdev, cmb_sample_count);
spin_lock_irq(cdev->ccwlock);
if (count) {
interval = get_tod_clock() - cdev->private->cmb_start_time;
interval = tod_to_ns(interval);
interval /= count;
} else
interval = -1;
spin_unlock_irq(cdev->ccwlock);
return sprintf(buf, "%ld\n", interval);
}
static ssize_t cmb_show_avg_utilization(struct device *dev,
struct device_attribute *attr,
char *buf)
{
unsigned long u = cmf_read(to_ccwdev(dev), avg_utilization);
return sprintf(buf, "%02lu.%01lu%%\n", u / 10, u % 10);
}
#define cmf_attr(name) \
static ssize_t show_##name(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ return cmb_show_attr((dev), buf, cmb_##name); } \
static DEVICE_ATTR(name, 0444, show_##name, NULL);
#define cmf_attr_avg(name) \
static ssize_t show_avg_##name(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ return cmb_show_attr((dev), buf, cmb_##name); } \
static DEVICE_ATTR(avg_##name, 0444, show_avg_##name, NULL);
cmf_attr(ssch_rsch_count);
cmf_attr(sample_count);
cmf_attr_avg(device_connect_time);
cmf_attr_avg(function_pending_time);
cmf_attr_avg(device_disconnect_time);
cmf_attr_avg(control_unit_queuing_time);
cmf_attr_avg(device_active_only_time);
cmf_attr_avg(device_busy_time);
cmf_attr_avg(initial_command_response_time);
static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval,
NULL);
static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
static struct attribute *cmf_attributes[] = {
&dev_attr_avg_sample_interval.attr,
&dev_attr_avg_utilization.attr,
&dev_attr_ssch_rsch_count.attr,
&dev_attr_sample_count.attr,
&dev_attr_avg_device_connect_time.attr,
&dev_attr_avg_function_pending_time.attr,
&dev_attr_avg_device_disconnect_time.attr,
&dev_attr_avg_control_unit_queuing_time.attr,
&dev_attr_avg_device_active_only_time.attr,
NULL,
};
static struct attribute_group cmf_attr_group = {
.name = "cmf",
.attrs = cmf_attributes,
};
static struct attribute *cmf_attributes_ext[] = {
&dev_attr_avg_sample_interval.attr,
&dev_attr_avg_utilization.attr,
&dev_attr_ssch_rsch_count.attr,
&dev_attr_sample_count.attr,
&dev_attr_avg_device_connect_time.attr,
&dev_attr_avg_function_pending_time.attr,
&dev_attr_avg_device_disconnect_time.attr,
&dev_attr_avg_control_unit_queuing_time.attr,
&dev_attr_avg_device_active_only_time.attr,
&dev_attr_avg_device_busy_time.attr,
&dev_attr_avg_initial_command_response_time.attr,
NULL,
};
static struct attribute_group cmf_attr_group_ext = {
.name = "cmf",
.attrs = cmf_attributes_ext,
};
static ssize_t cmb_enable_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ccw_device *cdev = to_ccwdev(dev);
return sprintf(buf, "%d\n", cmf_enabled(cdev));
}
static ssize_t cmb_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t c)
{
struct ccw_device *cdev = to_ccwdev(dev);
unsigned long val;
int ret;
ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
switch (val) {
case 0:
ret = disable_cmf(cdev);
break;
case 1:
ret = enable_cmf(cdev);
break;
default:
ret = -EINVAL;
}
return ret ? ret : c;
}
DEVICE_ATTR_RW(cmb_enable);
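/*
* Illustrative user-space view (a sketch, not part of the driver): the
* cmb_enable attribute defined above toggles measurements per device;
* once enabled, the counters appear in the "cmf" attribute group
* created above. The bus ID 0.0.1234 is a hypothetical example.
*
*	int fd = open("/sys/bus/ccw/devices/0.0.1234/cmb_enable", O_WRONLY);
*
*	if (fd >= 0) {
*		write(fd, "1", 1);	// "0" disables measurements again
*		close(fd);
*	}
*/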
/**
* enable_cmf() - switch on the channel measurement for a specific device
* @cdev: The ccw device to be enabled
*
* Enable channel measurements for @cdev. If this is called on a device
* for which channel measurement is already enabled, a reset of the
* measurement data is triggered.
* Returns: %0 for success or a negative error value.
* Context:
* non-atomic
*/
int enable_cmf(struct ccw_device *cdev)
{
int ret = 0;
device_lock(&cdev->dev);
if (cmf_enabled(cdev)) {
cmbops->reset(cdev);
goto out_unlock;
}
get_device(&cdev->dev);
ret = cmbops->alloc(cdev);
if (ret)
goto out;
cmbops->reset(cdev);
ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group);
if (ret) {
cmbops->free(cdev);
goto out;
}
ret = cmbops->set(cdev, 2);
if (ret) {
sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
cmbops->free(cdev);
}
out:
if (ret)
put_device(&cdev->dev);
out_unlock:
device_unlock(&cdev->dev);
return ret;
}
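/*
* Illustrative kernel-side use (a minimal sketch; "my_cdev" is a
* hypothetical, already set up ccw device owned by the caller):
*
*	struct cmbdata data;
*	int rc;
*
*	rc = enable_cmf(my_cdev);
*	if (rc)
*		return rc;
*	rc = cmf_readall(my_cdev, &data);	// time fields in nanoseconds
*	disable_cmf(my_cdev);
*/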
/**
* __disable_cmf() - switch off the channel measurement for a specific device
* @cdev: The ccw device to be disabled
*
* Returns: %0 for success or a negative error value.
*
* Context:
* non-atomic, device_lock() held.
*/
int __disable_cmf(struct ccw_device *cdev)
{
int ret;
ret = cmbops->set(cdev, 0);
if (ret)
return ret;
sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
cmbops->free(cdev);
put_device(&cdev->dev);
return ret;
}
/**
* disable_cmf() - switch off the channel measurement for a specific device
* @cdev: The ccw device to be disabled
*
* Returns: %0 for success or a negative error value.
*
* Context:
* non-atomic
*/
int disable_cmf(struct ccw_device *cdev)
{
int ret;
device_lock(&cdev->dev);
ret = __disable_cmf(cdev);
device_unlock(&cdev->dev);
return ret;
}
/**
* cmf_read() - read one value from the current channel measurement block
* @cdev: the channel to be read
* @index: the index of the value to be read
*
* Returns: The value read or %0 if the value cannot be read.
*
* Context:
* any
*/
u64 cmf_read(struct ccw_device *cdev, int index)
{
return cmbops->read(cdev, index);
}
/**
* cmf_readall() - read the current channel measurement block
* @cdev: the channel to be read
* @data: a pointer to a data block that will be filled
*
* Returns: %0 on success, a negative error value otherwise.
*
* Context:
* any
*/
int cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
{
return cmbops->readall(cdev, data);
}
/* Reenable cmf when a disconnected device becomes available again. */
int cmf_reenable(struct ccw_device *cdev)
{
cmbops->reset(cdev);
return cmbops->set(cdev, 2);
}
/**
* cmf_reactivate() - reactivate measurement block updates
*
* Use this during resume from hibernate.
*/
void cmf_reactivate(void)
{
spin_lock(&cmb_area.lock);
if (!list_empty(&cmb_area.list))
cmf_activate(cmb_area.mem, CMF_ON);
spin_unlock(&cmb_area.lock);
}
static int __init init_cmbe(void)
{
cmbe_cache = kmem_cache_create("cmbe_cache", sizeof(struct cmbe),
__alignof__(struct cmbe), 0, NULL);
return cmbe_cache ? 0 : -ENOMEM;
}
static int __init init_cmf(void)
{
char *format_string;
char *detect_string;
int ret;
/*
* If the user did not give a parameter, see if we are running on a
* machine supporting extended measurement blocks, otherwise fall back
* to basic mode.
*/
if (format == CMF_AUTODETECT) {
if (!css_general_characteristics.ext_mb) {
format = CMF_BASIC;
} else {
format = CMF_EXTENDED;
}
detect_string = "autodetected";
} else {
detect_string = "parameter";
}
switch (format) {
case CMF_BASIC:
format_string = "basic";
cmbops = &cmbops_basic;
break;
case CMF_EXTENDED:
format_string = "extended";
cmbops = &cmbops_extended;
ret = init_cmbe();
if (ret)
return ret;
break;
default:
return -EINVAL;
}
pr_info("Channel measurement facility initialized using format "
"%s (mode %s)\n", format_string, detect_string);
return 0;
}
device_initcall(init_cmf);
EXPORT_SYMBOL_GPL(enable_cmf);
EXPORT_SYMBOL_GPL(disable_cmf);
EXPORT_SYMBOL_GPL(cmf_read);
EXPORT_SYMBOL_GPL(cmf_readall);
| linux-master | drivers/s390/cio/cmf.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <[email protected]>
* Horst Hummel <[email protected]>
* Carsten Otte <[email protected]>
* Martin Schwidefsky <[email protected]>
* Bugreports.to..: <[email protected]>
* Copyright IBM Corp. 1999, 2001
*
* i/o controls for the dasd driver.
*/
#define KMSG_COMPONENT "dasd"
#include <linux/interrupt.h>
#include <linux/compat.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <asm/ccwdev.h>
#include <asm/schid.h>
#include <asm/cmb.h>
#include <linux/uaccess.h>
#include <linux/dasd_mod.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd_ioctl:"
#include "dasd_int.h"
static int
dasd_ioctl_api_version(void __user *argp)
{
int ver = DASD_API_VERSION;
return put_user(ver, (int __user *)argp);
}
/*
* Enable device.
* used by dasdfmt after BIODASDDISABLE to retrigger blocksize detection
*/
static int
dasd_ioctl_enable(struct block_device *bdev)
{
struct dasd_device *base;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
base = dasd_device_from_gendisk(bdev->bd_disk);
if (!base)
return -ENODEV;
dasd_enable_device(base);
dasd_put_device(base);
return 0;
}
/*
* Disable device.
* Used by dasdfmt. Disable I/O operations but allow ioctls.
*/
static int
dasd_ioctl_disable(struct block_device *bdev)
{
struct dasd_device *base;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
base = dasd_device_from_gendisk(bdev->bd_disk);
if (!base)
return -ENODEV;
/*
* Man this is sick. We don't do a real disable but only downgrade
* the device to DASD_STATE_BASIC. The reason is that dasdfmt uses
* BIODASDDISABLE to disable accesses to the device via the block
* device layer but it still wants to do i/o on the device by
* using the BIODASDFMT ioctl. Therefore the correct state for the
* device is DASD_STATE_BASIC that allows to do basic i/o.
*/
dasd_set_target_state(base, DASD_STATE_BASIC);
/*
* Set i_size to zero, since read, write, etc. check against this
* value.
*/
set_capacity(bdev->bd_disk, 0);
dasd_put_device(base);
return 0;
}
/*
* Quiesce device.
*/
static int dasd_ioctl_quiesce(struct dasd_block *block)
{
unsigned long flags;
struct dasd_device *base;
base = block->base;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
pr_info("%s: The DASD has been put in the quiesce "
"state\n", dev_name(&base->cdev->dev));
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
return 0;
}
/*
* Resume device.
*/
static int dasd_ioctl_resume(struct dasd_block *block)
{
unsigned long flags;
struct dasd_device *base;
base = block->base;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
pr_info("%s: I/O operations have been resumed "
"on the DASD\n", dev_name(&base->cdev->dev));
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
dasd_device_remove_stop_bits(base, DASD_STOPPED_QUIESCE);
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
dasd_schedule_block_bh(block);
dasd_schedule_device_bh(base);
return 0;
}
/*
* Abort all failfast I/O on a device.
*/
static int dasd_ioctl_abortio(struct dasd_block *block)
{
unsigned long flags;
struct dasd_device *base;
struct dasd_ccw_req *cqr, *n;
base = block->base;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (test_and_set_bit(DASD_FLAG_ABORTALL, &base->flags))
return 0;
DBF_DEV_EVENT(DBF_NOTICE, base, "%s", "abortall flag set");
spin_lock_irqsave(&block->request_queue_lock, flags);
spin_lock(&block->queue_lock);
list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
if (test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
cqr->callback_data &&
cqr->callback_data != DASD_SLEEPON_START_TAG &&
cqr->callback_data != DASD_SLEEPON_END_TAG) {
spin_unlock(&block->queue_lock);
blk_abort_request(cqr->callback_data);
spin_lock(&block->queue_lock);
}
}
spin_unlock(&block->queue_lock);
spin_unlock_irqrestore(&block->request_queue_lock, flags);
dasd_schedule_block_bh(block);
return 0;
}
/*
* Allow I/O on a device
*/
static int dasd_ioctl_allowio(struct dasd_block *block)
{
struct dasd_device *base;
base = block->base;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (test_and_clear_bit(DASD_FLAG_ABORTALL, &base->flags))
DBF_DEV_EVENT(DBF_NOTICE, base, "%s", "abortall flag unset");
return 0;
}
/*
* performs formatting of _device_ according to _fdata_
* Note: The discipline's format_function is assumed to deliver formatting
* commands to format multiple units of the device. In terms of the ECKD
* devices this means CCWs are generated to format multiple tracks.
*/
static int
dasd_format(struct dasd_block *block, struct format_data_t *fdata)
{
struct dasd_device *base;
int rc;
base = block->base;
if (base->discipline->format_device == NULL)
return -EPERM;
if (base->state != DASD_STATE_BASIC) {
pr_warn("%s: The DASD cannot be formatted while it is enabled\n",
dev_name(&base->cdev->dev));
return -EBUSY;
}
DBF_DEV_EVENT(DBF_NOTICE, base,
"formatting units %u to %u (%u B blocks) flags %u",
fdata->start_unit,
fdata->stop_unit, fdata->blksize, fdata->intensity);
/* Since dasdfmt keeps the device open after it was disabled,
* there still exists an inode for this device.
* We must update i_blkbits, otherwise we might get errors when
* enabling the device later.
*/
if (fdata->start_unit == 0) {
block->gdp->part0->bd_inode->i_blkbits =
blksize_bits(fdata->blksize);
}
rc = base->discipline->format_device(base, fdata, 1);
if (rc == -EAGAIN)
rc = base->discipline->format_device(base, fdata, 0);
return rc;
}
static int dasd_check_format(struct dasd_block *block,
struct format_check_t *cdata)
{
struct dasd_device *base;
int rc;
base = block->base;
if (!base->discipline->check_device_format)
return -ENOTTY;
rc = base->discipline->check_device_format(base, cdata, 1);
if (rc == -EAGAIN)
rc = base->discipline->check_device_format(base, cdata, 0);
return rc;
}
/*
* Format device.
*/
static int
dasd_ioctl_format(struct block_device *bdev, void __user *argp)
{
struct dasd_device *base;
struct format_data_t fdata;
int rc;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!argp)
return -EINVAL;
base = dasd_device_from_gendisk(bdev->bd_disk);
if (!base)
return -ENODEV;
if (base->features & DASD_FEATURE_READONLY ||
test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) {
dasd_put_device(base);
return -EROFS;
}
if (copy_from_user(&fdata, argp, sizeof(struct format_data_t))) {
dasd_put_device(base);
return -EFAULT;
}
if (bdev_is_partition(bdev)) {
pr_warn("%s: The specified DASD is a partition and cannot be formatted\n",
dev_name(&base->cdev->dev));
dasd_put_device(base);
return -EINVAL;
}
rc = dasd_format(base->block, &fdata);
dasd_put_device(base);
return rc;
}
/*
* Check device format
*/
static int dasd_ioctl_check_format(struct block_device *bdev, void __user *argp)
{
struct format_check_t cdata;
struct dasd_device *base;
int rc = 0;
if (!argp)
return -EINVAL;
base = dasd_device_from_gendisk(bdev->bd_disk);
if (!base)
return -ENODEV;
if (bdev_is_partition(bdev)) {
pr_warn("%s: The specified DASD is a partition and cannot be checked\n",
dev_name(&base->cdev->dev));
rc = -EINVAL;
goto out_err;
}
if (copy_from_user(&cdata, argp, sizeof(cdata))) {
rc = -EFAULT;
goto out_err;
}
rc = dasd_check_format(base->block, &cdata);
if (rc)
goto out_err;
if (copy_to_user(argp, &cdata, sizeof(cdata)))
rc = -EFAULT;
out_err:
dasd_put_device(base);
return rc;
}
static int dasd_release_space(struct dasd_device *device,
struct format_data_t *rdata)
{
if (!device->discipline->is_ese || !device->discipline->is_ese(device))
return -ENOTSUPP;
if (!device->discipline->release_space)
return -ENOTSUPP;
return device->discipline->release_space(device, rdata);
}
/*
* Release allocated space
*/
static int dasd_ioctl_release_space(struct block_device *bdev, void __user *argp)
{
struct format_data_t rdata;
struct dasd_device *base;
int rc = 0;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!argp)
return -EINVAL;
base = dasd_device_from_gendisk(bdev->bd_disk);
if (!base)
return -ENODEV;
if (base->features & DASD_FEATURE_READONLY ||
test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) {
rc = -EROFS;
goto out_err;
}
if (bdev_is_partition(bdev)) {
pr_warn("%s: The specified DASD is a partition and tracks cannot be released\n",
dev_name(&base->cdev->dev));
rc = -EINVAL;
goto out_err;
}
if (copy_from_user(&rdata, argp, sizeof(rdata))) {
rc = -EFAULT;
goto out_err;
}
rc = dasd_release_space(base, &rdata);
out_err:
dasd_put_device(base);
return rc;
}
/*
* Swap driver internal copy relation.
*/
static int
dasd_ioctl_copy_pair_swap(struct block_device *bdev, void __user *argp)
{
struct dasd_copypair_swap_data_t data;
struct dasd_device *device;
int rc;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
device = dasd_device_from_gendisk(bdev->bd_disk);
if (!device)
return -ENODEV;
if (copy_from_user(&data, argp, sizeof(struct dasd_copypair_swap_data_t))) {
dasd_put_device(device);
return -EFAULT;
}
if (memchr_inv(data.reserved, 0, sizeof(data.reserved))) {
pr_warn("%s: Invalid swap data specified\n",
dev_name(&device->cdev->dev));
dasd_put_device(device);
return DASD_COPYPAIRSWAP_INVALID;
}
if (bdev_is_partition(bdev)) {
pr_warn("%s: The specified DASD is a partition and cannot be swapped\n",
dev_name(&device->cdev->dev));
dasd_put_device(device);
return DASD_COPYPAIRSWAP_INVALID;
}
if (!device->copy) {
pr_warn("%s: The specified DASD has no copy pair set up\n",
dev_name(&device->cdev->dev));
dasd_put_device(device);
return -ENODEV;
}
if (!device->discipline->copy_pair_swap) {
dasd_put_device(device);
return -EOPNOTSUPP;
}
rc = device->discipline->copy_pair_swap(device, data.primary,
data.secondary);
dasd_put_device(device);
return rc;
}
#ifdef CONFIG_DASD_PROFILE
/*
* Reset device profile information
*/
static int dasd_ioctl_reset_profile(struct dasd_block *block)
{
dasd_profile_reset(&block->profile);
return 0;
}
/*
* Return device profile information
*/
static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
{
struct dasd_profile_info_t *data;
int rc = 0;
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
spin_lock_bh(&block->profile.lock);
if (block->profile.data) {
data->dasd_io_reqs = block->profile.data->dasd_io_reqs;
data->dasd_io_sects = block->profile.data->dasd_io_sects;
memcpy(data->dasd_io_secs, block->profile.data->dasd_io_secs,
sizeof(data->dasd_io_secs));
memcpy(data->dasd_io_times, block->profile.data->dasd_io_times,
sizeof(data->dasd_io_times));
memcpy(data->dasd_io_timps, block->profile.data->dasd_io_timps,
sizeof(data->dasd_io_timps));
memcpy(data->dasd_io_time1, block->profile.data->dasd_io_time1,
sizeof(data->dasd_io_time1));
memcpy(data->dasd_io_time2, block->profile.data->dasd_io_time2,
sizeof(data->dasd_io_time2));
memcpy(data->dasd_io_time2ps,
block->profile.data->dasd_io_time2ps,
sizeof(data->dasd_io_time2ps));
memcpy(data->dasd_io_time3, block->profile.data->dasd_io_time3,
sizeof(data->dasd_io_time3));
memcpy(data->dasd_io_nr_req,
block->profile.data->dasd_io_nr_req,
sizeof(data->dasd_io_nr_req));
spin_unlock_bh(&block->profile.lock);
} else {
spin_unlock_bh(&block->profile.lock);
rc = -EIO;
goto out;
}
if (copy_to_user(argp, data, sizeof(*data)))
rc = -EFAULT;
out:
kfree(data);
return rc;
}
#else
static int dasd_ioctl_reset_profile(struct dasd_block *block)
{
return -ENOTTY;
}
static int dasd_ioctl_read_profile(struct dasd_block *block, void __user *argp)
{
return -ENOTTY;
}
#endif
/*
* Return dasd information. Used for BIODASDINFO and BIODASDINFO2.
*/
static int __dasd_ioctl_information(struct dasd_block *block,
struct dasd_information2_t *dasd_info)
{
struct subchannel_id sch_id;
struct ccw_dev_id dev_id;
struct dasd_device *base;
struct ccw_device *cdev;
struct list_head *l;
unsigned long flags;
int rc;
base = block->base;
if (!base->discipline || !base->discipline->fill_info)
return -EINVAL;
rc = base->discipline->fill_info(base, dasd_info);
if (rc)
return rc;
cdev = base->cdev;
ccw_device_get_id(cdev, &dev_id);
ccw_device_get_schid(cdev, &sch_id);
dasd_info->devno = dev_id.devno;
dasd_info->schid = sch_id.sch_no;
dasd_info->cu_type = cdev->id.cu_type;
dasd_info->cu_model = cdev->id.cu_model;
dasd_info->dev_type = cdev->id.dev_type;
dasd_info->dev_model = cdev->id.dev_model;
dasd_info->status = base->state;
/*
* The open_count is increased for every opener, including
* the blkdev_get in dasd_scan_partitions.
* This must be hidden from user-space.
*/
dasd_info->open_count = atomic_read(&block->open_count);
if (!block->bdev)
dasd_info->open_count++;
/*
* check if device is really formatted
* LDL / CDL was returned by 'fill_info'
*/
if ((base->state < DASD_STATE_READY) ||
(dasd_check_blocksize(block->bp_block)))
dasd_info->format = DASD_FORMAT_NONE;
dasd_info->features |=
((base->features & DASD_FEATURE_READONLY) != 0);
memcpy(dasd_info->type, base->discipline->name, 4);
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
list_for_each(l, &base->ccw_queue)
dasd_info->chanq_len++;
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
return 0;
}
static int dasd_ioctl_information(struct dasd_block *block, void __user *argp,
size_t copy_size)
{
struct dasd_information2_t *dasd_info;
int error;
dasd_info = kzalloc(sizeof(*dasd_info), GFP_KERNEL);
if (!dasd_info)
return -ENOMEM;
error = __dasd_ioctl_information(block, dasd_info);
if (!error && copy_to_user(argp, dasd_info, copy_size))
error = -EFAULT;
kfree(dasd_info);
return error;
}
/*
* Set read only
*/
int dasd_set_read_only(struct block_device *bdev, bool ro)
{
struct dasd_device *base;
int rc;
/* do not manipulate hardware state for partitions */
if (bdev_is_partition(bdev))
return 0;
base = dasd_device_from_gendisk(bdev->bd_disk);
if (!base)
return -ENODEV;
if (!ro && test_bit(DASD_FLAG_DEVICE_RO, &base->flags))
rc = -EROFS;
else
rc = dasd_set_feature(base->cdev, DASD_FEATURE_READONLY, ro);
dasd_put_device(base);
return rc;
}
static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
struct cmbdata __user *argp)
{
size_t size = _IOC_SIZE(cmd);
struct cmbdata data;
int ret;
ret = cmf_readall(block->base->cdev, &data);
if (!ret && copy_to_user(argp, &data, min(size, sizeof(*argp))))
return -EFAULT;
return ret;
}
int dasd_ioctl(struct block_device *bdev, blk_mode_t mode,
unsigned int cmd, unsigned long arg)
{
struct dasd_block *block;
struct dasd_device *base;
void __user *argp;
int rc;
if (is_compat_task())
argp = compat_ptr(arg);
else
argp = (void __user *)arg;
if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg)
return -EINVAL;
base = dasd_device_from_gendisk(bdev->bd_disk);
if (!base)
return -ENODEV;
block = base->block;
rc = 0;
switch (cmd) {
case BIODASDDISABLE:
rc = dasd_ioctl_disable(bdev);
break;
case BIODASDENABLE:
rc = dasd_ioctl_enable(bdev);
break;
case BIODASDQUIESCE:
rc = dasd_ioctl_quiesce(block);
break;
case BIODASDRESUME:
rc = dasd_ioctl_resume(block);
break;
case BIODASDABORTIO:
rc = dasd_ioctl_abortio(block);
break;
case BIODASDALLOWIO:
rc = dasd_ioctl_allowio(block);
break;
case BIODASDFMT:
rc = dasd_ioctl_format(bdev, argp);
break;
case BIODASDCHECKFMT:
rc = dasd_ioctl_check_format(bdev, argp);
break;
case BIODASDINFO:
rc = dasd_ioctl_information(block, argp,
sizeof(struct dasd_information_t));
break;
case BIODASDINFO2:
rc = dasd_ioctl_information(block, argp,
sizeof(struct dasd_information2_t));
break;
case BIODASDPRRD:
rc = dasd_ioctl_read_profile(block, argp);
break;
case BIODASDPRRST:
rc = dasd_ioctl_reset_profile(block);
break;
case DASDAPIVER:
rc = dasd_ioctl_api_version(argp);
break;
case BIODASDCMFENABLE:
rc = enable_cmf(base->cdev);
break;
case BIODASDCMFDISABLE:
rc = disable_cmf(base->cdev);
break;
case BIODASDREADALLCMB:
rc = dasd_ioctl_readall_cmb(block, cmd, argp);
break;
case BIODASDRAS:
rc = dasd_ioctl_release_space(bdev, argp);
break;
case BIODASDCOPYPAIRSWAP:
rc = dasd_ioctl_copy_pair_swap(bdev, argp);
break;
default:
/* if the discipline has an ioctl method try it. */
rc = -ENOTTY;
if (base->discipline->ioctl)
rc = base->discipline->ioctl(block, cmd, argp);
}
dasd_put_device(base);
return rc;
}
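/*
* User-space view of the ioctl interface above (a sketch with error
* handling omitted; /dev/dasda is a hypothetical device node):
*
*	struct dasd_information2_t info;
*	int fd = open("/dev/dasda", O_RDONLY);
*
*	if (ioctl(fd, BIODASDINFO2, &info) == 0)
*		printf("devno %04x open_count %d\n",
*		       info.devno, info.open_count);
*	close(fd);
*/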
/**
* dasd_biodasdinfo() - fill out the dasd information structure
* @disk: [in] pointer to gendisk structure that references a DASD
* @info: [out] pointer to the dasd_information2_t structure
*
* Provide access to DASD specific information.
* The gendisk structure is checked if it belongs to the DASD driver by
* comparing the gendisk->fops pointer.
* If it does not belong to the DASD driver -EINVAL is returned.
* Otherwise the provided dasd_information2_t structure is filled out.
*
* Returns:
* %0 on success and a negative error value on failure.
*/
int dasd_biodasdinfo(struct gendisk *disk, struct dasd_information2_t *info)
{
struct dasd_device *base;
int error;
if (disk->fops != &dasd_device_operations)
return -EINVAL;
base = dasd_device_from_gendisk(disk);
if (!base)
return -ENODEV;
error = __dasd_ioctl_information(base->block, info);
dasd_put_device(base);
return error;
}
/* export that symbol_get in partition detection is possible */
EXPORT_SYMBOL_GPL(dasd_biodasdinfo);
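/*
* Illustrative consumer (a sketch): resolving the symbol at run time
* avoids a hard module dependency in the partition detection code.
*
*	int (*fn)(struct gendisk *disk, struct dasd_information2_t *info);
*
*	fn = symbol_get(dasd_biodasdinfo);
*	if (fn) {
*		rc = fn(disk, &info);
*		symbol_put(dasd_biodasdinfo);
*	}
*/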
| linux-master | drivers/s390/block/dasd_ioctl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <[email protected]>
* Based on.......: linux/drivers/s390/block/mdisk.c
* ...............: by Hartmunt Penner <[email protected]>
* Bugreports.to..: <[email protected]>
* Copyright IBM Corp. 1999, 2000
*
*/
#define KMSG_COMPONENT "dasd"
#include <linux/kernel_stat.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <asm/asm-extable.h>
#include <asm/dasd.h>
#include <asm/debug.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/vtoc.h>
#include "dasd_int.h"
#include "dasd_diag.h"
#define PRINTK_HEADER "dasd(diag):"
MODULE_LICENSE("GPL");
/* The maximum number of blocks per request (max_blocks) is dependent on the
* amount of storage that is available in the static I/O buffer for each
* device. Currently each device gets 2 pages. We want to fit two requests
* into the available memory so that we can immediately start the next if one
* finishes. */
#define DIAG_MAX_BLOCKS (((2 * PAGE_SIZE - sizeof(struct dasd_ccw_req) - \
sizeof(struct dasd_diag_req)) / \
sizeof(struct dasd_diag_bio)) / 2)
#define DIAG_MAX_RETRIES 32
#define DIAG_TIMEOUT 50
static struct dasd_discipline dasd_diag_discipline;
struct dasd_diag_private {
struct dasd_diag_characteristics rdc_data;
struct dasd_diag_rw_io iob;
struct dasd_diag_init_io iib;
blocknum_t pt_block;
struct ccw_dev_id dev_id;
};
struct dasd_diag_req {
unsigned int block_count;
struct dasd_diag_bio bio[];
};
static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 }; /* EBCDIC CMS1 */
/* Perform DIAG250 call with block I/O parameter list iob (input and output)
* and function code cmd.
* In case of an exception return 3. Otherwise return result of bitwise OR of
* resulting condition code and DIAG return code. */
static inline int __dia250(void *iob, int cmd)
{
union register_pair rx = { .even = (unsigned long)iob, };
typedef union {
struct dasd_diag_init_io init_io;
struct dasd_diag_rw_io rw_io;
} addr_type;
int cc;
cc = 3;
asm volatile(
" diag %[rx],%[cmd],0x250\n"
"0: ipm %[cc]\n"
" srl %[cc],28\n"
"1:\n"
EX_TABLE(0b,1b)
: [cc] "+&d" (cc), [rx] "+&d" (rx.pair), "+m" (*(addr_type *)iob)
: [cmd] "d" (cmd)
: "cc");
return cc | rx.odd;
}
static inline int dia250(void *iob, int cmd)
{
diag_stat_inc(DIAG_STAT_X250);
return __dia250(iob, cmd);
}
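/*
* Summary of dia250() return values as the callers below interpret
* them: 0 - synchronous I/O completed successfully, 8 - asynchronous
* I/O was started, 4 - device is read-only (seen during
* initialization), 3 - an exception occurred during the DIAG call.
*/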
/* Initialize block I/O to DIAG device using the specified blocksize and
* block offset. On success, return zero and set end_block to contain the
* number of blocks on the device minus the specified offset. Return non-zero
* otherwise. */
static inline int
mdsk_init_io(struct dasd_device *device, unsigned int blocksize,
blocknum_t offset, blocknum_t *end_block)
{
struct dasd_diag_private *private = device->private;
struct dasd_diag_init_io *iib = &private->iib;
int rc;
memset(iib, 0, sizeof(struct dasd_diag_init_io));
iib->dev_nr = private->dev_id.devno;
iib->block_size = blocksize;
iib->offset = offset;
iib->flaga = DASD_DIAG_FLAGA_DEFAULT;
rc = dia250(iib, INIT_BIO);
if ((rc & 3) == 0 && end_block)
*end_block = iib->end_block;
return rc;
}
/* Remove block I/O environment for device. Return zero on success, non-zero
* otherwise. */
static inline int
mdsk_term_io(struct dasd_device *device)
{
struct dasd_diag_private *private = device->private;
struct dasd_diag_init_io *iib = &private->iib;
int rc;
memset(iib, 0, sizeof(struct dasd_diag_init_io));
iib->dev_nr = private->dev_id.devno;
rc = dia250(iib, TERM_BIO);
return rc;
}
/* Error recovery for failed DIAG requests - try to reestablish the DIAG
* environment. */
static void
dasd_diag_erp(struct dasd_device *device)
{
int rc;
mdsk_term_io(device);
rc = mdsk_init_io(device, device->block->bp_block, 0, NULL);
if (rc == 4) {
if (!(test_and_set_bit(DASD_FLAG_DEVICE_RO, &device->flags)))
pr_warn("%s: The access mode of a DIAG device changed to read-only\n",
dev_name(&device->cdev->dev));
rc = 0;
}
if (rc)
pr_warn("%s: DIAG ERP failed with rc=%d\n",
dev_name(&device->cdev->dev), rc);
}
/* Start a given request at the device. Return zero on success, non-zero
* otherwise. */
static int
dasd_start_diag(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
struct dasd_diag_private *private;
struct dasd_diag_req *dreq;
int rc;
device = cqr->startdev;
if (cqr->retries < 0) {
DBF_DEV_EVENT(DBF_ERR, device, "DIAG start_IO: request %p "
"- no retry left)", cqr);
cqr->status = DASD_CQR_ERROR;
return -EIO;
}
private = device->private;
dreq = cqr->data;
private->iob.dev_nr = private->dev_id.devno;
private->iob.key = 0;
private->iob.flags = DASD_DIAG_RWFLAG_ASYNC;
private->iob.block_count = dreq->block_count;
private->iob.interrupt_params = (addr_t) cqr;
private->iob.bio_list = dreq->bio;
private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
cqr->startclk = get_tod_clock();
cqr->starttime = jiffies;
cqr->retries--;
rc = dia250(&private->iob, RW_BIO);
switch (rc) {
case 0: /* Synchronous I/O finished successfully */
cqr->stopclk = get_tod_clock();
cqr->status = DASD_CQR_SUCCESS;
/*
* Indicate to the calling function that only a
* dasd_schedule_bh() and no timer is needed.
*/
rc = -EACCES;
break;
case 8: /* Asynchronous I/O was started */
cqr->status = DASD_CQR_IN_IO;
rc = 0;
break;
default: /* Error condition */
cqr->status = DASD_CQR_QUEUED;
DBF_DEV_EVENT(DBF_WARNING, device, "dia250 returned rc=%d", rc);
dasd_diag_erp(device);
rc = -EIO;
break;
}
cqr->intrc = rc;
return rc;
}
/* Terminate given request at the device. */
static int
dasd_diag_term_IO(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
device = cqr->startdev;
mdsk_term_io(device);
mdsk_init_io(device, device->block->bp_block, 0, NULL);
cqr->status = DASD_CQR_CLEAR_PENDING;
cqr->stopclk = get_tod_clock();
dasd_schedule_device_bh(device);
return 0;
}
/* Handle external interruption. */
static void dasd_ext_handler(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
struct dasd_ccw_req *cqr, *next;
struct dasd_device *device;
unsigned long expires;
unsigned long flags;
addr_t ip;
int rc;
switch (ext_code.subcode >> 8) {
case DASD_DIAG_CODE_31BIT:
ip = (addr_t) param32;
break;
case DASD_DIAG_CODE_64BIT:
ip = (addr_t) param64;
break;
default:
return;
}
inc_irq_stat(IRQEXT_DSD);
if (!ip) { /* no intparm: unsolicited interrupt */
DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited "
"interrupt");
return;
}
cqr = (struct dasd_ccw_req *) ip;
device = (struct dasd_device *) cqr->startdev;
if (strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
DBF_DEV_EVENT(DBF_WARNING, device,
" magic number of dasd_ccw_req 0x%08X doesn't"
" match discipline 0x%08X",
cqr->magic, *(int *) (&device->discipline->name));
return;
}
/* get irq lock to modify request queue */
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
/* Check for a pending clear operation */
if (cqr->status == DASD_CQR_CLEAR_PENDING) {
cqr->status = DASD_CQR_CLEARED;
dasd_device_clear_timer(device);
dasd_schedule_device_bh(device);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return;
}
cqr->stopclk = get_tod_clock();
expires = 0;
if ((ext_code.subcode & 0xff) == 0) {
cqr->status = DASD_CQR_SUCCESS;
/* Start first request on queue if possible -> fast_io. */
if (!list_empty(&device->ccw_queue)) {
next = list_entry(device->ccw_queue.next,
struct dasd_ccw_req, devlist);
if (next->status == DASD_CQR_QUEUED) {
rc = dasd_start_diag(next);
if (rc == 0)
expires = next->expires;
}
}
} else {
cqr->status = DASD_CQR_QUEUED;
DBF_DEV_EVENT(DBF_DEBUG, device, "interrupt status for "
"request %p was %d (%d retries left)", cqr,
ext_code.subcode & 0xff, cqr->retries);
dasd_diag_erp(device);
}
if (expires != 0)
dasd_device_set_timer(device, expires);
else
dasd_device_clear_timer(device);
dasd_schedule_device_bh(device);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
/* Check whether device can be controlled by DIAG discipline. Return zero on
* success, non-zero otherwise. */
static int
dasd_diag_check_device(struct dasd_device *device)
{
struct dasd_diag_private *private = device->private;
struct dasd_diag_characteristics *rdc_data;
struct vtoc_cms_label *label;
struct dasd_block *block;
struct dasd_diag_bio *bio;
unsigned int sb, bsize;
blocknum_t end_block;
int rc;
if (private == NULL) {
private = kzalloc(sizeof(*private), GFP_KERNEL);
if (private == NULL) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Allocating memory for private DASD data "
"failed\n");
return -ENOMEM;
}
ccw_device_get_id(device->cdev, &private->dev_id);
device->private = private;
}
block = dasd_alloc_block();
if (IS_ERR(block)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"could not allocate dasd block structure");
device->private = NULL;
kfree(private);
return PTR_ERR(block);
}
device->block = block;
block->base = device;
/* Read Device Characteristics */
rdc_data = &private->rdc_data;
rdc_data->dev_nr = private->dev_id.devno;
rdc_data->rdc_len = sizeof(struct dasd_diag_characteristics);
rc = diag210((struct diag210 *) rdc_data);
if (rc) {
DBF_DEV_EVENT(DBF_WARNING, device, "failed to retrieve device "
"information (rc=%d)", rc);
rc = -EOPNOTSUPP;
goto out;
}
device->default_expires = DIAG_TIMEOUT;
device->default_retries = DIAG_MAX_RETRIES;
/* Figure out position of label block */
switch (private->rdc_data.vdev_class) {
case DEV_CLASS_FBA:
private->pt_block = 1;
break;
case DEV_CLASS_ECKD:
private->pt_block = 2;
break;
default:
pr_warn("%s: Device type %d is not supported in DIAG mode\n",
dev_name(&device->cdev->dev),
private->rdc_data.vdev_class);
rc = -EOPNOTSUPP;
goto out;
}
DBF_DEV_EVENT(DBF_INFO, device,
"%04X: %04X on real %04X/%02X",
rdc_data->dev_nr,
rdc_data->vdev_type,
rdc_data->rdev_type, rdc_data->rdev_model);
/* terminate all outstanding operations */
mdsk_term_io(device);
/* figure out blocksize of device */
label = (struct vtoc_cms_label *) get_zeroed_page(GFP_KERNEL);
if (label == NULL) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"No memory to allocate initialization request");
rc = -ENOMEM;
goto out;
}
bio = kzalloc(sizeof(*bio), GFP_KERNEL);
if (bio == NULL) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"No memory to allocate initialization bio");
rc = -ENOMEM;
goto out_label;
}
rc = 0;
end_block = 0;
/* try all sizes - needed for ECKD devices */
for (bsize = 512; bsize <= PAGE_SIZE; bsize <<= 1) {
mdsk_init_io(device, bsize, 0, &end_block);
memset(bio, 0, sizeof(*bio));
bio->type = MDSK_READ_REQ;
bio->block_number = private->pt_block + 1;
bio->buffer = label;
memset(&private->iob, 0, sizeof(struct dasd_diag_rw_io));
private->iob.dev_nr = rdc_data->dev_nr;
private->iob.key = 0;
private->iob.flags = 0; /* do synchronous io */
private->iob.block_count = 1;
private->iob.interrupt_params = 0;
private->iob.bio_list = bio;
private->iob.flaga = DASD_DIAG_FLAGA_DEFAULT;
rc = dia250(&private->iob, RW_BIO);
if (rc == 3) {
pr_warn("%s: A 64-bit DIAG call failed\n",
dev_name(&device->cdev->dev));
rc = -EOPNOTSUPP;
goto out_bio;
}
mdsk_term_io(device);
if (rc == 0)
break;
}
if (bsize > PAGE_SIZE) {
pr_warn("%s: Accessing the DASD failed because of an incorrect format (rc=%d)\n",
dev_name(&device->cdev->dev), rc);
rc = -EIO;
goto out_bio;
}
/* check for label block */
if (memcmp(label->label_id, DASD_DIAG_CMS1,
sizeof(DASD_DIAG_CMS1)) == 0) {
/* get formatted blocksize from label block */
bsize = (unsigned int) label->block_size;
block->blocks = (unsigned long) label->block_count;
} else
block->blocks = end_block;
block->bp_block = bsize;
block->s2b_shift = 0; /* bits to shift 512 to get a block */
for (sb = 512; sb < bsize; sb = sb << 1)
block->s2b_shift++;
rc = mdsk_init_io(device, block->bp_block, 0, NULL);
if (rc && (rc != 4)) {
pr_warn("%s: DIAG initialization failed with rc=%d\n",
dev_name(&device->cdev->dev), rc);
rc = -EIO;
} else {
if (rc == 4)
set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
pr_info("%s: New DASD with %ld byte/block, total size %ld "
"KB%s\n", dev_name(&device->cdev->dev),
(unsigned long) block->bp_block,
(unsigned long) (block->blocks <<
block->s2b_shift) >> 1,
(rc == 4) ? ", read-only device" : "");
rc = 0;
}
out_bio:
kfree(bio);
out_label:
free_page((long) label);
out:
if (rc) {
device->block = NULL;
dasd_free_block(block);
device->private = NULL;
kfree(private);
}
return rc;
}
/* Fill in virtual disk geometry for device. Return zero on success, non-zero
* otherwise. */
static int
dasd_diag_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
{
if (dasd_check_blocksize(block->bp_block) != 0)
return -EINVAL;
geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
geo->heads = 16;
geo->sectors = 128 >> block->s2b_shift;
return 0;
}
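/*
* Worked example (illustrative): a device with 512-byte blocks
* (s2b_shift == 0) and 204800 blocks reports 204800 >> 10 = 200
* cylinders, 16 heads and 128 sectors per track.
*/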
static dasd_erp_fn_t
dasd_diag_erp_action(struct dasd_ccw_req *cqr)
{
return dasd_default_erp_action;
}
static dasd_erp_fn_t
dasd_diag_erp_postaction(struct dasd_ccw_req *cqr)
{
return dasd_default_erp_postaction;
}
/* Create DASD request from block device request. Return pointer to new
* request on success, ERR_PTR otherwise. */
static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
struct dasd_block *block,
struct request *req)
{
struct dasd_ccw_req *cqr;
struct dasd_diag_req *dreq;
struct dasd_diag_bio *dbio;
struct req_iterator iter;
struct bio_vec bv;
char *dst;
unsigned int count;
sector_t recid, first_rec, last_rec;
unsigned int blksize, off;
unsigned char rw_cmd;
if (rq_data_dir(req) == READ)
rw_cmd = MDSK_READ_REQ;
else if (rq_data_dir(req) == WRITE)
rw_cmd = MDSK_WRITE_REQ;
else
return ERR_PTR(-EINVAL);
blksize = block->bp_block;
/* Calculate record id of first and last block. */
first_rec = blk_rq_pos(req) >> block->s2b_shift;
last_rec =
(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
/* Check struct bio and count the number of blocks for the request. */
count = 0;
rq_for_each_segment(bv, req, iter) {
if (bv.bv_len & (blksize - 1))
/* Fba can only do full blocks. */
return ERR_PTR(-EINVAL);
count += bv.bv_len >> (block->s2b_shift + 9);
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
return ERR_PTR(-EINVAL);
/* Build the request */
cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, struct_size(dreq, bio, count),
memdev, blk_mq_rq_to_pdu(req));
if (IS_ERR(cqr))
return cqr;
dreq = (struct dasd_diag_req *) cqr->data;
dreq->block_count = count;
dbio = dreq->bio;
recid = first_rec;
rq_for_each_segment(bv, req, iter) {
dst = bvec_virt(&bv);
for (off = 0; off < bv.bv_len; off += blksize) {
memset(dbio, 0, sizeof(struct dasd_diag_bio));
dbio->type = rw_cmd;
dbio->block_number = recid + 1;
dbio->buffer = dst;
dbio++;
dst += blksize;
recid++;
}
}
cqr->retries = memdev->default_retries;
cqr->buildclk = get_tod_clock();
if (blk_noretry_request(req) ||
block->base->features & DASD_FEATURE_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->startdev = memdev;
cqr->memdev = memdev;
cqr->block = block;
cqr->expires = memdev->default_expires * HZ;
cqr->status = DASD_CQR_FILLED;
return cqr;
}
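/*
* Worked example (illustrative): on a 512-byte-block device
* (s2b_shift == 0) a read of 8 sectors starting at sector 16 yields
* first_rec = 16, last_rec = 23 and count = 8; the resulting bios use
* the 1-based block numbers 17..24.
*/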
/* Release DASD request. Return non-zero if request was successful, zero
* otherwise. */
static int
dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
int status;
status = cqr->status == DASD_CQR_DONE;
dasd_sfree_request(cqr, cqr->memdev);
return status;
}
static void dasd_diag_handle_terminated_request(struct dasd_ccw_req *cqr)
{
if (cqr->retries < 0)
cqr->status = DASD_CQR_FAILED;
else
cqr->status = DASD_CQR_FILLED;
}
/* Fill in IOCTL data for device. */
static int
dasd_diag_fill_info(struct dasd_device *device,
struct dasd_information2_t *info)
{
struct dasd_diag_private *private = device->private;
info->label_block = (unsigned int) private->pt_block;
info->FBA_layout = 1;
info->format = DASD_FORMAT_LDL;
info->characteristics_size = sizeof(private->rdc_data);
memcpy(info->characteristics, &private->rdc_data,
sizeof(private->rdc_data));
info->confdata_size = 0;
return 0;
}
static void
dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req *req,
struct irb *stat)
{
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"dump sense not available for DIAG data");
}
/*
* Initialize block layer request queue.
*/
static void dasd_diag_setup_blk_queue(struct dasd_block *block)
{
unsigned int logical_block_size = block->bp_block;
struct request_queue *q = block->gdp->queue;
int max;
max = DIAG_MAX_BLOCKS << block->s2b_shift;
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
q->limits.max_dev_sectors = max;
blk_queue_logical_block_size(q, logical_block_size);
blk_queue_max_hw_sectors(q, max);
blk_queue_max_segments(q, USHRT_MAX);
/* With page sized segments each segment can be translated into one idaw/tidaw */
blk_queue_max_segment_size(q, PAGE_SIZE);
blk_queue_segment_boundary(q, PAGE_SIZE - 1);
blk_queue_dma_alignment(q, PAGE_SIZE - 1);
}
static int dasd_diag_pe_handler(struct dasd_device *device,
__u8 tbvpm, __u8 fcsecpm)
{
return dasd_generic_verify_path(device, tbvpm);
}
static struct dasd_discipline dasd_diag_discipline = {
.owner = THIS_MODULE,
.name = "DIAG",
.ebcname = "DIAG",
.check_device = dasd_diag_check_device,
.pe_handler = dasd_diag_pe_handler,
.fill_geometry = dasd_diag_fill_geometry,
.setup_blk_queue = dasd_diag_setup_blk_queue,
.start_IO = dasd_start_diag,
.term_IO = dasd_diag_term_IO,
.handle_terminated_request = dasd_diag_handle_terminated_request,
.erp_action = dasd_diag_erp_action,
.erp_postaction = dasd_diag_erp_postaction,
.build_cp = dasd_diag_build_cp,
.free_cp = dasd_diag_free_cp,
.dump_sense = dasd_diag_dump_sense,
.fill_info = dasd_diag_fill_info,
};
static int __init
dasd_diag_init(void)
{
if (!MACHINE_IS_VM) {
pr_info("Discipline %s cannot be used without z/VM\n",
dasd_diag_discipline.name);
return -ENODEV;
}
ASCEBC(dasd_diag_discipline.ebcname, 4);
irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
register_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler);
dasd_diag_discipline_pointer = &dasd_diag_discipline;
return 0;
}
static void __exit
dasd_diag_cleanup(void)
{
unregister_external_irq(EXT_IRQ_CP_SERVICE, dasd_ext_handler);
irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
dasd_diag_discipline_pointer = NULL;
}
module_init(dasd_diag_init);
module_exit(dasd_diag_cleanup);
| linux-master | drivers/s390/block/dasd_diag.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Device driver for s390 storage class memory.
*
* Copyright IBM Corp. 2012
* Author(s): Sebastian Ott <[email protected]>
*/
#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/eadm.h>
#include "scm_blk.h"
static void scm_notify(struct scm_device *scmdev, enum scm_event event)
{
struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
switch (event) {
case SCM_CHANGE:
pr_info("%lx: The capabilities of the SCM increment changed\n",
(unsigned long) scmdev->address);
SCM_LOG(2, "State changed");
SCM_LOG_STATE(2, scmdev);
break;
case SCM_AVAIL:
SCM_LOG(2, "Increment available");
SCM_LOG_STATE(2, scmdev);
scm_blk_set_available(bdev);
break;
}
}
static int scm_probe(struct scm_device *scmdev)
{
struct scm_blk_dev *bdev;
int ret;
SCM_LOG(2, "probe");
SCM_LOG_STATE(2, scmdev);
if (scmdev->attrs.oper_state != OP_STATE_GOOD)
return -EINVAL;
bdev = kzalloc(sizeof(*bdev), GFP_KERNEL);
if (!bdev)
return -ENOMEM;
dev_set_drvdata(&scmdev->dev, bdev);
ret = scm_blk_dev_setup(bdev, scmdev);
if (ret) {
dev_set_drvdata(&scmdev->dev, NULL);
kfree(bdev);
goto out;
}
out:
return ret;
}
static void scm_remove(struct scm_device *scmdev)
{
struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
scm_blk_dev_cleanup(bdev);
dev_set_drvdata(&scmdev->dev, NULL);
kfree(bdev);
}
static struct scm_driver scm_drv = {
.drv = {
.name = "scm_block",
.owner = THIS_MODULE,
},
.notify = scm_notify,
.probe = scm_probe,
.remove = scm_remove,
.handler = scm_blk_irq,
};
int __init scm_drv_init(void)
{
return scm_driver_register(&scm_drv);
}
void scm_drv_cleanup(void)
{
scm_driver_unregister(&scm_drv);
}
| linux-master | drivers/s390/block/scm_drv.c |
// SPDX-License-Identifier: GPL-2.0
/*
* PAV alias management for the DASD ECKD discipline
*
* Copyright IBM Corp. 2007
* Author(s): Stefan Weinhuber <[email protected]>
*/
#define KMSG_COMPONENT "dasd-eckd"
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include "dasd_int.h"
#include "dasd_eckd.h"
#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"
/*
* General concept of alias management:
* - PAV and DASD alias management is specific to the eckd discipline.
* - A device is connected to an lcu as long as the device exists.
* dasd_alias_make_device_known_to_lcu will be called when the
* device is checked by the eckd discipline and
* dasd_alias_disconnect_device_from_lcu will be called
* before the device is deleted.
* - The dasd_alias_add_device / dasd_alias_remove_device
* functions mark the point when a device is 'ready for service'.
* - A summary unit check is a rare occasion, but it is mandatory to
* support it. It requires some complex recovery actions before the
* devices can be used again (see dasd_alias_handle_summary_unit_check).
* - dasd_alias_get_start_dev will find an alias device that can be used
* instead of the base device and does some (very simple) load balancing.
* This is the function that gets called for each I/O, so when improving
* something, this function should get faster or better, the rest has just
* to be correct.
*/
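/*
* Data structure overview (a sketch derived from the code below):
*
*	aliastree (struct alias_root)
*	    serverlist: one struct alias_server per storage server uid
*		lculist: one struct alias_lcu per ssid
*		    grouplist: struct alias_pav_group (bases + aliases)
*		    active_devices / inactive_devices lists
*		    uac: cached unit address configuration
*/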
static void summary_unit_check_handling_work(struct work_struct *);
static void lcu_update_work(struct work_struct *);
static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);
static struct alias_root aliastree = {
.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
};
static struct alias_server *_find_server(struct dasd_uid *uid)
{
struct alias_server *pos;
list_for_each_entry(pos, &aliastree.serverlist, server) {
if (!strncmp(pos->uid.vendor, uid->vendor,
sizeof(uid->vendor))
&& !strncmp(pos->uid.serial, uid->serial,
sizeof(uid->serial)))
return pos;
}
return NULL;
}
static struct alias_lcu *_find_lcu(struct alias_server *server,
struct dasd_uid *uid)
{
struct alias_lcu *pos;
list_for_each_entry(pos, &server->lculist, lcu) {
if (pos->uid.ssid == uid->ssid)
return pos;
}
return NULL;
}
static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
struct dasd_uid *uid)
{
struct alias_pav_group *pos;
__u8 search_unit_addr;
/* for hyper pav there is only one group */
if (lcu->pav == HYPER_PAV) {
if (list_empty(&lcu->grouplist))
return NULL;
else
return list_first_entry(&lcu->grouplist,
struct alias_pav_group, group);
}
/* for base pav we have to find the group that matches the base */
if (uid->type == UA_BASE_DEVICE)
search_unit_addr = uid->real_unit_addr;
else
search_unit_addr = uid->base_unit_addr;
list_for_each_entry(pos, &lcu->grouplist, group) {
if (pos->uid.base_unit_addr == search_unit_addr &&
!strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
return pos;
}
return NULL;
}
static struct alias_server *_allocate_server(struct dasd_uid *uid)
{
struct alias_server *server;
server = kzalloc(sizeof(*server), GFP_KERNEL);
if (!server)
return ERR_PTR(-ENOMEM);
memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
INIT_LIST_HEAD(&server->server);
INIT_LIST_HEAD(&server->lculist);
return server;
}
static void _free_server(struct alias_server *server)
{
kfree(server);
}
static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
struct alias_lcu *lcu;
lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
if (!lcu)
return ERR_PTR(-ENOMEM);
lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
if (!lcu->uac)
goto out_err1;
lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
if (!lcu->rsu_cqr)
goto out_err2;
lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
GFP_KERNEL | GFP_DMA);
if (!lcu->rsu_cqr->cpaddr)
goto out_err3;
lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
if (!lcu->rsu_cqr->data)
goto out_err4;
memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
lcu->uid.ssid = uid->ssid;
lcu->pav = NO_PAV;
lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
INIT_LIST_HEAD(&lcu->lcu);
INIT_LIST_HEAD(&lcu->inactive_devices);
INIT_LIST_HEAD(&lcu->active_devices);
INIT_LIST_HEAD(&lcu->grouplist);
INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
spin_lock_init(&lcu->lock);
init_completion(&lcu->lcu_setup);
return lcu;
out_err4:
kfree(lcu->rsu_cqr->cpaddr);
out_err3:
kfree(lcu->rsu_cqr);
out_err2:
kfree(lcu->uac);
out_err1:
kfree(lcu);
return ERR_PTR(-ENOMEM);
}
static void _free_lcu(struct alias_lcu *lcu)
{
kfree(lcu->rsu_cqr->data);
kfree(lcu->rsu_cqr->cpaddr);
kfree(lcu->rsu_cqr);
kfree(lcu->uac);
kfree(lcu);
}
/*
* This is the function that will allocate all the server and lcu data,
* so this function must be called first for a new device.
* Returns 0 on success; a negative return code indicates that
* something went wrong (e.g. -ENOMEM).
*/
int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
unsigned long flags;
struct alias_server *server, *newserver;
struct alias_lcu *lcu, *newlcu;
struct dasd_uid uid;
device->discipline->get_uid(device, &uid);
spin_lock_irqsave(&aliastree.lock, flags);
server = _find_server(&uid);
if (!server) {
spin_unlock_irqrestore(&aliastree.lock, flags);
newserver = _allocate_server(&uid);
if (IS_ERR(newserver))
return PTR_ERR(newserver);
spin_lock_irqsave(&aliastree.lock, flags);
server = _find_server(&uid);
if (!server) {
list_add(&newserver->server, &aliastree.serverlist);
server = newserver;
} else {
/* someone was faster */
_free_server(newserver);
}
}
lcu = _find_lcu(server, &uid);
if (!lcu) {
spin_unlock_irqrestore(&aliastree.lock, flags);
newlcu = _allocate_lcu(&uid);
if (IS_ERR(newlcu))
return PTR_ERR(newlcu);
spin_lock_irqsave(&aliastree.lock, flags);
lcu = _find_lcu(server, &uid);
if (!lcu) {
list_add(&newlcu->lcu, &server->lculist);
lcu = newlcu;
} else {
/* someone was faster */
_free_lcu(newlcu);
}
}
spin_lock(&lcu->lock);
list_add(&device->alias_list, &lcu->inactive_devices);
private->lcu = lcu;
spin_unlock(&lcu->lock);
spin_unlock_irqrestore(&aliastree.lock, flags);
return 0;
}
/*
* This function removes a device from the scope of alias management.
* The complicated part is to make sure that it is not in use by
* any of the workers. If necessary cancel the work.
*/
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
unsigned long flags;
struct alias_lcu *lcu;
struct alias_server *server;
int was_pending;
struct dasd_uid uid;
lcu = private->lcu;
/* nothing to do if already disconnected */
if (!lcu)
return;
device->discipline->get_uid(device, &uid);
spin_lock_irqsave(&lcu->lock, flags);
/* make sure that the workers don't use this device */
if (device == lcu->suc_data.device) {
spin_unlock_irqrestore(&lcu->lock, flags);
cancel_work_sync(&lcu->suc_data.worker);
spin_lock_irqsave(&lcu->lock, flags);
if (device == lcu->suc_data.device) {
dasd_put_device(device);
lcu->suc_data.device = NULL;
}
}
was_pending = 0;
if (device == lcu->ruac_data.device) {
spin_unlock_irqrestore(&lcu->lock, flags);
was_pending = 1;
cancel_delayed_work_sync(&lcu->ruac_data.dwork);
spin_lock_irqsave(&lcu->lock, flags);
if (device == lcu->ruac_data.device) {
dasd_put_device(device);
lcu->ruac_data.device = NULL;
}
}
private->lcu = NULL;
spin_unlock_irqrestore(&lcu->lock, flags);
spin_lock_irqsave(&aliastree.lock, flags);
spin_lock(&lcu->lock);
list_del_init(&device->alias_list);
if (list_empty(&lcu->grouplist) &&
list_empty(&lcu->active_devices) &&
list_empty(&lcu->inactive_devices)) {
list_del(&lcu->lcu);
spin_unlock(&lcu->lock);
_free_lcu(lcu);
lcu = NULL;
} else {
if (was_pending)
_schedule_lcu_update(lcu, NULL);
spin_unlock(&lcu->lock);
}
server = _find_server(&uid);
if (server && list_empty(&server->lculist)) {
list_del(&server->server);
_free_server(server);
}
spin_unlock_irqrestore(&aliastree.lock, flags);
}
/*
* This function assumes that the unit address configuration stored
* in the lcu is up to date and will update the device uid before
* adding it to a pav group.
*/
static int _add_device_to_lcu(struct alias_lcu *lcu,
struct dasd_device *device,
struct dasd_device *pos)
{
struct dasd_eckd_private *private = device->private;
struct alias_pav_group *group;
struct dasd_uid uid;
spin_lock(get_ccwdev_lock(device->cdev));
private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
private->uid.base_unit_addr =
lcu->uac->unit[private->uid.real_unit_addr].base_ua;
uid = private->uid;
spin_unlock(get_ccwdev_lock(device->cdev));
/* if we have no PAV anyway, we don't need to bother with PAV groups */
if (lcu->pav == NO_PAV) {
list_move(&device->alias_list, &lcu->active_devices);
return 0;
}
group = _find_group(lcu, &uid);
if (!group) {
group = kzalloc(sizeof(*group), GFP_ATOMIC);
if (!group)
return -ENOMEM;
memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
group->uid.ssid = uid.ssid;
if (uid.type == UA_BASE_DEVICE)
group->uid.base_unit_addr = uid.real_unit_addr;
else
group->uid.base_unit_addr = uid.base_unit_addr;
memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
INIT_LIST_HEAD(&group->group);
INIT_LIST_HEAD(&group->baselist);
INIT_LIST_HEAD(&group->aliaslist);
list_add(&group->group, &lcu->grouplist);
}
if (uid.type == UA_BASE_DEVICE)
list_move(&device->alias_list, &group->baselist);
else
list_move(&device->alias_list, &group->aliaslist);
private->pavgroup = group;
return 0;
}
static void _remove_device_from_lcu(struct alias_lcu *lcu,
struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
struct alias_pav_group *group;
list_move(&device->alias_list, &lcu->inactive_devices);
group = private->pavgroup;
if (!group)
return;
private->pavgroup = NULL;
if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
list_del(&group->group);
kfree(group);
return;
}
if (group->next == device)
group->next = NULL;
}
static int
suborder_not_supported(struct dasd_ccw_req *cqr)
{
char *sense;
char reason;
char msg_format;
char msg_no;
/*
* intrc values ENODEV, ENOLINK and EPERM
* are obtained from dasd_sleep_on and indicate that no
* I/O operation can be started
*/
if (cqr->intrc == -ENODEV)
return 1;
if (cqr->intrc == -ENOLINK)
return 1;
if (cqr->intrc == -EPERM)
return 1;
sense = dasd_get_sense(&cqr->irb);
if (!sense)
return 0;
reason = sense[0];
msg_format = (sense[7] & 0xF0);
msg_no = (sense[7] & 0x0F);
/* command reject, Format 0 MSG 4 - invalid parameter */
if ((reason == 0x80) && (msg_format == 0x00) && (msg_no == 0x04))
return 1;
return 0;
}
static int read_unit_address_configuration(struct dasd_device *device,
struct alias_lcu *lcu)
{
struct dasd_psf_prssd_data *prssdp;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
unsigned long flags;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data)),
device, NULL);
if (IS_ERR(cqr))
return PTR_ERR(cqr);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
cqr->retries = 10;
cqr->expires = 20 * HZ;
/* Prepare for Read Subsystem Data */
prssdp = (struct dasd_psf_prssd_data *) cqr->data;
memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = 0x0e; /* Read unit address configuration */
/* all other bytes of prssdp must be zero */
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->cda = (__u32)virt_to_phys(prssdp);
/* Read Subsystem Data - feature codes */
memset(lcu->uac, 0, sizeof(*(lcu->uac)));
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*(lcu->uac));
ccw->cda = (__u32)virt_to_phys(lcu->uac);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* need to unset flag here to detect race with summary unit check */
spin_lock_irqsave(&lcu->lock, flags);
lcu->flags &= ~NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags);
rc = dasd_sleep_on(cqr);
if (!rc)
goto out;
if (suborder_not_supported(cqr)) {
/* suborder not supported or device unusable for IO */
rc = -EOPNOTSUPP;
} else {
/* IO failed but should be retried */
spin_lock_irqsave(&lcu->lock, flags);
lcu->flags |= NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags);
}
out:
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
unsigned long flags;
struct alias_pav_group *pavgroup, *tempgroup;
struct dasd_device *device, *tempdev;
int i, rc;
struct dasd_eckd_private *private;
spin_lock_irqsave(&lcu->lock, flags);
list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
alias_list) {
list_move(&device->alias_list, &lcu->active_devices);
private = device->private;
private->pavgroup = NULL;
}
list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
alias_list) {
list_move(&device->alias_list, &lcu->active_devices);
private = device->private;
private->pavgroup = NULL;
}
list_del(&pavgroup->group);
kfree(pavgroup);
}
spin_unlock_irqrestore(&lcu->lock, flags);
rc = read_unit_address_configuration(refdev, lcu);
if (rc)
return rc;
spin_lock_irqsave(&lcu->lock, flags);
/*
* If another update is needed, skip the remaining handling:
* the data might already be outdated, and in particular we must
* not add the device to an LCU with a pending update.
*/
if (lcu->flags & NEED_UAC_UPDATE)
goto out;
lcu->pav = NO_PAV;
for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
switch (lcu->uac->unit[i].ua_type) {
case UA_BASE_PAV_ALIAS:
lcu->pav = BASE_PAV;
break;
case UA_HYPER_PAV_ALIAS:
lcu->pav = HYPER_PAV;
break;
}
if (lcu->pav != NO_PAV)
break;
}
list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
alias_list) {
_add_device_to_lcu(lcu, device, refdev);
}
out:
spin_unlock_irqrestore(&lcu->lock, flags);
return 0;
}
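/*
* Worker function that performs a deferred LCU update; it reschedules
* itself after 30 seconds if the update failed or if another update
* became necessary in the meantime.
*/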
static void lcu_update_work(struct work_struct *work)
{
struct alias_lcu *lcu;
struct read_uac_work_data *ruac_data;
struct dasd_device *device;
unsigned long flags;
int rc;
ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
device = ruac_data->device;
rc = _lcu_update(device, lcu);
/*
* Need to check the flags again, as there could have been another
* prepare_update or a new device while we were still processing
* the data
*/
spin_lock_irqsave(&lcu->lock, flags);
if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
" alias data in lcu (rc = %d), retry later", rc);
if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
dasd_put_device(device);
} else {
dasd_put_device(device);
lcu->ruac_data.device = NULL;
lcu->flags &= ~UPDATE_PENDING;
}
spin_unlock_irqrestore(&lcu->lock, flags);
}
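/*
* Mark the LCU as needing an update and schedule the ruac worker on a
* suitable device: prefer the given device, then any grouped base or
* alias device, then any active device. Must be called with the
* lcu lock held.
*/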
static int _schedule_lcu_update(struct alias_lcu *lcu,
struct dasd_device *device)
{
struct dasd_device *usedev = NULL;
struct alias_pav_group *group;
lcu->flags |= NEED_UAC_UPDATE;
if (lcu->ruac_data.device) {
/* already scheduled or running */
return 0;
}
if (device && !list_empty(&device->alias_list))
usedev = device;
if (!usedev && !list_empty(&lcu->grouplist)) {
group = list_first_entry(&lcu->grouplist,
struct alias_pav_group, group);
if (!list_empty(&group->baselist))
usedev = list_first_entry(&group->baselist,
struct dasd_device,
alias_list);
else if (!list_empty(&group->aliaslist))
usedev = list_first_entry(&group->aliaslist,
struct dasd_device,
alias_list);
}
if (!usedev && !list_empty(&lcu->active_devices)) {
usedev = list_first_entry(&lcu->active_devices,
struct dasd_device, alias_list);
}
/*
* if we haven't found a proper device yet, give up for now; the next
* device that is set active will trigger an lcu update
*/
if (!usedev)
return -EINVAL;
dasd_get_device(usedev);
lcu->ruac_data.device = usedev;
if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
dasd_put_device(usedev);
return 0;
}
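/*
* Add a device to its LCU: if the uac data is still current, sort the
* device into the proper PAV group right away; otherwise park it on the
* active_devices list and trigger an LCU update.
*/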
int dasd_alias_add_device(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
__u8 uaddr = private->uid.real_unit_addr;
struct alias_lcu *lcu = private->lcu;
unsigned long flags;
int rc;
rc = 0;
spin_lock_irqsave(&lcu->lock, flags);
/*
* Check if device and lcu type differ. If so, the uac data may be
* outdated and needs to be updated.
*/
if (private->uid.type != lcu->uac->unit[uaddr].ua_type) {
lcu->flags |= UPDATE_PENDING;
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"uid type mismatch - trigger rescan");
}
if (!(lcu->flags & UPDATE_PENDING)) {
rc = _add_device_to_lcu(lcu, device, device);
if (rc)
lcu->flags |= UPDATE_PENDING;
}
if (lcu->flags & UPDATE_PENDING) {
list_move(&device->alias_list, &lcu->active_devices);
private->pavgroup = NULL;
_schedule_lcu_update(lcu, device);
}
spin_unlock_irqrestore(&lcu->lock, flags);
return rc;
}
int dasd_alias_update_add_device(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
private->lcu->flags |= UPDATE_PENDING;
return dasd_alias_add_device(device);
}
int dasd_alias_remove_device(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
struct alias_lcu *lcu = private->lcu;
unsigned long flags;
/* nothing to do if already removed */
if (!lcu)
return 0;
spin_lock_irqsave(&lcu->lock, flags);
_remove_device_from_lcu(lcu, device);
spin_unlock_irqrestore(&lcu->lock, flags);
return 0;
}
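/*
* Select an alias device from the PAV group of the given base device in
* a round-robin fashion. Returns NULL if no suitable alias is available,
* e.g. because PAV is disabled, an update is pending, the prefix command
* is not supported, or the alias is busier than the base device.
*/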
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
struct dasd_eckd_private *alias_priv, *private = base_device->private;
struct alias_lcu *lcu = private->lcu;
struct dasd_device *alias_device;
struct alias_pav_group *group;
unsigned long flags;
if (!lcu)
return NULL;
if (lcu->pav == NO_PAV ||
lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
return NULL;
if (unlikely(!(private->features.feature[8] & 0x01))) {
/*
* PAV is enabled but prefix is not - very unlikely and
* points to a lost path group; use the base device to do IO
*/
DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
"Prefix not enabled with PAV enabled\n");
return NULL;
}
spin_lock_irqsave(&lcu->lock, flags);
group = private->pavgroup;
if (!group) {
spin_unlock_irqrestore(&lcu->lock, flags);
return NULL;
}
alias_device = group->next;
if (!alias_device) {
if (list_empty(&group->aliaslist)) {
spin_unlock_irqrestore(&lcu->lock, flags);
return NULL;
} else {
alias_device = list_first_entry(&group->aliaslist,
struct dasd_device,
alias_list);
}
}
if (list_is_last(&alias_device->alias_list, &group->aliaslist))
group->next = list_first_entry(&group->aliaslist,
struct dasd_device, alias_list);
else
group->next = list_first_entry(&alias_device->alias_list,
struct dasd_device, alias_list);
spin_unlock_irqrestore(&lcu->lock, flags);
alias_priv = alias_device->private;
if ((alias_priv->count < private->count) && !alias_device->stopped &&
!test_bit(DASD_FLAG_OFFLINE, &alias_device->flags))
return alias_device;
else
return NULL;
}
/*
* Summary unit check handling depends on the way alias devices
* are handled, so it is done here rather than in dasd_eckd.c
*/
static int reset_summary_unit_check(struct alias_lcu *lcu,
struct dasd_device *device,
char reason)
{
struct dasd_ccw_req *cqr;
int rc = 0;
struct ccw1 *ccw;
cqr = lcu->rsu_cqr;
memcpy((char *) &cqr->magic, "ECKD", 4);
ASCEBC((char *) &cqr->magic, 4);
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RSCK;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 16;
ccw->cda = (__u32)virt_to_phys(cqr->data);
((char *)cqr->data)[0] = reason;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
cqr->retries = 255; /* set retry counter to enable basic ERP */
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->expires = 5 * HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_immediatly(cqr);
return rc;
}
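/*
* Kick the bottom halves of all base devices known to the LCU so that
* queued requests are restarted after summary unit check handling.
*/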
static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
{
struct alias_pav_group *pavgroup;
struct dasd_device *device;
struct dasd_eckd_private *private;
/* active and inactive list can contain alias as well as base devices */
list_for_each_entry(device, &lcu->active_devices, alias_list) {
private = device->private;
if (private->uid.type != UA_BASE_DEVICE)
continue;
dasd_schedule_block_bh(device->block);
dasd_schedule_device_bh(device);
}
list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
private = device->private;
if (private->uid.type != UA_BASE_DEVICE)
continue;
dasd_schedule_block_bh(device->block);
dasd_schedule_device_bh(device);
}
list_for_each_entry(pavgroup, &lcu->grouplist, group) {
list_for_each_entry(device, &pavgroup->baselist, alias_list) {
dasd_schedule_block_bh(device->block);
dasd_schedule_device_bh(device);
}
}
}
static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
struct alias_pav_group *pavgroup;
struct dasd_device *device, *temp;
struct dasd_eckd_private *private;
unsigned long flags;
LIST_HEAD(active);
/*
* The problem here is that dasd_flush_device_queue may wait
* for the termination of a request to complete. We can't keep
* the lcu lock during that time, so we must assume that the
* lists may have changed.
* Idea: first gather all active alias devices in a separate list,
* then flush the first element of this list unlocked, and afterwards
* check if it is still on the list before moving it to the
* active_devices list.
*/
spin_lock_irqsave(&lcu->lock, flags);
list_for_each_entry_safe(device, temp, &lcu->active_devices,
alias_list) {
private = device->private;
if (private->uid.type == UA_BASE_DEVICE)
continue;
list_move(&device->alias_list, &active);
}
list_for_each_entry(pavgroup, &lcu->grouplist, group) {
list_splice_init(&pavgroup->aliaslist, &active);
}
while (!list_empty(&active)) {
device = list_first_entry(&active, struct dasd_device,
alias_list);
spin_unlock_irqrestore(&lcu->lock, flags);
dasd_flush_device_queue(device);
spin_lock_irqsave(&lcu->lock, flags);
/*
* only move device around if it wasn't moved away while we
* were waiting for the flush
*/
if (device == list_first_entry(&active,
struct dasd_device, alias_list)) {
list_move(&device->alias_list, &lcu->active_devices);
private = device->private;
private->pavgroup = NULL;
}
}
spin_unlock_irqrestore(&lcu->lock, flags);
}
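/*
* Set the DASD_STOPPED_SU stop bit on every device known to the LCU;
* _unstop_all_devices_on_lcu below removes it again. Both functions
* must be called with the lcu lock held.
*/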
static void _stop_all_devices_on_lcu(struct alias_lcu *lcu)
{
struct alias_pav_group *pavgroup;
struct dasd_device *device;
list_for_each_entry(device, &lcu->active_devices, alias_list) {
spin_lock(get_ccwdev_lock(device->cdev));
dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
spin_unlock(get_ccwdev_lock(device->cdev));
}
list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
spin_lock(get_ccwdev_lock(device->cdev));
dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
spin_unlock(get_ccwdev_lock(device->cdev));
}
list_for_each_entry(pavgroup, &lcu->grouplist, group) {
list_for_each_entry(device, &pavgroup->baselist, alias_list) {
spin_lock(get_ccwdev_lock(device->cdev));
dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
spin_unlock(get_ccwdev_lock(device->cdev));
}
list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
spin_lock(get_ccwdev_lock(device->cdev));
dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
spin_unlock(get_ccwdev_lock(device->cdev));
}
}
}
static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
{
struct alias_pav_group *pavgroup;
struct dasd_device *device;
list_for_each_entry(device, &lcu->active_devices, alias_list) {
spin_lock(get_ccwdev_lock(device->cdev));
dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
spin_unlock(get_ccwdev_lock(device->cdev));
}
list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
spin_lock(get_ccwdev_lock(device->cdev));
dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
spin_unlock(get_ccwdev_lock(device->cdev));
}
list_for_each_entry(pavgroup, &lcu->grouplist, group) {
list_for_each_entry(device, &pavgroup->baselist, alias_list) {
spin_lock(get_ccwdev_lock(device->cdev));
dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
spin_unlock(get_ccwdev_lock(device->cdev));
}
list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
spin_lock(get_ccwdev_lock(device->cdev));
dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
spin_unlock(get_ccwdev_lock(device->cdev));
}
}
}
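/*
* Worker function for summary unit check handling: flush all alias
* devices, reset the summary unit check on the affected device, unstop
* and restart the devices on the LCU, and schedule a fresh read of the
* alias configuration.
*/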
static void summary_unit_check_handling_work(struct work_struct *work)
{
struct alias_lcu *lcu;
struct summary_unit_check_work_data *suc_data;
unsigned long flags;
struct dasd_device *device;
suc_data = container_of(work, struct summary_unit_check_work_data,
worker);
lcu = container_of(suc_data, struct alias_lcu, suc_data);
device = suc_data->device;
/* 1. flush alias devices */
flush_all_alias_devices_on_lcu(lcu);
/* 2. reset summary unit check */
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
dasd_device_remove_stop_bits(device,
(DASD_STOPPED_SU | DASD_STOPPED_PENDING));
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
reset_summary_unit_check(lcu, device, suc_data->reason);
spin_lock_irqsave(&lcu->lock, flags);
_unstop_all_devices_on_lcu(lcu);
_restart_all_base_devices_on_lcu(lcu);
/* 3. read new alias configuration */
_schedule_lcu_update(lcu, device);
lcu->suc_data.device = NULL;
dasd_put_device(device);
spin_unlock_irqrestore(&lcu->lock, flags);
}
void dasd_alias_handle_summary_unit_check(struct work_struct *work)
{
struct dasd_device *device = container_of(work, struct dasd_device,
suc_work);
struct dasd_eckd_private *private = device->private;
struct alias_lcu *lcu;
unsigned long flags;
lcu = private->lcu;
if (!lcu) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"device not ready to handle summary"
" unit check (no lcu structure)");
goto out;
}
spin_lock_irqsave(&lcu->lock, flags);
/*
* If this device is about to be removed, just return and wait for
* the next interrupt on a different device
*/
if (list_empty(&device->alias_list)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"device is in offline processing,"
" don't do summary unit check handling");
goto out_unlock;
}
if (lcu->suc_data.device) {
/* already scheduled or running */
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"previous instance of summary unit check worker"
" still pending");
goto out_unlock;
}
_stop_all_devices_on_lcu(lcu);
/* prepare for lcu_update */
lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
lcu->suc_data.reason = private->suc_reason;
lcu->suc_data.device = device;
dasd_get_device(device);
if (!schedule_work(&lcu->suc_data.worker))
dasd_put_device(device);
out_unlock:
spin_unlock_irqrestore(&lcu->lock, flags);
out:
clear_bit(DASD_FLAG_SUC, &device->flags);
dasd_put_device(device);
}
| linux-master | drivers/s390/block/dasd_alias.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Block driver for s390 storage class memory.
*
* Copyright IBM Corp. 2012
* Author(s): Sebastian Ott <[email protected]>
*/
#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"
debug_info_t *scm_debug;
static int scm_major;
static mempool_t *aidaw_pool;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static unsigned int nr_requests_per_io = 8;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");
module_param(nr_requests_per_io, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");
MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");
static void __scm_free_rq(struct scm_request *scmrq)
{
struct aob_rq_header *aobrq = to_aobrq(scmrq);
free_page((unsigned long) scmrq->aob);
kfree(scmrq->request);
kfree(aobrq);
}
static void scm_free_rqs(void)
{
struct list_head *iter, *safe;
struct scm_request *scmrq;
spin_lock_irq(&list_lock);
list_for_each_safe(iter, safe, &inactive_requests) {
scmrq = list_entry(iter, struct scm_request, list);
list_del(&scmrq->list);
__scm_free_rq(scmrq);
}
spin_unlock_irq(&list_lock);
mempool_destroy(aidaw_pool);
}
static int __scm_alloc_rq(void)
{
struct aob_rq_header *aobrq;
struct scm_request *scmrq;
aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
if (!aobrq)
return -ENOMEM;
scmrq = (void *) aobrq->data;
scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
if (!scmrq->aob)
goto free;
scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
GFP_KERNEL);
if (!scmrq->request)
goto free;
INIT_LIST_HEAD(&scmrq->list);
spin_lock_irq(&list_lock);
list_add(&scmrq->list, &inactive_requests);
spin_unlock_irq(&list_lock);
return 0;
free:
__scm_free_rq(scmrq);
return -ENOMEM;
}
static int scm_alloc_rqs(unsigned int nrqs)
{
int ret = 0;
aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
if (!aidaw_pool)
return -ENOMEM;
while (nrqs-- && !ret)
ret = __scm_alloc_rq();
return ret;
}
static struct scm_request *scm_request_fetch(void)
{
struct scm_request *scmrq = NULL;
spin_lock_irq(&list_lock);
if (list_empty(&inactive_requests))
goto out;
scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
list_del(&scmrq->list);
out:
spin_unlock_irq(&list_lock);
return scmrq;
}
static void scm_request_done(struct scm_request *scmrq)
{
unsigned long flags;
struct msb *msb;
u64 aidaw;
int i;
for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
msb = &scmrq->aob->msb[i];
aidaw = msb->data_addr;
if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
IS_ALIGNED(aidaw, PAGE_SIZE))
mempool_free(virt_to_page((void *)aidaw), aidaw_pool);
}
spin_lock_irqsave(&list_lock, flags);
list_add(&scmrq->list, &inactive_requests);
spin_unlock_irqrestore(&list_lock, flags);
}
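/* Reject writes while the device is in the write-prohibited state. */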
static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
{
return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
}
static inline struct aidaw *scm_aidaw_alloc(void)
{
struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);
return page ? page_address(page) : NULL;
}
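/*
* Number of bytes that can still be described by aidaw entries between
* the given entry and the end of its page (each entry covers one 4K page).
*/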
static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
{
unsigned long _aidaw = (unsigned long) aidaw;
unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;
return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
}
struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
{
struct aidaw *aidaw;
if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
return scmrq->next_aidaw;
aidaw = scm_aidaw_alloc();
if (aidaw)
memset(aidaw, 0, PAGE_SIZE);
return aidaw;
}
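/*
* Fill in the next msb of the aob for the current request and build the
* aidaw list describing its data pages. Returns -ENOMEM if no aidaw
* page can be allocated.
*/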
static int scm_request_prepare(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
struct scm_device *scmdev = bdev->gendisk->private_data;
int pos = scmrq->aob->request.msb_count;
struct msb *msb = &scmrq->aob->msb[pos];
struct request *req = scmrq->request[pos];
struct req_iterator iter;
struct aidaw *aidaw;
struct bio_vec bv;
aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
if (!aidaw)
return -ENOMEM;
msb->bs = MSB_BS_4K;
scmrq->aob->request.msb_count++;
msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
msb->flags |= MSB_FLAG_IDA;
msb->data_addr = (u64) aidaw;
rq_for_each_segment(bv, req, iter) {
WARN_ON(bv.bv_offset);
msb->blk_count += bv.bv_len >> 12;
aidaw->data_addr = (u64) page_address(bv.bv_page);
aidaw++;
}
scmrq->next_aidaw = aidaw;
return 0;
}
static inline void scm_request_set(struct scm_request *scmrq,
struct request *req)
{
scmrq->request[scmrq->aob->request.msb_count] = req;
}
static inline void scm_request_init(struct scm_blk_dev *bdev,
struct scm_request *scmrq)
{
struct aob_rq_header *aobrq = to_aobrq(scmrq);
struct aob *aob = scmrq->aob;
memset(scmrq->request, 0,
nr_requests_per_io * sizeof(scmrq->request[0]));
memset(aob, 0, sizeof(*aob));
aobrq->scmdev = bdev->scmdev;
aob->request.cmd_code = ARQB_CMD_MOVE;
aob->request.data = (u64) aobrq;
scmrq->bdev = bdev;
scmrq->retries = 4;
scmrq->error = BLK_STS_OK;
/* We don't use all msbs - place aidaws at the end of the aob page. */
scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
}
static void scm_request_requeue(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
int i;
for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
blk_mq_requeue_request(scmrq->request[i], false);
atomic_dec(&bdev->queued_reqs);
scm_request_done(scmrq);
blk_mq_kick_requeue_list(bdev->rq);
}
static void scm_request_finish(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
blk_status_t *error;
int i;
for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
error = blk_mq_rq_to_pdu(scmrq->request[i]);
*error = scmrq->error;
if (likely(!blk_should_fake_timeout(scmrq->request[i]->q)))
blk_mq_complete_request(scmrq->request[i]);
}
atomic_dec(&bdev->queued_reqs);
scm_request_done(scmrq);
}
static void scm_request_start(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
atomic_inc(&bdev->queued_reqs);
if (eadm_start_aob(scmrq->aob)) {
SCM_LOG(5, "no subchannel");
scm_request_requeue(scmrq);
}
}
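/*
* Per-hctx queue data: a partially filled scm_request waiting for more
* requests to be clustered into one aob, protected by lock.
*/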
struct scm_queue {
struct scm_request *scmrq;
spinlock_t lock;
};
static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *qd)
{
struct scm_device *scmdev = hctx->queue->queuedata;
struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
struct scm_queue *sq = hctx->driver_data;
struct request *req = qd->rq;
struct scm_request *scmrq;
spin_lock(&sq->lock);
if (!scm_permit_request(bdev, req)) {
spin_unlock(&sq->lock);
return BLK_STS_RESOURCE;
}
scmrq = sq->scmrq;
if (!scmrq) {
scmrq = scm_request_fetch();
if (!scmrq) {
SCM_LOG(5, "no request");
spin_unlock(&sq->lock);
return BLK_STS_RESOURCE;
}
scm_request_init(bdev, scmrq);
sq->scmrq = scmrq;
}
scm_request_set(scmrq, req);
if (scm_request_prepare(scmrq)) {
SCM_LOG(5, "aidaw alloc failed");
scm_request_set(scmrq, NULL);
if (scmrq->aob->request.msb_count)
scm_request_start(scmrq);
sq->scmrq = NULL;
spin_unlock(&sq->lock);
return BLK_STS_RESOURCE;
}
blk_mq_start_request(req);
if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) {
scm_request_start(scmrq);
sq->scmrq = NULL;
}
spin_unlock(&sq->lock);
return BLK_STS_OK;
}
static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int idx)
{
struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL);
if (!qd)
return -ENOMEM;
spin_lock_init(&qd->lock);
hctx->driver_data = qd;
return 0;
}
static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
{
struct scm_queue *qd = hctx->driver_data;
WARN_ON(qd->scmrq);
kfree(hctx->driver_data);
hctx->driver_data = NULL;
}
static void __scmrq_log_error(struct scm_request *scmrq)
{
struct aob *aob = scmrq->aob;
if (scmrq->error == BLK_STS_TIMEOUT)
SCM_LOG(1, "Request timeout");
else {
SCM_LOG(1, "Request error");
SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
}
if (scmrq->retries)
SCM_LOG(1, "Retry request");
else
pr_err("An I/O operation to SCM failed with rc=%d\n",
scmrq->error);
}
static void scm_blk_handle_error(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
unsigned long flags;
if (scmrq->error != BLK_STS_IOERR)
goto restart;
/* For -EIO the response block is valid. */
switch (scmrq->aob->response.eqc) {
case EQC_WR_PROHIBIT:
spin_lock_irqsave(&bdev->lock, flags);
if (bdev->state != SCM_WR_PROHIBIT)
pr_info("%lx: Write access to the SCM increment is suspended\n",
(unsigned long) bdev->scmdev->address);
bdev->state = SCM_WR_PROHIBIT;
spin_unlock_irqrestore(&bdev->lock, flags);
goto requeue;
default:
break;
}
restart:
if (!eadm_start_aob(scmrq->aob))
return;
requeue:
scm_request_requeue(scmrq);
}
void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
{
struct scm_request *scmrq = data;
scmrq->error = error;
if (error) {
__scmrq_log_error(scmrq);
if (scmrq->retries-- > 0) {
scm_blk_handle_error(scmrq);
return;
}
}
scm_request_finish(scmrq);
}
static void scm_blk_request_done(struct request *req)
{
blk_status_t *error = blk_mq_rq_to_pdu(req);
blk_mq_end_request(req, *error);
}
static const struct block_device_operations scm_blk_devops = {
.owner = THIS_MODULE,
};
static const struct blk_mq_ops scm_mq_ops = {
.queue_rq = scm_blk_request,
.complete = scm_blk_request_done,
.init_hctx = scm_blk_init_hctx,
.exit_hctx = scm_blk_exit_hctx,
};
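/*
* Allocate the blk-mq tag set and gendisk for an SCM device and register
* the disk; device names follow the scma..scmz, scmaa..scmzz scheme.
*/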
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
unsigned int devindex, nr_max_blk;
struct request_queue *rq;
int len, ret;
devindex = atomic_inc_return(&nr_devices) - 1;
/* scma..scmz + scmaa..scmzz */
if (devindex > 701) {
ret = -ENODEV;
goto out;
}
bdev->scmdev = scmdev;
bdev->state = SCM_OPER;
spin_lock_init(&bdev->lock);
atomic_set(&bdev->queued_reqs, 0);
bdev->tag_set.ops = &scm_mq_ops;
bdev->tag_set.cmd_size = sizeof(blk_status_t);
bdev->tag_set.nr_hw_queues = nr_requests;
bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
bdev->tag_set.numa_node = NUMA_NO_NODE;
ret = blk_mq_alloc_tag_set(&bdev->tag_set);
if (ret)
goto out;
bdev->gendisk = blk_mq_alloc_disk(&bdev->tag_set, scmdev);
if (IS_ERR(bdev->gendisk)) {
ret = PTR_ERR(bdev->gendisk);
goto out_tag;
}
rq = bdev->rq = bdev->gendisk->queue;
nr_max_blk = min(scmdev->nr_max_block,
(unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));
blk_queue_logical_block_size(rq, 1 << 12);
blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
blk_queue_max_segments(rq, nr_max_blk);
blk_queue_flag_set(QUEUE_FLAG_NONROT, rq);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq);
bdev->gendisk->private_data = scmdev;
bdev->gendisk->fops = &scm_blk_devops;
bdev->gendisk->major = scm_major;
bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;
bdev->gendisk->minors = SCM_NR_PARTS;
len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
if (devindex > 25) {
len += snprintf(bdev->gendisk->disk_name + len,
DISK_NAME_LEN - len, "%c",
'a' + (devindex / 26) - 1);
devindex = devindex % 26;
}
snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
'a' + devindex);
/* 512 byte sectors */
set_capacity(bdev->gendisk, scmdev->size >> 9);
ret = device_add_disk(&scmdev->dev, bdev->gendisk, NULL);
if (ret)
goto out_cleanup_disk;
return 0;
out_cleanup_disk:
put_disk(bdev->gendisk);
out_tag:
blk_mq_free_tag_set(&bdev->tag_set);
out:
atomic_dec(&nr_devices);
return ret;
}
void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
del_gendisk(bdev->gendisk);
put_disk(bdev->gendisk);
blk_mq_free_tag_set(&bdev->tag_set);
}
void scm_blk_set_available(struct scm_blk_dev *bdev)
{
unsigned long flags;
spin_lock_irqsave(&bdev->lock, flags);
if (bdev->state == SCM_WR_PROHIBIT)
pr_info("%lx: Write access to the SCM increment is restored\n",
(unsigned long) bdev->scmdev->address);
bdev->state = SCM_OPER;
spin_unlock_irqrestore(&bdev->lock, flags);
}
static bool __init scm_blk_params_valid(void)
{
if (!nr_requests_per_io || nr_requests_per_io > 64)
return false;
return true;
}
static int __init scm_blk_init(void)
{
int ret = -EINVAL;
if (!scm_blk_params_valid())
goto out;
ret = register_blkdev(0, "scm");
if (ret < 0)
goto out;
scm_major = ret;
ret = scm_alloc_rqs(nr_requests);
if (ret)
goto out_free;
scm_debug = debug_register("scm_log", 16, 1, 16);
if (!scm_debug) {
ret = -ENOMEM;
goto out_free;
}
debug_register_view(scm_debug, &debug_hex_ascii_view);
debug_set_level(scm_debug, 2);
ret = scm_drv_init();
if (ret)
goto out_dbf;
return ret;
out_dbf:
debug_unregister(scm_debug);
out_free:
scm_free_rqs();
unregister_blkdev(scm_major, "scm");
out:
return ret;
}
module_init(scm_blk_init);
static void __exit scm_blk_cleanup(void)
{
scm_drv_cleanup();
debug_unregister(scm_debug);
scm_free_rqs();
unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);
| linux-master | drivers/s390/block/scm_blk.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Horst Hummel <[email protected]>
* Holger Smolinski <[email protected]>
* Bugreports.to..: <[email protected]>
* Copyright IBM Corp. 2000, 2001
*
*/
#define KMSG_COMPONENT "dasd-eckd"
#include <linux/timer.h>
#include <asm/idals.h>
#define PRINTK_HEADER "dasd_erp(3990): "
#include "dasd_int.h"
#include "dasd_eckd.h"
struct DCTL_data {
unsigned char subcommand; /* e.g. Inhibit Write, Enable Write,... */
unsigned char modifier; /* Subcommand modifier */
unsigned short res; /* reserved */
} __attribute__ ((packed));
/*
*****************************************************************************
* SECTION ERP HANDLING
*****************************************************************************
*/
/*
*****************************************************************************
* 24 and 32 byte sense ERP functions
*****************************************************************************
*/
/*
* DASD_3990_ERP_CLEANUP
*
* DESCRIPTION
* Removes the already built but no longer needed ERP request and sets
* the status of the original cqr / erp to the given (final) status
*
* PARAMETER
* erp request to be blocked
* final_status either DASD_CQR_DONE or DASD_CQR_FAILED
*
* RETURN VALUES
* cqr original cqr
*/
static struct dasd_ccw_req *
dasd_3990_erp_cleanup(struct dasd_ccw_req * erp, char final_status)
{
struct dasd_ccw_req *cqr = erp->refers;
dasd_free_erp_request(erp, erp->memdev);
cqr->status = final_status;
return cqr;
} /* end dasd_3990_erp_cleanup */
/*
* DASD_3990_ERP_BLOCK_QUEUE
*
* DESCRIPTION
* Block the given device request queue to prevent further
* processing until the started timer has expired or a related
* interrupt was received.
*/
static void dasd_3990_erp_block_queue(struct dasd_ccw_req *erp, int expires)
{
struct dasd_device *device = erp->startdev;
unsigned long flags;
DBF_DEV_EVENT(DBF_INFO, device,
"blocking request queue for %is", expires/HZ);
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
dasd_device_set_stop_bits(device, DASD_STOPPED_PENDING);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
erp->status = DASD_CQR_FILLED;
if (erp->block)
dasd_block_set_timer(erp->block, expires);
else
dasd_device_set_timer(device, expires);
}
/*
* DASD_3990_ERP_INT_REQ
*
* DESCRIPTION
* Handles 'Intervention Required' error.
* This means either device offline or not installed.
*
* PARAMETER
* erp current erp
* RETURN VALUES
* erp modified erp
*/
static struct dasd_ccw_req *
dasd_3990_erp_int_req(struct dasd_ccw_req * erp)
{
struct dasd_device *device = erp->startdev;
/* first time set initial retry counter and erp_function */
/* and retry once without blocking queue */
/* (this enables easier enqueuing of the cqr) */
if (erp->function != dasd_3990_erp_int_req) {
erp->retries = 256;
erp->function = dasd_3990_erp_int_req;
} else {
/* issue a message and wait for 'device ready' interrupt */
dev_err(&device->cdev->dev,
"is offline or not installed - "
"INTERVENTION REQUIRED!!\n");
dasd_3990_erp_block_queue(erp, 60*HZ);
}
return erp;
} /* end dasd_3990_erp_int_req */
/*
* DASD_3990_ERP_ALTERNATE_PATH
*
* DESCRIPTION
* Repeat the operation on a different channel path.
* If all alternate paths have been tried, the request is posted with a
* permanent error.
*
* PARAMETER
* erp pointer to the current ERP
*
* RETURN VALUES
* erp modified pointer to the ERP
*/
static void
dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
{
struct dasd_device *device = erp->startdev;
__u8 opm;
unsigned long flags;
/* try alternate valid path */
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
opm = ccw_device_get_path_mask(device->cdev);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
if (erp->lpm == 0)
erp->lpm = dasd_path_get_opm(device) &
~(erp->irb.esw.esw0.sublog.lpum);
else
erp->lpm &= ~(erp->irb.esw.esw0.sublog.lpum);
if ((erp->lpm & opm) != 0x00) {
DBF_DEV_EVENT(DBF_WARNING, device,
"try alternate lpm=%x (lpum=%x / opm=%x)",
erp->lpm, erp->irb.esw.esw0.sublog.lpum, opm);
/* reset status to submit the request again... */
erp->status = DASD_CQR_FILLED;
erp->retries = 10;
} else {
dev_err(&device->cdev->dev,
"The DASD cannot be reached on any path (lpum=%x"
"/opm=%x)\n", erp->irb.esw.esw0.sublog.lpum, opm);
/* post request with permanent error */
erp->status = DASD_CQR_FAILED;
}
} /* end dasd_3990_erp_alternate_path */
/*
* DASD_3990_ERP_DCTL
*
* DESCRIPTION
* Setup cqr to do the Diagnostic Control (DCTL) command with an
* Inhibit Write subcommand (0x02) and the given modifier.
*
* PARAMETER
* erp pointer to the current (failed) ERP
* modifier subcommand modifier
*
* RETURN VALUES
* dctl_cqr pointer to NEW dctl_cqr
*
*/
static struct dasd_ccw_req *
dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
{
struct dasd_device *device = erp->startdev;
struct DCTL_data *DCTL_data;
struct ccw1 *ccw;
struct dasd_ccw_req *dctl_cqr;
dctl_cqr = dasd_alloc_erp_request(erp->magic, 1,
sizeof(struct DCTL_data),
device);
if (IS_ERR(dctl_cqr)) {
dev_err(&device->cdev->dev,
"Unable to allocate DCTL-CQR\n");
erp->status = DASD_CQR_FAILED;
return erp;
}
DCTL_data = dctl_cqr->data;
DCTL_data->subcommand = 0x02; /* Inhibit Write */
DCTL_data->modifier = modifier;
ccw = dctl_cqr->cpaddr;
memset(ccw, 0, sizeof(struct ccw1));
ccw->cmd_code = CCW_CMD_DCTL;
ccw->count = 4;
ccw->cda = (__u32)virt_to_phys(DCTL_data);
dctl_cqr->flags = erp->flags;
dctl_cqr->function = dasd_3990_erp_DCTL;
dctl_cqr->refers = erp;
dctl_cqr->startdev = device;
dctl_cqr->memdev = device;
dctl_cqr->magic = erp->magic;
dctl_cqr->expires = 5 * 60 * HZ;
dctl_cqr->retries = 2;
dctl_cqr->buildclk = get_tod_clock();
dctl_cqr->status = DASD_CQR_FILLED;
return dctl_cqr;
} /* end dasd_3990_erp_DCTL */
/*
* DASD_3990_ERP_ACTION_1
*
* DESCRIPTION
* Setup ERP to do the ERP action 1 (see Reference manual).
* Repeat the operation on a different channel path.
* As deviation from the recommended recovery action, we reset the path mask
* after we have tried each path and go through all paths a second time.
* This will cover situations where only one path at a time is actually down,
* but all paths fail and recover just with the same sequence and timing as
* we try to use them (flapping links).
* If all alternate paths have been tried twice, the request is posted with
* a permanent error.
*
* PARAMETER
* erp pointer to the current ERP
*
* RETURN VALUES
* erp pointer to the ERP
*
*/
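/*
* Second pass of ERP action 1: after the path mask has been reset by
* dasd_3990_erp_action_1, try all alternate paths once more.
*/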
static struct dasd_ccw_req *dasd_3990_erp_action_1_sec(struct dasd_ccw_req *erp)
{
erp->function = dasd_3990_erp_action_1_sec;
dasd_3990_erp_alternate_path(erp);
return erp;
}
static struct dasd_ccw_req *dasd_3990_erp_action_1(struct dasd_ccw_req *erp)
{
erp->function = dasd_3990_erp_action_1;
dasd_3990_erp_alternate_path(erp);
if (erp->status == DASD_CQR_FAILED &&
!test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
erp->status = DASD_CQR_FILLED;
erp->retries = 10;
erp->lpm = dasd_path_get_opm(erp->startdev);
erp->function = dasd_3990_erp_action_1_sec;
}
return erp;
} /* end dasd_3990_erp_action_1(b) */
/*
* DASD_3990_ERP_ACTION_4
*
* DESCRIPTION
* Setup ERP to do the ERP action 4 (see Reference manual).
* Set the current request to PENDING to block the CQR queue for that device
* until the state change interrupt appears.
* Use a timer (20 seconds) to retry the cqr if the interrupt is still
* missing.
*
* PARAMETER
* sense sense data of the actual error
* erp pointer to the current ERP
*
* RETURN VALUES
* erp pointer to the ERP
*
*/
static struct dasd_ccw_req *
dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
{
struct dasd_device *device = erp->startdev;
/* first time set initial retry counter and erp_function */
/* and retry once without waiting for state change pending */
/* interrupt (this enables easier enqueuing of the cqr) */
if (erp->function != dasd_3990_erp_action_4) {
DBF_DEV_EVENT(DBF_INFO, device, "%s",
"dasd_3990_erp_action_4: first time retry");
erp->retries = 256;
erp->function = dasd_3990_erp_action_4;
} else {
if (sense && (sense[25] == 0x1D)) { /* state change pending */
DBF_DEV_EVENT(DBF_INFO, device,
"waiting for state change pending "
"interrupt, %d retries left",
erp->retries);
dasd_3990_erp_block_queue(erp, 30*HZ);
} else if (sense && (sense[25] == 0x1E)) { /* busy */
DBF_DEV_EVENT(DBF_INFO, device,
"busy - redriving request later, "
"%d retries left",
erp->retries);
dasd_3990_erp_block_queue(erp, HZ);
} else {
/* no state change pending - retry */
DBF_DEV_EVENT(DBF_INFO, device,
"redriving request immediately, "
"%d retries left",
erp->retries);
erp->status = DASD_CQR_FILLED;
}
}
return erp;
} /* end dasd_3990_erp_action_4 */
/*
*****************************************************************************
* 24 byte sense ERP functions (only)
*****************************************************************************
*/
/*
* DASD_3990_ERP_ACTION_5
*
* DESCRIPTION
* Setup ERP to do the ERP action 5 (see Reference manual).
* NOTE: Further handling is done in xxx_further_erp after the retries.
*
* PARAMETER
* erp pointer to the current ERP
*
* RETURN VALUES
* erp pointer to the ERP
*
*/
static struct dasd_ccw_req *
dasd_3990_erp_action_5(struct dasd_ccw_req * erp)
{
/* first of all retry */
erp->retries = 10;
erp->function = dasd_3990_erp_action_5;
return erp;
} /* end dasd_3990_erp_action_5 */
/*
* DASD_3990_HANDLE_ENV_DATA
*
* DESCRIPTION
* Handles 24 byte 'Environmental data present'.
* Does an analysis of the sense data (message format)
* and prints the error messages.
*
* PARAMETER
* sense current sense data
*
* RETURN VALUES
* void
*/
static void
dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
{
struct dasd_device *device = erp->startdev;
char msg_format = (sense[7] & 0xF0);
char msg_no = (sense[7] & 0x0F);
char errorstring[ERRORLENGTH];
switch (msg_format) {
case 0x00: /* Format 0 - Program or System Checks */
if (sense[1] & 0x10) { /* check message to operator bit */
switch (msg_no) {
case 0x00: /* No Message */
break;
case 0x01:
dev_warn(&device->cdev->dev,
"FORMAT 0 - Invalid Command\n");
break;
case 0x02:
dev_warn(&device->cdev->dev,
"FORMAT 0 - Invalid Command "
"Sequence\n");
break;
case 0x03:
dev_warn(&device->cdev->dev,
"FORMAT 0 - CCW Count less than "
"required\n");
break;
case 0x04:
dev_warn(&device->cdev->dev,
"FORMAT 0 - Invalid Parameter\n");
break;
case 0x05:
dev_warn(&device->cdev->dev,
"FORMAT 0 - Diagnostic of Special"
" Command Violates File Mask\n");
break;
case 0x07:
dev_warn(&device->cdev->dev,
"FORMAT 0 - Channel Returned with "
"Incorrect retry CCW\n");
break;
case 0x08:
dev_warn(&device->cdev->dev,
"FORMAT 0 - Reset Notification\n");
break;
case 0x09:
dev_warn(&device->cdev->dev,
"FORMAT 0 - Storage Path Restart\n");
break;
case 0x0A:
dev_warn(&device->cdev->dev,
"FORMAT 0 - Channel requested "
"... %02x\n", sense[8]);
break;
case 0x0B:
dev_warn(&device->cdev->dev,
"FORMAT 0 - Invalid Defective/"
"Alternate Track Pointer\n");
break;
case 0x0C:
dev_warn(&device->cdev->dev,
"FORMAT 0 - DPS Installation "
"Check\n");
break;
case 0x0E:
dev_warn(&device->cdev->dev,
"FORMAT 0 - Command Invalid on "
"Secondary Address\n");
break;
case 0x0F:
dev_warn(&device->cdev->dev,
"FORMAT 0 - Status Not As "
"Required: reason %02x\n",
sense[8]);
break;
default:
dev_warn(&device->cdev->dev,
"FORMAT 0 - Reserved\n");
}
} else {
switch (msg_no) {
case 0x00: /* No Message */
break;
case 0x01:
dev_warn(&device->cdev->dev,
"FORMAT 0 - Device Error "
"Source\n");
break;
case 0x02:
dev_warn(&device->cdev->dev,
"FORMAT 0 - Reserved\n");
break;
case 0x03:
dev_warn(&device->cdev->dev,
"FORMAT 0 - Device Fenced - "
"device = %02x\n", sense[4]);
break;
case 0x04:
dev_warn(&device->cdev->dev,
"FORMAT 0 - Data Pinned for "
"Device\n");
break;
default:
dev_warn(&device->cdev->dev,
"FORMAT 0 - Reserved\n");
}
}
break;
case 0x10: /* Format 1 - Device Equipment Checks */
switch (msg_no) {
case 0x00: /* No Message */
break;
case 0x01:
dev_warn(&device->cdev->dev,
"FORMAT 1 - Device Status 1 not as "
"expected\n");
break;
case 0x03:
dev_warn(&device->cdev->dev,
"FORMAT 1 - Index missing\n");
break;
case 0x04:
dev_warn(&device->cdev->dev,
"FORMAT 1 - Interruption cannot be "
"reset\n");
break;
case 0x05:
dev_warn(&device->cdev->dev,
"FORMAT 1 - Device did not respond to "
"selection\n");
break;
case 0x06:
dev_warn(&device->cdev->dev,
"FORMAT 1 - Device check-2 error or Set "
"Sector is not complete\n");
break;
case 0x07:
dev_warn(&device->cdev->dev,
"FORMAT 1 - Head address does not "
"compare\n");
break;
case 0x08:
dev_warn(&device->cdev->dev,
"FORMAT 1 - Device status 1 not valid\n");
break;
case 0x09:
dev_warn(&device->cdev->dev,
"FORMAT 1 - Device not ready\n");
break;
case 0x0A:
dev_warn(&device->cdev->dev,
"FORMAT 1 - Track physical address did "
"not compare\n");
break;
case 0x0B:
dev_warn(&device->cdev->dev,
"FORMAT 1 - Missing device address bit\n");
break;
case 0x0C:
dev_warn(&device->cdev->dev,
"FORMAT 1 - Drive motor switch is off\n");
break;
case 0x0D:
dev_warn(&device->cdev->dev,
"FORMAT 1 - Seek incomplete\n");
break;
case 0x0E:
dev_warn(&device->cdev->dev,
"FORMAT 1 - Cylinder address did not "
"compare\n");
break;
case 0x0F:
dev_warn(&device->cdev->dev,
"FORMAT 1 - Offset active cannot be "
"reset\n");
break;
default:
dev_warn(&device->cdev->dev,
"FORMAT 1 - Reserved\n");
}
break;
case 0x20: /* Format 2 - 3990 Equipment Checks */
switch (msg_no) {
case 0x08:
dev_warn(&device->cdev->dev,
"FORMAT 2 - 3990 check-2 error\n");
break;
case 0x0E:
dev_warn(&device->cdev->dev,
"FORMAT 2 - Support facility errors\n");
break;
case 0x0F:
dev_warn(&device->cdev->dev,
"FORMAT 2 - Microcode detected error "
"%02x\n",
sense[8]);
break;
default:
dev_warn(&device->cdev->dev,
"FORMAT 2 - Reserved\n");
}
break;
case 0x30: /* Format 3 - 3990 Control Checks */
switch (msg_no) {
case 0x0F:
dev_warn(&device->cdev->dev,
"FORMAT 3 - Allegiance terminated\n");
break;
default:
dev_warn(&device->cdev->dev,
"FORMAT 3 - Reserved\n");
}
break;
case 0x40: /* Format 4 - Data Checks */
switch (msg_no) {
case 0x00:
dev_warn(&device->cdev->dev,
"FORMAT 4 - Home address area error\n");
break;
case 0x01:
dev_warn(&device->cdev->dev,
"FORMAT 4 - Count area error\n");
break;
case 0x02:
dev_warn(&device->cdev->dev,
"FORMAT 4 - Key area error\n");
break;
case 0x03:
dev_warn(&device->cdev->dev,
"FORMAT 4 - Data area error\n");
break;
case 0x04:
dev_warn(&device->cdev->dev,
"FORMAT 4 - No sync byte in home address "
"area\n");
break;
case 0x05:
dev_warn(&device->cdev->dev,
"FORMAT 4 - No sync byte in count address "
"area\n");
break;
case 0x06:
dev_warn(&device->cdev->dev,
"FORMAT 4 - No sync byte in key area\n");
break;
case 0x07:
dev_warn(&device->cdev->dev,
"FORMAT 4 - No sync byte in data area\n");
break;
case 0x08:
dev_warn(&device->cdev->dev,
"FORMAT 4 - Home address area error; "
"offset active\n");
break;
case 0x09:
dev_warn(&device->cdev->dev,
"FORMAT 4 - Count area error; offset "
"active\n");
break;
case 0x0A:
dev_warn(&device->cdev->dev,
"FORMAT 4 - Key area error; offset "
"active\n");
break;
case 0x0B:
dev_warn(&device->cdev->dev,
"FORMAT 4 - Data area error; "
"offset active\n");
break;
case 0x0C:
dev_warn(&device->cdev->dev,
"FORMAT 4 - No sync byte in home "
"address area; offset active\n");
break;
case 0x0D:
dev_warn(&device->cdev->dev,
"FORMAT 4 - No sync byte in count "
"address area; offset active\n");
break;
case 0x0E:
dev_warn(&device->cdev->dev,
"FORMAT 4 - No sync byte in key area; "
"offset active\n");
break;
case 0x0F:
dev_warn(&device->cdev->dev,
"FORMAT 4 - No sync byte in data area; "
"offset active\n");
break;
default:
dev_warn(&device->cdev->dev,
"FORMAT 4 - Reserved\n");
}
break;
case 0x50: /* Format 5 - Data Check with displacement information */
switch (msg_no) {
case 0x00:
dev_warn(&device->cdev->dev,
"FORMAT 5 - Data Check in the "
"home address area\n");
break;
case 0x01:
dev_warn(&device->cdev->dev,
"FORMAT 5 - Data Check in the count "
"area\n");
break;
case 0x02:
dev_warn(&device->cdev->dev,
"FORMAT 5 - Data Check in the key area\n");
break;
case 0x03:
dev_warn(&device->cdev->dev,
"FORMAT 5 - Data Check in the data "
"area\n");
break;
case 0x08:
dev_warn(&device->cdev->dev,
"FORMAT 5 - Data Check in the "
"home address area; offset active\n");
break;
case 0x09:
dev_warn(&device->cdev->dev,
"FORMAT 5 - Data Check in the count area; "
"offset active\n");
break;
case 0x0A:
dev_warn(&device->cdev->dev,
"FORMAT 5 - Data Check in the key area; "
"offset active\n");
break;
case 0x0B:
dev_warn(&device->cdev->dev,
"FORMAT 5 - Data Check in the data area; "
"offset active\n");
break;
default:
dev_warn(&device->cdev->dev,
"FORMAT 5 - Reserved\n");
}
break;
case 0x60: /* Format 6 - Usage Statistics/Overrun Errors */
switch (msg_no) {
case 0x00:
dev_warn(&device->cdev->dev,
"FORMAT 6 - Overrun on channel A\n");
break;
case 0x01:
dev_warn(&device->cdev->dev,
"FORMAT 6 - Overrun on channel B\n");
break;
case 0x02:
dev_warn(&device->cdev->dev,
"FORMAT 6 - Overrun on channel C\n");
break;
case 0x03:
dev_warn(&device->cdev->dev,
"FORMAT 6 - Overrun on channel D\n");
break;
case 0x04:
dev_warn(&device->cdev->dev,
"FORMAT 6 - Overrun on channel E\n");
break;
case 0x05:
dev_warn(&device->cdev->dev,
"FORMAT 6 - Overrun on channel F\n");
break;
case 0x06:
dev_warn(&device->cdev->dev,
"FORMAT 6 - Overrun on channel G\n");
break;
case 0x07:
dev_warn(&device->cdev->dev,
"FORMAT 6 - Overrun on channel H\n");
break;
default:
dev_warn(&device->cdev->dev,
"FORMAT 6 - Reserved\n");
}
break;
case 0x70: /* Format 7 - Device Connection Control Checks */
switch (msg_no) {
case 0x00:
dev_warn(&device->cdev->dev,
"FORMAT 7 - RCC initiated by a connection "
"check alert\n");
break;
case 0x01:
dev_warn(&device->cdev->dev,
"FORMAT 7 - RCC 1 sequence not "
"successful\n");
break;
case 0x02:
dev_warn(&device->cdev->dev,
"FORMAT 7 - RCC 1 and RCC 2 sequences not "
"successful\n");
break;
case 0x03:
dev_warn(&device->cdev->dev,
"FORMAT 7 - Invalid tag-in during "
"selection sequence\n");
break;
case 0x04:
dev_warn(&device->cdev->dev,
"FORMAT 7 - extra RCC required\n");
break;
case 0x05:
dev_warn(&device->cdev->dev,
"FORMAT 7 - Invalid DCC selection "
"response or timeout\n");
break;
case 0x06:
dev_warn(&device->cdev->dev,
"FORMAT 7 - Missing end operation; device "
"transfer complete\n");
break;
case 0x07:
dev_warn(&device->cdev->dev,
"FORMAT 7 - Missing end operation; device "
"transfer incomplete\n");
break;
case 0x08:
dev_warn(&device->cdev->dev,
"FORMAT 7 - Invalid tag-in for an "
"immediate command sequence\n");
break;
case 0x09:
dev_warn(&device->cdev->dev,
"FORMAT 7 - Invalid tag-in for an "
"extended command sequence\n");
break;
case 0x0A:
dev_warn(&device->cdev->dev,
"FORMAT 7 - 3990 microcode time out when "
"stopping selection\n");
break;
case 0x0B:
dev_warn(&device->cdev->dev,
"FORMAT 7 - No response to selection "
"after a poll interruption\n");
break;
case 0x0C:
dev_warn(&device->cdev->dev,
"FORMAT 7 - Permanent path error (DASD "
"controller not available)\n");
break;
case 0x0D:
dev_warn(&device->cdev->dev,
"FORMAT 7 - DASD controller not available"
" on disconnected command chain\n");
break;
default:
dev_warn(&device->cdev->dev,
"FORMAT 7 - Reserved\n");
}
break;
case 0x80: /* Format 8 - Additional Device Equipment Checks */
switch (msg_no) {
case 0x00: /* No Message */
case 0x01:
dev_warn(&device->cdev->dev,
"FORMAT 8 - Error correction code "
"hardware fault\n");
break;
case 0x03:
dev_warn(&device->cdev->dev,
"FORMAT 8 - Unexpected end operation "
"response code\n");
break;
case 0x04:
dev_warn(&device->cdev->dev,
"FORMAT 8 - End operation with transfer "
"count not zero\n");
break;
case 0x05:
dev_warn(&device->cdev->dev,
"FORMAT 8 - End operation with transfer "
"count zero\n");
break;
case 0x06:
dev_warn(&device->cdev->dev,
"FORMAT 8 - DPS checks after a system "
"reset or selective reset\n");
break;
case 0x07:
dev_warn(&device->cdev->dev,
"FORMAT 8 - DPS cannot be filled\n");
break;
case 0x08:
dev_warn(&device->cdev->dev,
"FORMAT 8 - Short busy time-out during "
"device selection\n");
break;
case 0x09:
dev_warn(&device->cdev->dev,
"FORMAT 8 - DASD controller failed to "
"set or reset the long busy latch\n");
break;
case 0x0A:
dev_warn(&device->cdev->dev,
"FORMAT 8 - No interruption from device "
"during a command chain\n");
break;
default:
dev_warn(&device->cdev->dev,
"FORMAT 8 - Reserved\n");
}
break;
case 0x90: /* Format 9 - Device Read, Write, and Seek Checks */
switch (msg_no) {
case 0x00:
break; /* No Message */
case 0x06:
dev_warn(&device->cdev->dev,
"FORMAT 9 - Device check-2 error\n");
break;
case 0x07:
dev_warn(&device->cdev->dev,
"FORMAT 9 - Head address did not "
"compare\n");
break;
case 0x0A:
dev_warn(&device->cdev->dev,
"FORMAT 9 - Track physical address did "
"not compare while oriented\n");
break;
case 0x0E:
dev_warn(&device->cdev->dev,
"FORMAT 9 - Cylinder address did not "
"compare\n");
break;
default:
dev_warn(&device->cdev->dev,
"FORMAT 9 - Reserved\n");
}
break;
case 0xF0: /* Format F - Cache Storage Checks */
switch (msg_no) {
case 0x00:
dev_warn(&device->cdev->dev,
"FORMAT F - Operation Terminated\n");
break;
case 0x01:
dev_warn(&device->cdev->dev,
"FORMAT F - Subsystem Processing Error\n");
break;
case 0x02:
dev_warn(&device->cdev->dev,
"FORMAT F - Cache or nonvolatile storage "
"equipment failure\n");
break;
case 0x04:
dev_warn(&device->cdev->dev,
"FORMAT F - Caching terminated\n");
break;
case 0x06:
dev_warn(&device->cdev->dev,
"FORMAT F - Cache fast write access not "
"authorized\n");
break;
case 0x07:
dev_warn(&device->cdev->dev,
"FORMAT F - Track format incorrect\n");
break;
case 0x09:
dev_warn(&device->cdev->dev,
"FORMAT F - Caching reinitiated\n");
break;
case 0x0A:
dev_warn(&device->cdev->dev,
"FORMAT F - Nonvolatile storage "
"terminated\n");
break;
case 0x0B:
dev_warn(&device->cdev->dev,
"FORMAT F - Volume is suspended duplex\n");
/* call extended error reporting (EER) */
dasd_eer_write(device, erp->refers,
DASD_EER_PPRCSUSPEND);
break;
case 0x0C:
dev_warn(&device->cdev->dev,
"FORMAT F - Subsystem status cannot be "
"determined\n");
break;
case 0x0D:
dev_warn(&device->cdev->dev,
"FORMAT F - Caching status reset to "
"default\n");
break;
case 0x0E:
dev_warn(&device->cdev->dev,
"FORMAT F - DASD Fast Write inhibited\n");
break;
default:
dev_warn(&device->cdev->dev,
"FORMAT F - Reserved\n");
}
break;
default: /* unknown message format - should not happen
internal error 03 - unknown message format */
snprintf(errorstring, ERRORLENGTH, "03 %02x", msg_format);
dev_err(&device->cdev->dev,
"An error occurred in the DASD device driver, "
"reason=%s\n", errorstring);
break;
} /* end switch message format */
} /* end dasd_3990_handle_env_data */
/*
* DASD_3990_ERP_COM_REJ
*
* DESCRIPTION
* Handles 24 byte 'Command Reject' error.
*
* PARAMETER
* erp current erp_head
* sense current sense data
*
* RETURN VALUES
* erp 'new' erp_head - pointer to new ERP
*/
static struct dasd_ccw_req *
dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
{
struct dasd_device *device = erp->startdev;
erp->function = dasd_3990_erp_com_rej;
/* env data present (ACTION 10 - retry should work) */
if (sense[2] & SNS2_ENV_DATA_PRESENT) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Command Reject - environmental data present");
dasd_3990_handle_env_data(erp, sense);
erp->retries = 5;
} else if (sense[1] & SNS1_WRITE_INHIBITED) {
dev_err(&device->cdev->dev, "An I/O request was rejected"
" because writing is inhibited\n");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
} else if (sense[7] == SNS7_INVALID_ON_SEC) {
dev_err(&device->cdev->dev, "An I/O request was rejected on a copy pair secondary device\n");
/* suppress dump of sense data for this error */
set_bit(DASD_CQR_SUPPRESS_CR, &erp->refers->flags);
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
} else {
/* fatal error - set status to FAILED
internal error 09 - Command Reject */
if (!test_bit(DASD_CQR_SUPPRESS_CR, &erp->flags))
dev_err(&device->cdev->dev,
"An error occurred in the DASD device driver, reason=09\n");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
}
return erp;
} /* end dasd_3990_erp_com_rej */
/*
* DASD_3990_ERP_BUS_OUT
*
* DESCRIPTION
* Handles 24 byte 'Bus Out Parity Check' error.
*
* PARAMETER
* erp current erp_head
* RETURN VALUES
* erp new erp_head - pointer to new ERP
*/
static struct dasd_ccw_req *
dasd_3990_erp_bus_out(struct dasd_ccw_req * erp)
{
struct dasd_device *device = erp->startdev;
/* first time set initial retry counter and erp_function */
/* and retry once without blocking queue */
/* (this enables easier enqueuing of the cqr) */
if (erp->function != dasd_3990_erp_bus_out) {
erp->retries = 256;
erp->function = dasd_3990_erp_bus_out;
} else {
/* issue a message and wait for 'device ready' interrupt */
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"bus out parity error or BOPC requested by "
"channel");
dasd_3990_erp_block_queue(erp, 60*HZ);
}
return erp;
} /* end dasd_3990_erp_bus_out */
/*
* DASD_3990_ERP_EQUIP_CHECK
*
* DESCRIPTION
* Handles 24 byte 'Equipment Check' error.
*
* PARAMETER
* erp current erp_head
* RETURN VALUES
* erp new erp_head - pointer to new ERP
*/
static struct dasd_ccw_req *
dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
{
struct dasd_device *device = erp->startdev;
erp->function = dasd_3990_erp_equip_check;
if (sense[1] & SNS1_WRITE_INHIBITED) {
dev_info(&device->cdev->dev,
"Write inhibited path encountered\n");
/* vary path offline
internal error 04 - Path should be varied off-line. */
dev_err(&device->cdev->dev, "An error occurred in the DASD "
"device driver, reason=%s\n", "04");
erp = dasd_3990_erp_action_1(erp);
} else if (sense[2] & SNS2_ENV_DATA_PRESENT) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Equipment Check - " "environmental data present");
dasd_3990_handle_env_data(erp, sense);
erp = dasd_3990_erp_action_4(erp, sense);
} else if (sense[1] & SNS1_PERM_ERR) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Equipment Check - retry exhausted or "
"undesirable");
erp = dasd_3990_erp_action_1(erp);
} else {
/* all other equipment checks - Action 5 */
/* rest is done when retries == 0 */
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Equipment check or processing error");
erp = dasd_3990_erp_action_5(erp);
}
return erp;
} /* end dasd_3990_erp_equip_check */
/*
* DASD_3990_ERP_DATA_CHECK
*
* DESCRIPTION
* Handles 24 byte 'Data Check' error.
*
* PARAMETER
* erp current erp_head
* RETURN VALUES
* erp new erp_head - pointer to new ERP
*/
static struct dasd_ccw_req *
dasd_3990_erp_data_check(struct dasd_ccw_req * erp, char *sense)
{
struct dasd_device *device = erp->startdev;
erp->function = dasd_3990_erp_data_check;
if (sense[2] & SNS2_CORRECTABLE) { /* correctable data check */
/* issue message that the data has been corrected */
dev_emerg(&device->cdev->dev,
"Data recovered during retry with PCI "
"fetch mode active\n");
/* not possible to handle this situation in Linux */
panic("No way to inform application about the possibly "
"incorrect data");
} else if (sense[2] & SNS2_ENV_DATA_PRESENT) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Uncorrectable data check recovered secondary "
"addr of duplex pair");
erp = dasd_3990_erp_action_4(erp, sense);
} else if (sense[1] & SNS1_PERM_ERR) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Uncorrectable data check with internal "
"retry exhausted");
erp = dasd_3990_erp_action_1(erp);
} else {
/* all other data checks */
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Uncorrectable data check with retry count "
"exhausted...");
erp = dasd_3990_erp_action_5(erp);
}
return erp;
} /* end dasd_3990_erp_data_check */
/*
* DASD_3990_ERP_OVERRUN
*
* DESCRIPTION
* Handles 24 byte 'Overrun' error.
*
* PARAMETER
* erp current erp_head
* RETURN VALUES
* erp new erp_head - pointer to new ERP
*/
static struct dasd_ccw_req *
dasd_3990_erp_overrun(struct dasd_ccw_req * erp, char *sense)
{
struct dasd_device *device = erp->startdev;
erp->function = dasd_3990_erp_overrun;
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Overrun - service overrun or overrun"
" error requested by channel");
erp = dasd_3990_erp_action_5(erp);
return erp;
} /* end dasd_3990_erp_overrun */
/*
* DASD_3990_ERP_INV_FORMAT
*
* DESCRIPTION
* Handles 24 byte 'Invalid Track Format' error.
*
* PARAMETER
* erp current erp_head
* RETURN VALUES
* erp new erp_head - pointer to new ERP
*/
static struct dasd_ccw_req *
dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense)
{
struct dasd_device *device = erp->startdev;
erp->function = dasd_3990_erp_inv_format;
if (sense[2] & SNS2_ENV_DATA_PRESENT) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Track format error when destaging or "
"staging data");
dasd_3990_handle_env_data(erp, sense);
erp = dasd_3990_erp_action_4(erp, sense);
} else {
/* internal error 06 - The track format is not valid*/
dev_err(&device->cdev->dev,
"An error occurred in the DASD device driver, "
"reason=%s\n", "06");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
}
return erp;
} /* end dasd_3990_erp_inv_format */
/*
* DASD_3990_ERP_EOC
*
* DESCRIPTION
* Handles 24 byte 'End-of-Cylinder' error.
*
* PARAMETER
* erp already added default erp
* RETURN VALUES
* erp pointer to original (failed) cqr.
*/
static struct dasd_ccw_req *
dasd_3990_erp_EOC(struct dasd_ccw_req * default_erp, char *sense)
{
struct dasd_device *device = default_erp->startdev;
dev_err(&device->cdev->dev,
"The cylinder data for accessing the DASD is inconsistent\n");
/* implement action 7 - BUG */
return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
} /* end dasd_3990_erp_EOC */
/*
* DASD_3990_ERP_ENV_DATA
*
* DESCRIPTION
* Handles 24 byte 'Environmental-Data Present' error.
*
* PARAMETER
* erp current erp_head
* RETURN VALUES
* erp new erp_head - pointer to new ERP
*/
static struct dasd_ccw_req *
dasd_3990_erp_env_data(struct dasd_ccw_req * erp, char *sense)
{
struct dasd_device *device = erp->startdev;
erp->function = dasd_3990_erp_env_data;
DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Environmental data present");
dasd_3990_handle_env_data(erp, sense);
/* don't retry on disabled interface */
if (sense[7] != 0x0F) {
erp = dasd_3990_erp_action_4(erp, sense);
} else {
erp->status = DASD_CQR_FILLED;
}
return erp;
} /* end dasd_3990_erp_env_data */
/*
* DASD_3990_ERP_NO_REC
*
* DESCRIPTION
* Handles 24 byte 'No Record Found' error.
*
* PARAMETER
* erp already added default ERP
*
* RETURN VALUES
* erp new erp_head - pointer to new ERP
*/
static struct dasd_ccw_req *
dasd_3990_erp_no_rec(struct dasd_ccw_req * default_erp, char *sense)
{
struct dasd_device *device = default_erp->startdev;
/*
* In some cases the 'No Record Found' error might be expected and
* log messages shouldn't be written then.
* Check if the according suppress bit is set.
*/
if (!test_bit(DASD_CQR_SUPPRESS_NRF, &default_erp->flags))
dev_err(&device->cdev->dev,
"The specified record was not found\n");
return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
} /* end dasd_3990_erp_no_rec */
/*
* DASD_3990_ERP_FILE_PROT
*
* DESCRIPTION
* Handles 24 byte 'File Protected' error.
* Note: Seek related recovery is not implemented because
* we don't use the seek command yet.
*
* PARAMETER
* erp current erp_head
* RETURN VALUES
* erp new erp_head - pointer to new ERP
*/
static struct dasd_ccw_req *
dasd_3990_erp_file_prot(struct dasd_ccw_req * erp)
{
struct dasd_device *device = erp->startdev;
/*
* In some cases the 'File Protected' error might be expected and
* log messages shouldn't be written then.
* Check if the according suppress bit is set.
*/
if (!test_bit(DASD_CQR_SUPPRESS_FP, &erp->flags))
dev_err(&device->cdev->dev,
"Accessing the DASD failed because of a hardware error\n");
return dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
} /* end dasd_3990_erp_file_prot */
/*
* DASD_3990_ERP_INSPECT_ALIAS
*
* DESCRIPTION
* Checks if the original request was started on an alias device.
* If yes, it modifies the original and the erp request so that
* the erp request can be started on a base device.
*
* PARAMETER
* erp pointer to the currently created default ERP
*
* RETURN VALUES
* erp pointer to the modified ERP, or NULL
*/
static struct dasd_ccw_req *dasd_3990_erp_inspect_alias(
struct dasd_ccw_req *erp)
{
struct dasd_ccw_req *cqr = erp->refers;
char *sense;
if (cqr->block &&
(cqr->block->base != cqr->startdev)) {
sense = dasd_get_sense(&erp->refers->irb);
/*
* dynamic pav may have changed base alias mapping
*/
if (!test_bit(DASD_FLAG_OFFLINE, &cqr->startdev->flags) && sense
&& (sense[0] == 0x10) && (sense[7] == 0x0F)
&& (sense[8] == 0x67)) {
/*
* remove device from alias handling to prevent new
* requests from being scheduled on the
* wrong alias device
*/
dasd_alias_remove_device(cqr->startdev);
/* schedule worker to reload device */
dasd_reload_device(cqr->startdev);
}
if (cqr->startdev->features & DASD_FEATURE_ERPLOG) {
DBF_DEV_EVENT(DBF_ERR, cqr->startdev,
"ERP on alias device for request %p,"
" recover on base device %s", cqr,
dev_name(&cqr->block->base->cdev->dev));
}
dasd_eckd_reset_ccw_to_base_io(cqr);
erp->startdev = cqr->block->base;
erp->function = dasd_3990_erp_inspect_alias;
return erp;
} else
return NULL;
}
/*
* DASD_3990_ERP_INSPECT_24
*
* DESCRIPTION
* Does a detailed inspection of the 24 byte sense data
* and sets up a related error recovery action.
*
* PARAMETER
* sense sense data of the actual error
* erp pointer to the currently created default ERP
*
* RETURN VALUES
* erp pointer to the (additional) ERP
*/
static struct dasd_ccw_req *
dasd_3990_erp_inspect_24(struct dasd_ccw_req * erp, char *sense)
{
struct dasd_ccw_req *erp_filled = NULL;
/* Check sense for .... */
/* 'Command Reject' */
if ((erp_filled == NULL) && (sense[0] & SNS0_CMD_REJECT)) {
erp_filled = dasd_3990_erp_com_rej(erp, sense);
}
/* 'Intervention Required' */
if ((erp_filled == NULL) && (sense[0] & SNS0_INTERVENTION_REQ)) {
erp_filled = dasd_3990_erp_int_req(erp);
}
/* 'Bus Out Parity Check' */
if ((erp_filled == NULL) && (sense[0] & SNS0_BUS_OUT_CHECK)) {
erp_filled = dasd_3990_erp_bus_out(erp);
}
/* 'Equipment Check' */
if ((erp_filled == NULL) && (sense[0] & SNS0_EQUIPMENT_CHECK)) {
erp_filled = dasd_3990_erp_equip_check(erp, sense);
}
/* 'Data Check' */
if ((erp_filled == NULL) && (sense[0] & SNS0_DATA_CHECK)) {
erp_filled = dasd_3990_erp_data_check(erp, sense);
}
/* 'Overrun' */
if ((erp_filled == NULL) && (sense[0] & SNS0_OVERRUN)) {
erp_filled = dasd_3990_erp_overrun(erp, sense);
}
/* 'Invalid Track Format' */
if ((erp_filled == NULL) && (sense[1] & SNS1_INV_TRACK_FORMAT)) {
erp_filled = dasd_3990_erp_inv_format(erp, sense);
}
/* 'End-of-Cylinder' */
if ((erp_filled == NULL) && (sense[1] & SNS1_EOC)) {
erp_filled = dasd_3990_erp_EOC(erp, sense);
}
/* 'Environmental Data' */
if ((erp_filled == NULL) && (sense[2] & SNS2_ENV_DATA_PRESENT)) {
erp_filled = dasd_3990_erp_env_data(erp, sense);
}
/* 'No Record Found' */
if ((erp_filled == NULL) && (sense[1] & SNS1_NO_REC_FOUND)) {
erp_filled = dasd_3990_erp_no_rec(erp, sense);
}
/* 'File Protected' */
if ((erp_filled == NULL) && (sense[1] & SNS1_FILE_PROTECTED)) {
erp_filled = dasd_3990_erp_file_prot(erp);
}
/* other (unknown) error - do default ERP */
if (erp_filled == NULL) {
erp_filled = erp;
}
return erp_filled;
} /* END dasd_3990_erp_inspect_24 */
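/*
 * Illustrative sketch (not part of the original driver): the checks above
 * form a strict priority chain over the 24 byte sense bits - only the
 * first matching condition selects a recovery action, all later bits are
 * ignored. The hypothetical helper below makes that first-match semantic
 * explicit for the two highest-priority byte 0 conditions.
 */
static inline int dasd_3990_example_sense0_priority(char sense0)
{
	/* lower return value means higher recovery priority */
	if (sense0 & SNS0_CMD_REJECT)
		return 0;
	if (sense0 & SNS0_INTERVENTION_REQ)
		return 1;
	return -1;	/* no byte 0 condition present */
}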
/*
*****************************************************************************
* 32 byte sense ERP functions (only)
*****************************************************************************
*/
/*
* DASD_3990_ERP_ACTION_10_32
*
* DESCRIPTION
* Handles 32 byte 'Action 10' of Single Program Action Codes.
* Just retry and if retry doesn't work, return with error.
*
* PARAMETER
* erp current erp_head
* sense current sense data
* RETURN VALUES
* erp modified erp_head
*/
static struct dasd_ccw_req *
dasd_3990_erp_action_10_32(struct dasd_ccw_req * erp, char *sense)
{
struct dasd_device *device = erp->startdev;
erp->retries = 256;
erp->function = dasd_3990_erp_action_10_32;
DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Perform logging requested");
return erp;
} /* end dasd_3990_erp_action_10_32 */
/*
* DASD_3990_ERP_ACTION_1B_32
*
* DESCRIPTION
* Handles 32 byte 'Action 1B' of Single Program Action Codes.
* A write operation could not be finished because of an unexpected
* condition.
* The already created 'default erp' is used to get the link to
* the erp chain, but it can not be used for this recovery
* action because it contains no DE/LO data space.
*
* PARAMETER
* default_erp already added default erp.
* sense current sense data
*
* RETURN VALUES
* erp new erp or
* default_erp in case of imprecise ending or error
*/
static struct dasd_ccw_req *
dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
{
struct dasd_device *device = default_erp->startdev;
__u32 cpa = 0;
struct dasd_ccw_req *cqr;
struct dasd_ccw_req *erp;
struct DE_eckd_data *DE_data;
struct PFX_eckd_data *PFX_data;
char *LO_data; /* LO_eckd_data_t */
struct ccw1 *ccw, *oldccw;
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Write not finished because of unexpected condition");
default_erp->function = dasd_3990_erp_action_1B_32;
/* determine the original cqr */
cqr = default_erp;
while (cqr->refers != NULL) {
cqr = cqr->refers;
}
if (scsw_is_tm(&cqr->irb.scsw)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"32 bit sense, action 1B is not defined"
" in transport mode - just retry");
return default_erp;
}
/* for imprecise ending just do default erp */
if (sense[1] & 0x01) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Imprecise ending is set - just retry");
return default_erp;
}
/* determine the address of the CCW to be restarted */
/* Imprecise ending is not set -> addr from IRB-SCSW */
cpa = default_erp->refers->irb.scsw.cmd.cpa;
if (cpa == 0) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Unable to determine address of the CCW "
"to be restarted");
return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
}
/* Build new ERP request including DE/LO */
erp = dasd_alloc_erp_request(cqr->magic,
2 + 1,/* DE/LO + TIC */
sizeof(struct DE_eckd_data) +
sizeof(struct LO_eckd_data), device);
if (IS_ERR(erp)) {
/* internal error 01 - Unable to allocate ERP */
dev_err(&device->cdev->dev, "An error occurred in the DASD "
"device driver, reason=%s\n", "01");
return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
}
/* use original DE */
DE_data = erp->data;
oldccw = cqr->cpaddr;
if (oldccw->cmd_code == DASD_ECKD_CCW_PFX) {
PFX_data = cqr->data;
memcpy(DE_data, &PFX_data->define_extent,
sizeof(struct DE_eckd_data));
} else
memcpy(DE_data, cqr->data, sizeof(struct DE_eckd_data));
/* create LO */
LO_data = erp->data + sizeof(struct DE_eckd_data);
if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
/* should not */
return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
}
if ((sense[7] & 0x3F) == 0x01) {
/* operation code is WRITE DATA -> data area orientation */
LO_data[0] = 0x81;
} else if ((sense[7] & 0x3F) == 0x03) {
/* operation code is FORMAT WRITE -> index orientation */
LO_data[0] = 0xC3;
} else {
LO_data[0] = sense[7]; /* operation */
}
LO_data[1] = sense[8]; /* auxiliary */
LO_data[2] = sense[9];
LO_data[3] = sense[3]; /* count */
LO_data[4] = sense[29]; /* seek_addr.cyl */
LO_data[5] = sense[30]; /* seek_addr.cyl 2nd byte */
LO_data[7] = sense[31]; /* seek_addr.head 2nd byte */
memcpy(&(LO_data[8]), &(sense[11]), 8);
/* create DE ccw */
ccw = erp->cpaddr;
memset(ccw, 0, sizeof(struct ccw1));
ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
ccw->flags = CCW_FLAG_CC;
ccw->count = 16;
ccw->cda = (__u32)virt_to_phys(DE_data);
/* create LO ccw */
ccw++;
memset(ccw, 0, sizeof(struct ccw1));
ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
ccw->flags = CCW_FLAG_CC;
ccw->count = 16;
ccw->cda = (__u32)virt_to_phys(LO_data);
/* TIC to the failed ccw */
ccw++;
ccw->cmd_code = CCW_CMD_TIC;
ccw->cda = cpa;
/* fill erp related fields */
erp->flags = default_erp->flags;
erp->function = dasd_3990_erp_action_1B_32;
erp->refers = default_erp->refers;
erp->startdev = device;
erp->memdev = device;
erp->magic = default_erp->magic;
erp->expires = default_erp->expires;
erp->retries = 256;
erp->buildclk = get_tod_clock();
erp->status = DASD_CQR_FILLED;
/* remove the default erp */
dasd_free_erp_request(default_erp, device);
return erp;
} /* end dasd_3990_erp_action_1B_32 */
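/*
 * Worked example (illustrative only): for a failed WRITE DATA operation,
 * sense[7] & 0x3F == 0x01, so the rebuilt Locate Record above starts with
 * LO_data[0] = 0x81 (data area orientation), takes the auxiliary byte and
 * count from sense[8] and sense[3], the seek address from sense[29],
 * sense[30] and sense[31], and the search argument from sense[11..18].
 * The resulting channel program is Define Extent -> Locate Record -> TIC
 * back to the CCW that failed.
 */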
/*
* DASD_3990_UPDATE_1B
*
* DESCRIPTION
* Handles the update to the 32 byte 'Action 1B' of Single Program
* Action Codes in case the first action was not successful.
* The already created 'previous_erp' is the currently not successful
* ERP.
*
* PARAMETER
* previous_erp already created previous erp.
* sense current sense data
* RETURN VALUES
* erp modified erp
*/
static struct dasd_ccw_req *
dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
{
struct dasd_device *device = previous_erp->startdev;
__u32 cpa = 0;
struct dasd_ccw_req *cqr;
struct dasd_ccw_req *erp;
char *LO_data; /* struct LO_eckd_data */
struct ccw1 *ccw;
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Write not finished because of unexpected condition"
" - follow on");
/* determine the original cqr */
cqr = previous_erp;
while (cqr->refers != NULL) {
cqr = cqr->refers;
}
if (scsw_is_tm(&cqr->irb.scsw)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"32 bit sense, action 1B, update,"
" in transport mode - just retry");
return previous_erp;
}
/* for imprecise ending just do default erp */
if (sense[1] & 0x01) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Imprecise ending is set - just retry");
previous_erp->status = DASD_CQR_FILLED;
return previous_erp;
}
/* determine the address of the CCW to be restarted */
/* Imprecise ending is not set -> addr from IRB-SCSW */
cpa = previous_erp->irb.scsw.cmd.cpa;
if (cpa == 0) {
/* internal error 02 -
Unable to determine address of the CCW to be restarted */
dev_err(&device->cdev->dev, "An error occurred in the DASD "
"device driver, reason=%s\n", "02");
previous_erp->status = DASD_CQR_FAILED;
return previous_erp;
}
erp = previous_erp;
/* update the LO with the new returned sense data */
LO_data = erp->data + sizeof(struct DE_eckd_data);
if ((sense[3] == 0x01) && (LO_data[1] & 0x01)) {
/* should not happen */
previous_erp->status = DASD_CQR_FAILED;
return previous_erp;
}
if ((sense[7] & 0x3F) == 0x01) {
/* operation code is WRITE DATA -> data area orientation */
LO_data[0] = 0x81;
} else if ((sense[7] & 0x3F) == 0x03) {
/* operation code is FORMAT WRITE -> index orientation */
LO_data[0] = 0xC3;
} else {
LO_data[0] = sense[7]; /* operation */
}
LO_data[1] = sense[8]; /* auxiliary */
LO_data[2] = sense[9];
LO_data[3] = sense[3]; /* count */
LO_data[4] = sense[29]; /* seek_addr.cyl */
LO_data[5] = sense[30]; /* seek_addr.cyl 2nd byte */
LO_data[7] = sense[31]; /* seek_addr.head 2nd byte */
memcpy(&(LO_data[8]), &(sense[11]), 8);
/* TIC to the failed ccw */
ccw = erp->cpaddr; /* addr of DE ccw */
ccw++; /* addr of LO ccw */
ccw++; /* addr of TIC ccw */
ccw->cda = cpa;
erp->status = DASD_CQR_FILLED;
return erp;
} /* end dasd_3990_update_1B */
/*
* DASD_3990_ERP_COMPOUND_RETRY
*
* DESCRIPTION
* Handles the compound ERP action retry code.
* NOTE: At least one retry is done even if zero is specified
* by the sense data. This makes enqueueing of the request
* easier.
*
* PARAMETER
* sense sense data of the actual error
* erp pointer to the currently created ERP
*
* RETURN VALUES
* none (the given erp is modified in place)
*
*/
static void
dasd_3990_erp_compound_retry(struct dasd_ccw_req * erp, char *sense)
{
switch (sense[25] & 0x03) {
case 0x00: /* do not retry */
erp->retries = 1;
break;
case 0x01: /* retry 2 times */
erp->retries = 2;
break;
case 0x02: /* retry 10 times */
erp->retries = 10;
break;
case 0x03: /* retry 256 times */
erp->retries = 256;
break;
default:
BUG();
}
erp->function = dasd_3990_erp_compound_retry;
} /* end dasd_3990_erp_compound_retry */
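/*
 * Illustrative sketch (not part of the original driver): the switch above
 * maps the two low-order bits of sense byte 25 to a retry count. The same
 * decode expressed as a hypothetical table lookup, shown only to document
 * the encoding:
 */
static inline int dasd_3990_example_compound_retries(char sense25)
{
	static const int retries[4] = { 1, 2, 10, 256 };

	return retries[sense25 & 0x03];
}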
/*
* DASD_3990_ERP_COMPOUND_PATH
*
* DESCRIPTION
* Handles the compound ERP action for retry on alternate
* channel path.
*
* PARAMETER
* sense sense data of the actual error
* erp pointer to the currently created ERP
*
* RETURN VALUES
* none (the given erp is modified in place)
*
*/
static void
dasd_3990_erp_compound_path(struct dasd_ccw_req * erp, char *sense)
{
if (sense[25] & DASD_SENSE_BIT_3) {
dasd_3990_erp_alternate_path(erp);
if (erp->status == DASD_CQR_FAILED &&
!test_bit(DASD_CQR_VERIFY_PATH, &erp->flags)) {
/* reset the lpm and the status to be able to
* try further actions. */
erp->lpm = dasd_path_get_opm(erp->startdev);
erp->status = DASD_CQR_NEED_ERP;
}
}
erp->function = dasd_3990_erp_compound_path;
} /* end dasd_3990_erp_compound_path */
/*
* DASD_3990_ERP_COMPOUND_CODE
*
* DESCRIPTION
* Handles the compound ERP action for retry code.
*
* PARAMETER
* sense sense data of the actual error
* erp pointer to the currently created ERP
*
* RETURN VALUES
* erp NEW ERP pointer
*
*/
static struct dasd_ccw_req *
dasd_3990_erp_compound_code(struct dasd_ccw_req * erp, char *sense)
{
if (sense[25] & DASD_SENSE_BIT_2) {
switch (sense[28]) {
case 0x17:
/* issue a Diagnostic Control command with an
* Inhibit Write subcommand and controller modifier */
erp = dasd_3990_erp_DCTL(erp, 0x20);
break;
case 0x25:
/* wait for 5 seconds and retry again */
erp->retries = 1;
dasd_3990_erp_block_queue (erp, 5*HZ);
break;
default:
/* should not happen - continue */
break;
}
}
erp->function = dasd_3990_erp_compound_code;
return erp;
} /* end dasd_3990_erp_compound_code */
/*
* DASD_3990_ERP_COMPOUND_CONFIG
*
* DESCRIPTION
* Handles the compound ERP action for configuration
* dependent error.
* Note: duplex handling is not implemented (yet).
*
* PARAMETER
* sense sense data of the actual error
* erp pointer to the currently created ERP
*
* RETURN VALUES
* none (the given erp is modified in place)
*
*/
static void
dasd_3990_erp_compound_config(struct dasd_ccw_req * erp, char *sense)
{
if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) {
/* set to suspended duplex state then restart
internal error 05 - Set device to suspended duplex state
should be done */
struct dasd_device *device = erp->startdev;
dev_err(&device->cdev->dev,
"An error occurred in the DASD device driver, "
"reason=%s\n", "05");
}
erp->function = dasd_3990_erp_compound_config;
} /* end dasd_3990_erp_compound_config */
/*
* DASD_3990_ERP_COMPOUND
*
* DESCRIPTION
* Does the further compound program action if
* compound retry was not successful.
*
* PARAMETER
* sense sense data of the actual error
* erp pointer to the current (failed) ERP
*
* RETURN VALUES
* erp (additional) ERP pointer
*
*/
static struct dasd_ccw_req *
dasd_3990_erp_compound(struct dasd_ccw_req * erp, char *sense)
{
if ((erp->function == dasd_3990_erp_compound_retry) &&
(erp->status == DASD_CQR_NEED_ERP)) {
dasd_3990_erp_compound_path(erp, sense);
}
if ((erp->function == dasd_3990_erp_compound_path) &&
(erp->status == DASD_CQR_NEED_ERP)) {
erp = dasd_3990_erp_compound_code(erp, sense);
}
if ((erp->function == dasd_3990_erp_compound_code) &&
(erp->status == DASD_CQR_NEED_ERP)) {
dasd_3990_erp_compound_config(erp, sense);
}
/* if no compound action ERP specified, the request failed */
if (erp->status == DASD_CQR_NEED_ERP)
erp->status = DASD_CQR_FAILED;
return erp;
} /* end dasd_3990_erp_compound */
/*
* DASD_3990_ERP_HANDLE_SIM
*
* DESCRIPTION
* inspects the SIM SENSE data and starts an appropriate action
*
* PARAMETER
* sense sense data of the actual error
*
* RETURN VALUES
* none
*/
void
dasd_3990_erp_handle_sim(struct dasd_device *device, char *sense)
{
/* print message according to log or message to operator mode */
if ((sense[24] & DASD_SIM_MSG_TO_OP) || (sense[1] & 0x10)) {
/* print SIM SRC from RefCode */
dev_err(&device->cdev->dev, "SIM - SRC: "
"%02x%02x%02x%02x\n", sense[22],
sense[23], sense[11], sense[12]);
} else if (sense[24] & DASD_SIM_LOG) {
/* print SIM SRC Refcode */
dev_warn(&device->cdev->dev, "log SIM - SRC: "
"%02x%02x%02x%02x\n", sense[22],
sense[23], sense[11], sense[12]);
}
}
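/*
 * Worked example (illustrative only): a SIM with sense[22] = 0x12,
 * sense[23] = 0x34, sense[11] = 0x56 and sense[12] = 0x78 is reported as
 * "SIM - SRC: 12345678" - the service reference code is simply the
 * concatenation of those four sense bytes.
 */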
/*
* DASD_3990_ERP_INSPECT_32
*
* DESCRIPTION
* Does a detailed inspection of the 32 byte sense data
* and sets up a related error recovery action.
*
* PARAMETER
* sense sense data of the actual error
* erp pointer to the currently created default ERP
*
* RETURN VALUES
* erp_filled pointer to the ERP
*
*/
static struct dasd_ccw_req *
dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
{
struct dasd_device *device = erp->startdev;
erp->function = dasd_3990_erp_inspect_32;
/* check for SIM sense data */
if ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)
dasd_3990_erp_handle_sim(device, sense);
if (sense[25] & DASD_SENSE_BIT_0) {
/* compound program action codes (byte25 bit 0 == '1') */
dasd_3990_erp_compound_retry(erp, sense);
} else {
/* single program action codes (byte25 bit 0 == '0') */
switch (sense[25]) {
case 0x00: /* success - use default ERP for retries */
DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
"ERP called for successful request"
" - just retry");
break;
case 0x01: /* fatal error */
dev_err(&device->cdev->dev,
"ERP failed for the DASD\n");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
break;
case 0x02: /* intervention required */
case 0x03: /* intervention required during dual copy */
erp = dasd_3990_erp_int_req(erp);
break;
case 0x0F: /* length mismatch during update write command
internal error 08 - update write command error*/
dev_err(&device->cdev->dev, "An error occurred in the "
"DASD device driver, reason=%s\n", "08");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
break;
case 0x10: /* logging required for other channel program */
erp = dasd_3990_erp_action_10_32(erp, sense);
break;
case 0x15: /* next track outside defined extent
internal error 07 - The next track is not
within the defined storage extent */
dev_err(&device->cdev->dev,
"An error occurred in the DASD device driver, "
"reason=%s\n", "07");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
break;
case 0x1B: /* unexpected condition during write */
erp = dasd_3990_erp_action_1B_32(erp, sense);
break;
case 0x1C: /* invalid data */
dev_emerg(&device->cdev->dev,
"Data recovered during retry with PCI "
"fetch mode active\n");
/* not possible to handle this situation in Linux */
panic
("Invalid data - No way to inform application "
"about the possibly incorrect data");
break;
case 0x1D: /* state-change pending */
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"A State change pending condition exists "
"for the subsystem or device");
erp = dasd_3990_erp_action_4(erp, sense);
break;
case 0x1E: /* busy */
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Busy condition exists "
"for the subsystem or device");
erp = dasd_3990_erp_action_4(erp, sense);
break;
default: /* all other errors - default erp */
break;
}
}
return erp;
} /* end dasd_3990_erp_inspect_32 */
static void dasd_3990_erp_disable_path(struct dasd_device *device, __u8 lpum)
{
int pos = pathmask_to_pos(lpum);
if (!(device->features & DASD_FEATURE_PATH_AUTODISABLE)) {
dev_err(&device->cdev->dev,
"Path %x.%02x (pathmask %02x) is operational despite excessive IFCCs\n",
device->path[pos].cssid, device->path[pos].chpid, lpum);
goto out;
}
/* no remaining path, cannot disable */
if (!(dasd_path_get_opm(device) & ~lpum)) {
dev_err(&device->cdev->dev,
"Last path %x.%02x (pathmask %02x) is operational despite excessive IFCCs\n",
device->path[pos].cssid, device->path[pos].chpid, lpum);
goto out;
}
dev_err(&device->cdev->dev,
"Path %x.%02x (pathmask %02x) is disabled - IFCC threshold exceeded\n",
device->path[pos].cssid, device->path[pos].chpid, lpum);
dasd_path_remove_opm(device, lpum);
dasd_path_add_ifccpm(device, lpum);
out:
device->path[pos].errorclk = 0;
atomic_set(&device->path[pos].error_count, 0);
}
static void dasd_3990_erp_account_error(struct dasd_ccw_req *erp)
{
struct dasd_device *device = erp->startdev;
__u8 lpum = erp->refers->irb.esw.esw1.lpum;
int pos = pathmask_to_pos(lpum);
unsigned long clk;
if (!device->path_thrhld)
return;
clk = get_tod_clock();
/*
* check if the last error is longer ago than the timeout,
* if so reset error state
*/
if ((tod_to_ns(clk - device->path[pos].errorclk) / NSEC_PER_SEC)
>= device->path_interval) {
atomic_set(&device->path[pos].error_count, 0);
device->path[pos].errorclk = 0;
}
atomic_inc(&device->path[pos].error_count);
device->path[pos].errorclk = clk;
/* threshold exceeded disable path if possible */
if (atomic_read(&device->path[pos].error_count) >=
device->path_thrhld)
dasd_3990_erp_disable_path(device, lpum);
}
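/*
 * Worked example (illustrative only, with hypothetical tuning values):
 * with path_thrhld = 256 and path_interval = 300, a path is taken out of
 * the operational mask once 256 IFCC errors accumulate without any gap of
 * 300 seconds or more between consecutive errors; a single quiet interval
 * of at least path_interval seconds resets the per-path error count.
 */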
/*
*****************************************************************************
* main ERP control functions (24 and 32 byte sense)
*****************************************************************************
*/
/*
* DASD_3990_ERP_CONTROL_CHECK
*
* DESCRIPTION
* Does a generic inspection of whether a control check occurred and sets up
* the related error recovery procedure
*
* PARAMETER
* erp pointer to the currently created default ERP
*
* RETURN VALUES
* erp_filled pointer to the erp
*/
static struct dasd_ccw_req *
dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
{
struct dasd_device *device = erp->startdev;
if (scsw_cstat(&erp->refers->irb.scsw) & (SCHN_STAT_INTF_CTRL_CHK
| SCHN_STAT_CHN_CTRL_CHK)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"channel or interface control check");
dasd_3990_erp_account_error(erp);
erp = dasd_3990_erp_action_4(erp, NULL);
}
return erp;
}
/*
* DASD_3990_ERP_INSPECT
*
* DESCRIPTION
* Does a detailed inspection for sense data by calling either
* the 24-byte or the 32-byte inspection routine.
*
* PARAMETER
* erp pointer to the currently created default ERP
* RETURN VALUES
* erp_new contents were possibly modified
*/
static struct dasd_ccw_req *
dasd_3990_erp_inspect(struct dasd_ccw_req *erp)
{
struct dasd_ccw_req *erp_new = NULL;
char *sense;
/* if this problem occurred on an alias retry on base */
erp_new = dasd_3990_erp_inspect_alias(erp);
if (erp_new)
return erp_new;
/* sense data are located in the refers record of the
* already set up new ERP!
* check if concurrent sense is available
*/
sense = dasd_get_sense(&erp->refers->irb);
if (!sense)
erp_new = dasd_3990_erp_control_check(erp);
/* distinguish between 24 and 32 byte sense data */
else if (sense[27] & DASD_SENSE_BIT_0) {
/* inspect the 24 byte sense data */
erp_new = dasd_3990_erp_inspect_24(erp, sense);
} else {
/* inspect the 32 byte sense data */
erp_new = dasd_3990_erp_inspect_32(erp, sense);
} /* end distinguish between 24 and 32 byte sense data */
return erp_new;
}
/*
* DASD_3990_ERP_ADD_ERP
*
* DESCRIPTION
* This function adds an additional request block (ERP) to the head of
* the given cqr (or erp).
* For a command mode cqr the erp is initialized as a default erp
* (retry TIC).
* For transport mode we make a copy of the original TCW (points to
* the original TCCB, TIDALs, etc.) but give it a fresh
* TSB so the original sense data will not be changed.
*
* PARAMETER
* cqr head of the current ERP-chain (or single cqr if
* first error)
* RETURN VALUES
* erp pointer to new ERP-chain head
*/
static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
{
struct dasd_device *device = cqr->startdev;
struct ccw1 *ccw;
struct dasd_ccw_req *erp;
int cplength, datasize;
struct tcw *tcw;
struct tsb *tsb;
if (cqr->cpmode == 1) {
cplength = 0;
/* TCW needs to be 64 byte aligned, so leave enough room */
datasize = 64 + sizeof(struct tcw) + sizeof(struct tsb);
} else {
cplength = 2;
datasize = 0;
}
/* allocate additional request block */
erp = dasd_alloc_erp_request(cqr->magic,
cplength, datasize, device);
if (IS_ERR(erp)) {
if (cqr->retries <= 0) {
DBF_DEV_EVENT(DBF_ERR, device, "%s",
"Unable to allocate ERP request");
cqr->status = DASD_CQR_FAILED;
cqr->stopclk = get_tod_clock();
} else {
DBF_DEV_EVENT(DBF_ERR, device,
"Unable to allocate ERP request "
"(%i retries left)",
cqr->retries);
dasd_block_set_timer(device->block, (HZ << 3));
}
return erp;
}
ccw = cqr->cpaddr;
if (cqr->cpmode == 1) {
/* make a shallow copy of the original tcw but set new tsb */
erp->cpmode = 1;
erp->cpaddr = PTR_ALIGN(erp->data, 64);
tcw = erp->cpaddr;
tsb = (struct tsb *) &tcw[1];
*tcw = *((struct tcw *)cqr->cpaddr);
tcw->tsb = virt_to_phys(tsb);
} else if (ccw->cmd_code == DASD_ECKD_CCW_PSF) {
/* PSF cannot be chained from NOOP/TIC */
erp->cpaddr = cqr->cpaddr;
} else {
/* initialize request with default TIC to current ERP/CQR */
ccw = erp->cpaddr;
ccw->cmd_code = CCW_CMD_NOOP;
ccw->flags = CCW_FLAG_CC;
ccw++;
ccw->cmd_code = CCW_CMD_TIC;
ccw->cda = (__u32)virt_to_phys(cqr->cpaddr);
}
erp->flags = cqr->flags;
erp->function = dasd_3990_erp_add_erp;
erp->refers = cqr;
erp->startdev = device;
erp->memdev = device;
erp->block = cqr->block;
erp->magic = cqr->magic;
erp->expires = cqr->expires;
erp->retries = device->default_retries;
erp->buildclk = get_tod_clock();
erp->status = DASD_CQR_FILLED;
return erp;
}
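/*
 * Illustrative note (not part of the original driver): in command mode
 * the default ERP built above is just two chained CCWs - a NOOP followed
 * by a TIC back to the failed channel program - so a plain retry simply
 * re-executes the original CCW chain. In transport mode the TCW is copied
 * shallowly and only the TSB is replaced, which keeps the original sense
 * data intact.
 */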
/*
* DASD_3990_ERP_ADDITIONAL_ERP
*
* DESCRIPTION
* An additional ERP is needed to handle the current error.
* Add ERP to the head of the ERP-chain containing the ERP processing
* determined based on the sense data.
*
* PARAMETER
* cqr head of the current ERP-chain (or single cqr if
* first error)
*
* RETURN VALUES
* erp pointer to new ERP-chain head
*/
static struct dasd_ccw_req *
dasd_3990_erp_additional_erp(struct dasd_ccw_req * cqr)
{
struct dasd_ccw_req *erp = NULL;
/* add erp and initialize with default TIC */
erp = dasd_3990_erp_add_erp(cqr);
if (IS_ERR(erp))
return erp;
/* inspect sense, determine specific ERP if possible */
if (erp != cqr) {
erp = dasd_3990_erp_inspect(erp);
}
return erp;
} /* end dasd_3990_erp_additional_erp */
/*
* DASD_3990_ERP_ERROR_MATCH
*
* DESCRIPTION
* Check if the device status of the given cqr is the same.
* This means that the failed CCW and the relevant sense data
* must match.
* I don't distinguish between 24 and 32 byte sense because in case of
* 24 byte sense bytes 25 and 27 are set as well.
*
* PARAMETER
* cqr1 first cqr, which will be compared with the
* cqr2 second cqr.
*
* RETURN VALUES
* match 'boolean' for match found
* returns 1 if match found, otherwise 0.
*/
static int dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1,
struct dasd_ccw_req *cqr2)
{
char *sense1, *sense2;
if (cqr1->startdev != cqr2->startdev)
return 0;
sense1 = dasd_get_sense(&cqr1->irb);
sense2 = dasd_get_sense(&cqr2->irb);
/* one request has sense data, the other not -> no match, return 0 */
if (!sense1 != !sense2)
return 0;
/* no sense data in both cases -> check cstat for IFCC */
if (!sense1 && !sense2) {
if ((scsw_cstat(&cqr1->irb.scsw) & (SCHN_STAT_INTF_CTRL_CHK |
SCHN_STAT_CHN_CTRL_CHK)) ==
(scsw_cstat(&cqr2->irb.scsw) & (SCHN_STAT_INTF_CTRL_CHK |
SCHN_STAT_CHN_CTRL_CHK)))
return 1; /* match with ifcc*/
}
/* check sense data; byte 0-2,25,27 */
if (!(sense1 && sense2 &&
(memcmp(sense1, sense2, 3) == 0) &&
(sense1[27] == sense2[27]) &&
(sense1[25] == sense2[25]))) {
return 0; /* sense doesn't match */
}
return 1; /* match */
} /* end dasd_3990_erp_error_match */
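/*
 * Worked example (illustrative only): two requests match if both carry
 * sense data and bytes 0-2, 25 and 27 agree, or if neither carries sense
 * data and both show the same IFCC state in cstat. A command reject in
 * byte 0 on one attempt and a data check on the next therefore start a
 * fresh ERP chain instead of consuming the retries of the old one.
 */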
/*
* DASD_3990_ERP_IN_ERP
*
* DESCRIPTION
* check if the current error already happened before.
* quick exit if current cqr is not an ERP (cqr->refers=NULL)
*
* PARAMETER
* cqr failed cqr (either original cqr or already an erp)
*
* RETURN VALUES
* erp erp-pointer to the already defined error
* recovery procedure OR
* NULL if a 'new' error occurred.
*/
static struct dasd_ccw_req *
dasd_3990_erp_in_erp(struct dasd_ccw_req *cqr)
{
struct dasd_ccw_req *erp_head = cqr, /* save erp chain head */
*erp_match = NULL; /* save matching erp */
int match = 0; /* 'boolean' for matching error found */
if (cqr->refers == NULL) { /* return if not in erp */
return NULL;
}
/* check the erp/cqr chain for current error */
do {
match = dasd_3990_erp_error_match(erp_head, cqr->refers);
erp_match = cqr; /* save possible matching erp */
cqr = cqr->refers; /* check next erp/cqr in queue */
} while ((cqr->refers != NULL) && (!match));
if (!match) {
return NULL; /* no match was found */
}
return erp_match; /* return address of matching erp */
} /* END dasd_3990_erp_in_erp */
/*
* DASD_3990_ERP_FURTHER_ERP (24 & 32 byte sense)
*
* DESCRIPTION
* No retry is left for the current ERP. Check what has to be done
* with the ERP.
* - do further defined ERP action or
* - wait for interrupt or
* - exit with permanent error
*
* PARAMETER
* erp ERP which is in progress with no retry left
*
* RETURN VALUES
* erp modified/additional ERP
*/
static struct dasd_ccw_req *
dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
{
struct dasd_device *device = erp->startdev;
char *sense = dasd_get_sense(&erp->irb);
/* check for 24 byte sense ERP */
if ((erp->function == dasd_3990_erp_bus_out) ||
(erp->function == dasd_3990_erp_action_1) ||
(erp->function == dasd_3990_erp_action_4)) {
erp = dasd_3990_erp_action_1(erp);
} else if (erp->function == dasd_3990_erp_action_1_sec) {
erp = dasd_3990_erp_action_1_sec(erp);
} else if (erp->function == dasd_3990_erp_action_5) {
/* retries have not been successful */
/* prepare erp for retry on different channel path */
erp = dasd_3990_erp_action_1(erp);
if (sense && !(sense[2] & DASD_SENSE_BIT_0)) {
/* issue a Diagnostic Control command with an
* Inhibit Write subcommand */
switch (sense[25]) {
case 0x17:
case 0x57:{ /* controller */
erp = dasd_3990_erp_DCTL(erp, 0x20);
break;
}
case 0x18:
case 0x58:{ /* channel path */
erp = dasd_3990_erp_DCTL(erp, 0x40);
break;
}
case 0x19:
case 0x59:{ /* storage director */
erp = dasd_3990_erp_DCTL(erp, 0x80);
break;
}
default:
DBF_DEV_EVENT(DBF_WARNING, device,
"invalid subcommand modifier 0x%x "
"for Diagnostic Control Command",
sense[25]);
}
}
/* check for 32 byte sense ERP */
} else if (sense &&
((erp->function == dasd_3990_erp_compound_retry) ||
(erp->function == dasd_3990_erp_compound_path) ||
(erp->function == dasd_3990_erp_compound_code) ||
(erp->function == dasd_3990_erp_compound_config))) {
erp = dasd_3990_erp_compound(erp, sense);
} else {
/*
* No retry left and no additional special handling
* necessary
*/
dev_err(&device->cdev->dev,
"ERP %p has run out of retries and failed\n", erp);
erp->status = DASD_CQR_FAILED;
}
return erp;
} /* end dasd_3990_erp_further_erp */
/*
* DASD_3990_ERP_HANDLE_MATCH_ERP
*
* DESCRIPTION
* An error occurred again and an ERP has been detected which is already
* used to handle this error (e.g. retries).
* All prior ERPs are assumed to be successful and therefore removed
* from the queue.
* If retry counter of matching erp is already 0, it is checked if further
* action is needed (besides retry) or if the ERP has failed.
*
* PARAMETER
* erp_head first ERP in ERP-chain
* erp ERP that handles the actual error.
* (matching erp)
*
* RETURN VALUES
* erp modified/additional ERP
*/
static struct dasd_ccw_req *
dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
struct dasd_ccw_req *erp)
{
struct dasd_device *device = erp_head->startdev;
struct dasd_ccw_req *erp_done = erp_head; /* finished req */
struct dasd_ccw_req *erp_free = NULL; /* req to be freed */
/* loop over successful ERPs and remove them from chanq */
while (erp_done != erp) {
if (erp_done == NULL) /* end of chain reached */
panic(PRINTK_HEADER "Programming error in ERP! The "
"original request was lost\n");
/* remove the request from the device queue */
list_del(&erp_done->blocklist);
erp_free = erp_done;
erp_done = erp_done->refers;
/* free the finished erp request */
dasd_free_erp_request(erp_free, erp_free->memdev);
} /* end while */
if (erp->retries > 0) {
char *sense = dasd_get_sense(&erp->refers->irb);
/* check for special retries */
if (sense && erp->function == dasd_3990_erp_action_4) {
erp = dasd_3990_erp_action_4(erp, sense);
} else if (sense &&
erp->function == dasd_3990_erp_action_1B_32) {
erp = dasd_3990_update_1B(erp, sense);
} else if (sense && erp->function == dasd_3990_erp_int_req) {
erp = dasd_3990_erp_int_req(erp);
} else {
/* simple retry */
DBF_DEV_EVENT(DBF_DEBUG, device,
"%i retries left for erp %p",
erp->retries, erp);
/* handle the request again... */
erp->status = DASD_CQR_FILLED;
}
} else {
/* no retry left - check for further necessary action */
/* if no further actions, handle rest as permanent error */
erp = dasd_3990_erp_further_erp(erp);
}
return erp;
} /* end dasd_3990_erp_handle_match_erp */
/*
* DASD_3990_ERP_ACTION
*
* DESCRIPTION
* control routine for 3990 erp actions.
* Has to be called with the queue lock (namely the s390_irq_lock) acquired.
*
* PARAMETER
* cqr failed cqr (either original cqr or already an erp)
*
* RETURN VALUES
* erp erp-pointer to the head of the ERP action chain.
* This means:
* - either a ptr to an additional ERP cqr or
* - the original given cqr (whose status might
* be modified)
*/
struct dasd_ccw_req *
dasd_3990_erp_action(struct dasd_ccw_req * cqr)
{
struct dasd_ccw_req *erp = NULL;
struct dasd_device *device = cqr->startdev;
struct dasd_ccw_req *temp_erp = NULL;
if (device->features & DASD_FEATURE_ERPLOG) {
/* print current erp_chain */
dev_err(&device->cdev->dev,
"ERP chain at BEGINNING of ERP-ACTION\n");
for (temp_erp = cqr;
temp_erp != NULL; temp_erp = temp_erp->refers) {
dev_err(&device->cdev->dev,
"ERP %p (%02x) refers to %p\n",
temp_erp, temp_erp->status,
temp_erp->refers);
}
}
/* double-check if current erp/cqr was successful */
if ((scsw_cstat(&cqr->irb.scsw) == 0x00) &&
(scsw_dstat(&cqr->irb.scsw) ==
(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
DBF_DEV_EVENT(DBF_DEBUG, device,
"ERP called for successful request %p"
" - NO ERP necessary", cqr);
cqr->status = DASD_CQR_DONE;
return cqr;
}
/* check if error happened before */
erp = dasd_3990_erp_in_erp(cqr);
if (erp == NULL) {
/* no matching erp found - set up erp */
erp = dasd_3990_erp_additional_erp(cqr);
if (IS_ERR(erp))
return erp;
} else {
/* matching erp found - set all leading erp's to DONE */
erp = dasd_3990_erp_handle_match_erp(cqr, erp);
}
/*
* For path verification work we need to stick with the path that was
* originally chosen so that the per path configuration data is
* assigned correctly.
*/
if (test_bit(DASD_CQR_VERIFY_PATH, &erp->flags) && cqr->lpm) {
erp->lpm = cqr->lpm;
}
if (device->features & DASD_FEATURE_ERPLOG) {
/* print current erp_chain */
dev_err(&device->cdev->dev,
"ERP chain at END of ERP-ACTION\n");
for (temp_erp = erp;
temp_erp != NULL; temp_erp = temp_erp->refers) {
dev_err(&device->cdev->dev,
"ERP %p (%02x) refers to %p\n",
temp_erp, temp_erp->status,
temp_erp->refers);
}
}
/* enqueue ERP request if it's a new one */
if (list_empty(&erp->blocklist)) {
cqr->status = DASD_CQR_IN_ERP;
/* add erp request before the cqr */
list_add_tail(&erp->blocklist, &cqr->blocklist);
}
return erp;
} /* end dasd_3990_erp_action */
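/*
 * Illustrative summary (not part of the original driver): the overall
 * control flow of dasd_3990_erp_action() is
 *
 *	request actually succeeded	-> mark DONE, return original cqr
 *	error already seen in chain	-> reuse the matching ERP
 *					   (retry or escalate)
 *	otherwise			-> build a new default ERP and
 *					   refine it from the sense data
 *
 * and a newly built ERP is queued ahead of the request it recovers.
 */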
| linux-master | drivers/s390/block/dasd_3990_erp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <[email protected]>
* Bugreports.to..: <[email protected]>
* Copyright IBM Corp. 1999, 2009
*/
#define KMSG_COMPONENT "dasd-fba"
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <asm/debug.h>
#include <linux/slab.h>
#include <linux/hdreg.h> /* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/ccwdev.h>
#include "dasd_int.h"
#include "dasd_fba.h"
#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(fba):"
#define FBA_DEFAULT_RETRIES 32
#define DASD_FBA_CCW_WRITE 0x41
#define DASD_FBA_CCW_READ 0x42
#define DASD_FBA_CCW_LOCATE 0x43
#define DASD_FBA_CCW_DEFINE_EXTENT 0x63
MODULE_LICENSE("GPL");
static struct dasd_discipline dasd_fba_discipline;
static void *dasd_fba_zero_page;
struct dasd_fba_private {
struct dasd_fba_characteristics rdc_data;
};
static struct ccw_device_id dasd_fba_ids[] = {
{ CCW_DEVICE_DEVTYPE (0x6310, 0, 0x9336, 0), .driver_info = 0x1},
{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3370, 0), .driver_info = 0x2},
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ccw, dasd_fba_ids);
static int
dasd_fba_set_online(struct ccw_device *cdev)
{
return dasd_generic_set_online(cdev, &dasd_fba_discipline);
}
static struct ccw_driver dasd_fba_driver = {
.driver = {
.name = "dasd-fba",
.owner = THIS_MODULE,
.dev_groups = dasd_dev_groups,
},
.ids = dasd_fba_ids,
.probe = dasd_generic_probe,
.remove = dasd_generic_remove,
.set_offline = dasd_generic_set_offline,
.set_online = dasd_fba_set_online,
.notify = dasd_generic_notify,
.path_event = dasd_generic_path_event,
.int_class = IRQIO_DAS,
};
static void
define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
int blksize, int beg, int nr)
{
ccw->cmd_code = DASD_FBA_CCW_DEFINE_EXTENT;
ccw->flags = 0;
ccw->count = 16;
ccw->cda = (__u32)virt_to_phys(data);
memset(data, 0, sizeof (struct DE_fba_data));
if (rw == WRITE)
(data->mask).perm = 0x0;
else if (rw == READ)
(data->mask).perm = 0x1;
else
data->mask.perm = 0x2;
data->blk_size = blksize;
data->ext_loc = beg;
data->ext_end = nr - 1;
}
static void
locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
int block_nr, int block_ct)
{
ccw->cmd_code = DASD_FBA_CCW_LOCATE;
ccw->flags = 0;
ccw->count = 8;
ccw->cda = (__u32)virt_to_phys(data);
memset(data, 0, sizeof (struct LO_fba_data));
if (rw == WRITE)
data->operation.cmd = 0x5;
else if (rw == READ)
data->operation.cmd = 0x6;
else
data->operation.cmd = 0x8;
data->blk_nr = block_nr;
data->blk_ct = block_ct;
}
static int
dasd_fba_check_characteristics(struct dasd_device *device)
{
struct dasd_fba_private *private = device->private;
struct ccw_device *cdev = device->cdev;
struct dasd_block *block;
int readonly, rc;
if (!private) {
private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
if (!private) {
dev_warn(&device->cdev->dev,
"Allocating memory for private DASD "
"data failed\n");
return -ENOMEM;
}
device->private = private;
} else {
memset(private, 0, sizeof(*private));
}
block = dasd_alloc_block();
if (IS_ERR(block)) {
DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", "could not allocate "
"dasd block structure");
device->private = NULL;
kfree(private);
return PTR_ERR(block);
}
device->block = block;
block->base = device;
/* Read Device Characteristics */
rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
&private->rdc_data, 32);
if (rc) {
DBF_EVENT_DEVID(DBF_WARNING, cdev, "Read device "
"characteristics returned error %d", rc);
device->block = NULL;
dasd_free_block(block);
device->private = NULL;
kfree(private);
return rc;
}
device->default_expires = DASD_EXPIRES;
device->default_retries = FBA_DEFAULT_RETRIES;
dasd_path_set_opm(device, LPM_ANYPATH);
readonly = dasd_device_is_ro(device);
if (readonly)
set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
/* FBA supports discard, set the according feature bit */
dasd_set_feature(cdev, DASD_FEATURE_DISCARD, 1);
dev_info(&device->cdev->dev,
"New FBA DASD %04X/%02X (CU %04X/%02X) with %d MB "
"and %d B/blk%s\n",
cdev->id.dev_type,
cdev->id.dev_model,
cdev->id.cu_type,
cdev->id.cu_model,
((private->rdc_data.blk_bdsa *
(private->rdc_data.blk_size >> 9)) >> 11),
private->rdc_data.blk_size,
readonly ? ", read-only device" : "");
return 0;
}
static int dasd_fba_do_analysis(struct dasd_block *block)
{
struct dasd_fba_private *private = block->base->private;
int sb, rc;
rc = dasd_check_blocksize(private->rdc_data.blk_size);
if (rc) {
DBF_DEV_EVENT(DBF_WARNING, block->base, "unknown blocksize %d",
private->rdc_data.blk_size);
return rc;
}
block->blocks = private->rdc_data.blk_bdsa;
block->bp_block = private->rdc_data.blk_size;
block->s2b_shift = 0; /* bits to shift 512 to get a block */
for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1)
block->s2b_shift++;
return 0;
}
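/*
 * Illustrative sketch (not part of the original driver): s2b_shift is the
 * base-2 logarithm of (blocksize / 512); the loop above computes it
 * incrementally. A hypothetical stand-alone version for the valid
 * power-of-two block sizes:
 */
static inline int dasd_fba_example_s2b_shift(unsigned int blk_size)
{
	int shift = 0;

	while ((512u << shift) < blk_size)
		shift++;
	return shift;	/* 0 for 512, 1 for 1024, 2 for 2048, 3 for 4096 */
}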
static int dasd_fba_fill_geometry(struct dasd_block *block,
struct hd_geometry *geo)
{
if (dasd_check_blocksize(block->bp_block) != 0)
return -EINVAL;
geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
geo->heads = 16;
geo->sectors = 128 >> block->s2b_shift;
return 0;
}
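/*
 * Worked example (illustrative only): FBA devices have no real geometry,
 * so a synthetic one is reported. For 4 KiB blocks (s2b_shift = 3) on a
 * 1,000,000 block device: sectors = 128 >> 3 = 16, heads = 16, and
 * cylinders = (1000000 << 3) >> 10 = 7812.
 */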
static dasd_erp_fn_t
dasd_fba_erp_action(struct dasd_ccw_req * cqr)
{
return dasd_default_erp_action;
}
static dasd_erp_fn_t
dasd_fba_erp_postaction(struct dasd_ccw_req * cqr)
{
if (cqr->function == dasd_default_erp_action)
return dasd_default_erp_postaction;
DBF_DEV_EVENT(DBF_WARNING, cqr->startdev, "unknown ERP action %p",
cqr->function);
return NULL;
}
static void dasd_fba_check_for_device_change(struct dasd_device *device,
struct dasd_ccw_req *cqr,
struct irb *irb)
{
char mask;
/* first of all check for state change pending interrupt */
mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
if ((irb->scsw.cmd.dstat & mask) == mask)
dasd_generic_handle_state_change(device);
}
/*
* Builds a CCW with no data payload
*/
static void ccw_write_no_data(struct ccw1 *ccw)
{
ccw->cmd_code = DASD_FBA_CCW_WRITE;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 0;
}
/*
* Builds a CCW that writes only zeroes.
*/
static void ccw_write_zero(struct ccw1 *ccw, int count)
{
ccw->cmd_code = DASD_FBA_CCW_WRITE;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = count;
ccw->cda = (__u32)virt_to_phys(dasd_fba_zero_page);
}
/*
* Helper function to count the number of CCWs needed within a given range
* with 4k alignment and command chaining in mind.
*/
static int count_ccws(sector_t first_rec, sector_t last_rec,
unsigned int blocks_per_page)
{
sector_t wz_stop = 0, d_stop = 0;
int cur_pos = 0;
int count = 0;
if (first_rec % blocks_per_page != 0) {
wz_stop = first_rec + blocks_per_page -
(first_rec % blocks_per_page) - 1;
if (wz_stop > last_rec)
wz_stop = last_rec;
cur_pos = wz_stop - first_rec + 1;
count++;
}
if (last_rec - (first_rec + cur_pos) + 1 >= blocks_per_page) {
if ((last_rec - blocks_per_page + 1) % blocks_per_page != 0)
d_stop = last_rec - ((last_rec - blocks_per_page + 1) %
blocks_per_page);
else
d_stop = last_rec;
cur_pos += d_stop - (first_rec + cur_pos) + 1;
count++;
}
if (cur_pos == 0 || first_rec + cur_pos - 1 < last_rec)
count++;
return count;
}
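/*
 * Worked example (illustrative only): with 4 KiB pages and 512 byte
 * blocks, blocks_per_page = 8. A discard of blocks 5..26 splits into an
 * unaligned head 5..7 (write zeroes), an aligned middle 8..23 (real
 * discard via a zero-payload WRITE) and an unaligned tail 24..26 (write
 * zeroes), so count_ccws() returns 3 and the channel program needs three
 * locate record/write pairs.
 */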
/*
* This function builds a CCW request for block layer discard requests.
* Each page in the z/VM hypervisor that represents certain records of an FBA
* device will be padded with zeros. This is a special behaviour of the WRITE
* command which is triggered when no data payload is added to the CCW.
*
* Note: Due to issues in some z/VM versions, we can't fully utilise this
* special behaviour. We have to keep a 4k (or 8 block) alignment in mind to
* work around those issues and write actual zeroes to the unaligned parts in
* the request. This workaround might be removed in the future.
*/
static struct dasd_ccw_req *dasd_fba_build_cp_discard(
struct dasd_device *memdev,
struct dasd_block *block,
struct request *req)
{
struct LO_fba_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
sector_t wz_stop = 0, d_stop = 0;
sector_t first_rec, last_rec;
unsigned int blksize = block->bp_block;
unsigned int blocks_per_page;
int wz_count = 0;
int d_count = 0;
int cur_pos = 0; /* Current position within the extent */
int count = 0;
int cplength;
int datasize;
int nr_ccws;
first_rec = blk_rq_pos(req) >> block->s2b_shift;
last_rec =
(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
count = last_rec - first_rec + 1;
blocks_per_page = BLOCKS_PER_PAGE(blksize);
nr_ccws = count_ccws(first_rec, last_rec, blocks_per_page);
/* define extent + nr_ccws * locate record + nr_ccws * single CCW */
cplength = 1 + 2 * nr_ccws;
datasize = sizeof(struct DE_fba_data) +
nr_ccws * (sizeof(struct LO_fba_data) + sizeof(struct ccw1));
cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
blk_mq_rq_to_pdu(req));
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
define_extent(ccw++, cqr->data, WRITE, blksize, first_rec, count);
LO_data = cqr->data + sizeof(struct DE_fba_data);
/* First part is not aligned. Calculate range to write zeroes. */
if (first_rec % blocks_per_page != 0) {
wz_stop = first_rec + blocks_per_page -
(first_rec % blocks_per_page) - 1;
if (wz_stop > last_rec)
wz_stop = last_rec;
wz_count = wz_stop - first_rec + 1;
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, LO_data++, WRITE, cur_pos, wz_count);
ccw[-1].flags |= CCW_FLAG_CC;
ccw_write_zero(ccw++, wz_count * blksize);
cur_pos = wz_count;
}
/* We can do proper discard when we've got at least blocks_per_page blocks. */
if (last_rec - (first_rec + cur_pos) + 1 >= blocks_per_page) {
/* is last record at page boundary? */
if ((last_rec - blocks_per_page + 1) % blocks_per_page != 0)
d_stop = last_rec - ((last_rec - blocks_per_page + 1) %
blocks_per_page);
else
d_stop = last_rec;
d_count = d_stop - (first_rec + cur_pos) + 1;
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, LO_data++, WRITE, cur_pos, d_count);
ccw[-1].flags |= CCW_FLAG_CC;
ccw_write_no_data(ccw++);
cur_pos += d_count;
}
/* We might still have some bits left which need to be zeroed. */
if (cur_pos == 0 || first_rec + cur_pos - 1 < last_rec) {
if (d_stop != 0)
wz_count = last_rec - d_stop;
else if (wz_stop != 0)
wz_count = last_rec - wz_stop;
else
wz_count = count;
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, LO_data++, WRITE, cur_pos, wz_count);
ccw[-1].flags |= CCW_FLAG_CC;
ccw_write_zero(ccw++, wz_count * blksize);
}
if (blk_noretry_request(req) ||
block->base->features & DASD_FEATURE_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->startdev = memdev;
cqr->memdev = memdev;
cqr->block = block;
cqr->expires = memdev->default_expires * HZ; /* default 5 minutes */
cqr->retries = memdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
static struct dasd_ccw_req *dasd_fba_build_cp_regular(
struct dasd_device *memdev,
struct dasd_block *block,
struct request *req)
{
struct dasd_fba_private *private = block->base->private;
unsigned long *idaws;
struct LO_fba_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
struct req_iterator iter;
struct bio_vec bv;
char *dst;
int count, cidaw, cplength, datasize;
sector_t recid, first_rec, last_rec;
unsigned int blksize, off;
unsigned char cmd;
if (rq_data_dir(req) == READ) {
cmd = DASD_FBA_CCW_READ;
} else if (rq_data_dir(req) == WRITE) {
cmd = DASD_FBA_CCW_WRITE;
} else
return ERR_PTR(-EINVAL);
blksize = block->bp_block;
/* Calculate record id of first and last block. */
first_rec = blk_rq_pos(req) >> block->s2b_shift;
last_rec =
(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
/* Check struct bio and count the number of blocks for the request. */
count = 0;
cidaw = 0;
rq_for_each_segment(bv, req, iter) {
if (bv.bv_len & (blksize - 1))
/* Fba can only do full blocks. */
return ERR_PTR(-EINVAL);
count += bv.bv_len >> (block->s2b_shift + 9);
if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
cidaw += bv.bv_len / blksize;
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
return ERR_PTR(-EINVAL);
/* 1x define extent + 1x locate record + number of blocks */
cplength = 2 + count;
/* 1x define extent + 1x locate record */
datasize = sizeof(struct DE_fba_data) + sizeof(struct LO_fba_data) +
cidaw * sizeof(unsigned long);
/*
* Find out number of additional locate record ccws if the device
* can't do data chaining.
*/
if (private->rdc_data.mode.bits.data_chain == 0) {
cplength += count - 1;
datasize += (count - 1)*sizeof(struct LO_fba_data);
}
/* Allocate the ccw request. */
cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
blk_mq_rq_to_pdu(req));
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
/* First ccw is define extent. */
define_extent(ccw++, cqr->data, rq_data_dir(req),
block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
/* Build locate_record + read/write ccws. */
idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
LO_data = (struct LO_fba_data *) (idaws + cidaw);
/* Locate record for all blocks for smart devices. */
if (private->rdc_data.mode.bits.data_chain != 0) {
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count);
}
recid = first_rec;
rq_for_each_segment(bv, req, iter) {
dst = bvec_virt(&bv);
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
GFP_DMA | __GFP_NOWARN);
if (copy && rq_data_dir(req) == WRITE)
memcpy(copy + bv.bv_offset, dst, bv.bv_len);
if (copy)
dst = copy + bv.bv_offset;
}
for (off = 0; off < bv.bv_len; off += blksize) {
/* Locate record for stupid devices. */
if (private->rdc_data.mode.bits.data_chain == 0) {
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw, LO_data++,
rq_data_dir(req),
recid - first_rec, 1);
ccw->flags = CCW_FLAG_CC;
ccw++;
} else {
if (recid > first_rec)
ccw[-1].flags |= CCW_FLAG_DC;
else
ccw[-1].flags |= CCW_FLAG_CC;
}
ccw->cmd_code = cmd;
ccw->count = block->bp_block;
if (idal_is_needed(dst, blksize)) {
ccw->cda = (__u32)virt_to_phys(idaws);
ccw->flags = CCW_FLAG_IDA;
idaws = idal_create_words(idaws, dst, blksize);
} else {
ccw->cda = (__u32)virt_to_phys(dst);
ccw->flags = 0;
}
ccw++;
dst += blksize;
recid++;
}
}
if (blk_noretry_request(req) ||
block->base->features & DASD_FEATURE_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->startdev = memdev;
cqr->memdev = memdev;
cqr->block = block;
cqr->expires = memdev->default_expires * HZ; /* default 5 minutes */
cqr->retries = memdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device *memdev,
struct dasd_block *block,
struct request *req)
{
if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_ZEROES)
return dasd_fba_build_cp_discard(memdev, block, req);
else
return dasd_fba_build_cp_regular(memdev, block, req);
}
static int
dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
struct dasd_fba_private *private = cqr->block->base->private;
struct ccw1 *ccw;
struct req_iterator iter;
struct bio_vec bv;
char *dst, *cda;
unsigned int blksize, off;
int status;
if (!dasd_page_cache)
goto out;
blksize = cqr->block->bp_block;
ccw = cqr->cpaddr;
/* Skip over define extent & locate record. */
ccw++;
if (private->rdc_data.mode.bits.data_chain != 0)
ccw++;
rq_for_each_segment(bv, req, iter) {
dst = bvec_virt(&bv);
for (off = 0; off < bv.bv_len; off += blksize) {
/* Skip locate record. */
if (private->rdc_data.mode.bits.data_chain == 0)
ccw++;
if (dst) {
if (ccw->flags & CCW_FLAG_IDA)
cda = *((char **)phys_to_virt(ccw->cda));
else
cda = phys_to_virt(ccw->cda);
if (dst != cda) {
if (rq_data_dir(req) == READ)
memcpy(dst, cda, bv.bv_len);
kmem_cache_free(dasd_page_cache,
(void *)((addr_t)cda & PAGE_MASK));
}
dst = NULL;
}
ccw++;
}
}
out:
status = cqr->status == DASD_CQR_DONE;
dasd_sfree_request(cqr, cqr->memdev);
return status;
}
static void dasd_fba_handle_terminated_request(struct dasd_ccw_req *cqr)
{
if (cqr->retries < 0)
cqr->status = DASD_CQR_FAILED;
else
cqr->status = DASD_CQR_FILLED;
}
static int
dasd_fba_fill_info(struct dasd_device * device,
struct dasd_information2_t * info)
{
struct dasd_fba_private *private = device->private;
info->label_block = 1;
info->FBA_layout = 1;
info->format = DASD_FORMAT_LDL;
info->characteristics_size = sizeof(private->rdc_data);
memcpy(info->characteristics, &private->rdc_data,
sizeof(private->rdc_data));
info->confdata_size = 0;
return 0;
}
static void
dasd_fba_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
char *reason)
{
u64 *sense;
sense = (u64 *) dasd_get_sense(irb);
if (sense) {
DBF_DEV_EVENT(DBF_EMERG, device,
"%s: %s %02x%02x%02x %016llx %016llx %016llx "
"%016llx", reason,
scsw_is_tm(&irb->scsw) ? "t" : "c",
scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
scsw_dstat(&irb->scsw), sense[0], sense[1],
sense[2], sense[3]);
} else {
DBF_DEV_EVENT(DBF_EMERG, device, "%s",
"SORRY - NO VALID SENSE AVAILABLE\n");
}
}
static void
dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
struct irb *irb)
{
char *page;
struct ccw1 *act, *end, *last;
int len, sl, sct, count;
page = (char *) get_zeroed_page(GFP_ATOMIC);
if (page == NULL) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"No memory to dump sense data");
return;
}
len = sprintf(page, PRINTK_HEADER
" I/O status report for device %s:\n",
dev_name(&device->cdev->dev));
len += sprintf(page + len, PRINTK_HEADER
" in req: %p CS: 0x%02X DS: 0x%02X\n", req,
irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
len += sprintf(page + len, PRINTK_HEADER
" device %s: Failing CCW: %p\n",
dev_name(&device->cdev->dev),
(void *) (addr_t) irb->scsw.cmd.cpa);
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
len += sprintf(page + len, PRINTK_HEADER
" Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
for (sct = 0; sct < 8; sct++) {
len += sprintf(page + len, " %02x",
irb->ecw[8 * sl + sct]);
}
len += sprintf(page + len, "\n");
}
} else {
len += sprintf(page + len, PRINTK_HEADER
" SORRY - NO VALID SENSE AVAILABLE\n");
}
printk(KERN_ERR "%s", page);
/* dump the Channel Program */
/* print first CCWs (maximum 8) */
act = req->cpaddr;
for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
end = min(act + 8, last);
len = sprintf(page, PRINTK_HEADER " Related CP in req: %p\n", req);
while (act <= end) {
len += sprintf(page + len, PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
len += sprintf(page + len, " %08X",
((int *) (addr_t) act->cda)
[(count>>2)]);
len += sprintf(page + len, "\n");
act++;
}
printk(KERN_ERR "%s", page);
/* print failing CCW area */
len = 0;
if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
len += sprintf(page + len, PRINTK_HEADER "......\n");
}
end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
while (act <= end) {
len += sprintf(page + len, PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
len += sprintf(page + len, " %08X",
((int *) (addr_t) act->cda)
[(count>>2)]);
len += sprintf(page + len, "\n");
act++;
}
/* print last CCWs */
if (act < last - 2) {
act = last - 2;
len += sprintf(page + len, PRINTK_HEADER "......\n");
}
while (act <= last) {
len += sprintf(page + len, PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
len += sprintf(page + len, " %08X",
((int *) (addr_t) act->cda)
[(count>>2)]);
len += sprintf(page + len, "\n");
act++;
}
if (len > 0)
printk(KERN_ERR "%s", page);
free_page((unsigned long) page);
}
/*
* Initialize block layer request queue.
*/
static void dasd_fba_setup_blk_queue(struct dasd_block *block)
{
unsigned int logical_block_size = block->bp_block;
struct request_queue *q = block->gdp->queue;
unsigned int max_bytes, max_discard_sectors;
int max;
max = DASD_FBA_MAX_BLOCKS << block->s2b_shift;
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
q->limits.max_dev_sectors = max;
blk_queue_logical_block_size(q, logical_block_size);
blk_queue_max_hw_sectors(q, max);
blk_queue_max_segments(q, USHRT_MAX);
/* With page sized segments each segment can be translated into one idaw/tidaw */
blk_queue_max_segment_size(q, PAGE_SIZE);
blk_queue_segment_boundary(q, PAGE_SIZE - 1);
q->limits.discard_granularity = logical_block_size;
/* Calculate max_discard_sectors and make it PAGE aligned */
max_bytes = USHRT_MAX * logical_block_size;
max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE);
max_discard_sectors = max_bytes / logical_block_size;
blk_queue_max_discard_sectors(q, max_discard_sectors);
blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
}
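/*
 * Worked example (illustrative only): with 512 byte logical blocks,
 * max_bytes = 65535 * 512 = 33553920 bytes, which ALIGN_DOWN() trims to
 * 33550336 (8191 pages of 4 KiB), so max_discard_sectors = 65528. The
 * page alignment keeps every discard within the 4 KiB granularity that
 * the z/VM workaround in dasd_fba_build_cp_discard() relies on.
 */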
static int dasd_fba_pe_handler(struct dasd_device *device,
__u8 tbvpm, __u8 fcsecpm)
{
return dasd_generic_verify_path(device, tbvpm);
}
static struct dasd_discipline dasd_fba_discipline = {
.owner = THIS_MODULE,
.name = "FBA ",
.ebcname = "FBA ",
.check_device = dasd_fba_check_characteristics,
.do_analysis = dasd_fba_do_analysis,
.pe_handler = dasd_fba_pe_handler,
.setup_blk_queue = dasd_fba_setup_blk_queue,
.fill_geometry = dasd_fba_fill_geometry,
.start_IO = dasd_start_IO,
.term_IO = dasd_term_IO,
.handle_terminated_request = dasd_fba_handle_terminated_request,
.erp_action = dasd_fba_erp_action,
.erp_postaction = dasd_fba_erp_postaction,
.check_for_device_change = dasd_fba_check_for_device_change,
.build_cp = dasd_fba_build_cp,
.free_cp = dasd_fba_free_cp,
.dump_sense = dasd_fba_dump_sense,
.dump_sense_dbf = dasd_fba_dump_sense_dbf,
.fill_info = dasd_fba_fill_info,
};
static int __init
dasd_fba_init(void)
{
int ret;
ASCEBC(dasd_fba_discipline.ebcname, 4);
dasd_fba_zero_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!dasd_fba_zero_page)
return -ENOMEM;
ret = ccw_driver_register(&dasd_fba_driver);
if (!ret)
wait_for_device_probe();
return ret;
}
static void __exit
dasd_fba_cleanup(void)
{
ccw_driver_unregister(&dasd_fba_driver);
free_page((unsigned long)dasd_fba_zero_page);
}
module_init(dasd_fba_init);
module_exit(dasd_fba_cleanup);
| linux-master | drivers/s390/block/dasd_fba.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Character device driver for extended error reporting.
*
* Copyright IBM Corp. 2005
* extended error reporting for DASD ECKD devices
* Author(s): Stefan Weinhuber <[email protected]>
*/
#define KMSG_COMPONENT "dasd-eckd"
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <asm/ebcdic.h>
#include "dasd_int.h"
#include "dasd_eckd.h"
#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eer):"
/*
* SECTION: the internal buffer
*/
/*
 * The internal buffer is meant to store opaque blobs of data, so it does
* not know of higher level concepts like triggers.
* It consists of a number of pages that are used as a ringbuffer. Each data
* blob is stored in a simple record that consists of an integer, which
 * contains the size of the following data, and the data bytes themselves.
*
* To allow for multiple independent readers we create one internal buffer
* each time the device is opened and destroy the buffer when the file is
* closed again. The number of pages used for this buffer is determined by
 * the module parameter eer_pages.
*
* One record can be written to a buffer by using the functions
* - dasd_eer_start_record (one time per record to write the size to the
* buffer and reserve the space for the data)
* - dasd_eer_write_buffer (one or more times per record to write the data)
* The data can be written in several steps but you will have to compute
* the total size up front for the invocation of dasd_eer_start_record.
* If the ringbuffer is full, dasd_eer_start_record will remove the required
* number of old records.
*
* A record is typically read in two steps, first read the integer that
* specifies the size of the following data, then read the data.
* Both can be done by
* - dasd_eer_read_buffer
*
* For all mentioned functions you need to get the bufferlock first and keep
* it until a complete record is written or read.
*
* All information necessary to keep track of an internal buffer is kept in
 * a struct eerbuffer. The buffer specific to a file pointer is stored in
* the private_data field of that file. To be able to write data to all
* existing buffers, each buffer is also added to the bufferlist.
* If the user does not want to read a complete record in one go, we have to
* keep track of the rest of the record. residual stores the number of bytes
 * that are still to be delivered. If the rest of the record is invalidated between
* two reads then residual will be set to -1 so that the next read will fail.
* All entries in the eerbuffer structure are protected with the bufferlock.
* To avoid races between writing to a buffer on the one side and creating
* and destroying buffers on the other side, the bufferlock must also be used
* to protect the bufferlist.
*/
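/*
 * Illustrative sketch (not part of the driver, compiled out): how a writer
 * would append one record under bufferlock with the two-step protocol
 * described above. The function and its name are hypothetical; the helpers
 * it calls are the real ones defined further below, which is why the sketch
 * is guarded with #if 0.
 */
#if 0
static void example_append_record(struct eerbuffer *eerb,
				  char *payload, int size)
{
	unsigned long flags;
	spin_lock_irqsave(&bufferlock, flags);
	/* reserve space and store the length prefix ... */
	if (!dasd_eer_start_record(eerb, size))
		/* ... then copy the payload, possibly in several chunks */
		dasd_eer_write_buffer(eerb, payload, size);
	spin_unlock_irqrestore(&bufferlock, flags);
}
#endif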
static int eer_pages = 5;
module_param(eer_pages, int, S_IRUGO|S_IWUSR);
struct eerbuffer {
struct list_head list;
char **buffer;
int buffersize;
int buffer_page_count;
int head;
int tail;
int residual;
};
static LIST_HEAD(bufferlist);
static DEFINE_SPINLOCK(bufferlock);
static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);
/*
* How many free bytes are available on the buffer.
* Needs to be called with bufferlock held.
*/
static int dasd_eer_get_free_bytes(struct eerbuffer *eerb)
{
if (eerb->head < eerb->tail)
return eerb->tail - eerb->head - 1;
	return eerb->buffersize - eerb->head + eerb->tail - 1;
}
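/*
 * Worked example (illustrative, assuming 4 KiB pages): with
 * buffersize = 2 * PAGE_SIZE = 8192, head = 8000 and tail = 100 the second
 * branch applies: 8192 - 8000 + 100 - 1 = 291 free bytes. One byte always
 * stays unused so that head == tail unambiguously means "buffer empty".
 */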
/*
* How many bytes of buffer space are used.
* Needs to be called with bufferlock held.
*/
static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
{
if (eerb->head >= eerb->tail)
return eerb->head - eerb->tail;
return eerb->buffersize - eerb->tail + eerb->head;
}
/*
* The dasd_eer_write_buffer function just copies count bytes of data
* to the buffer. Make sure to call dasd_eer_start_record first, to
* make sure that enough free space is available.
* Needs to be called with bufferlock held.
*/
static void dasd_eer_write_buffer(struct eerbuffer *eerb,
char *data, int count)
{
	unsigned long headindex, localhead;
unsigned long rest, len;
char *nextdata;
nextdata = data;
rest = count;
while (rest > 0) {
headindex = eerb->head / PAGE_SIZE;
localhead = eerb->head % PAGE_SIZE;
len = min(rest, PAGE_SIZE - localhead);
memcpy(eerb->buffer[headindex]+localhead, nextdata, len);
nextdata += len;
rest -= len;
eerb->head += len;
if (eerb->head == eerb->buffersize)
eerb->head = 0; /* wrap around */
BUG_ON(eerb->head > eerb->buffersize);
}
}
/*
* Needs to be called with bufferlock held.
*/
static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count)
{
	unsigned long tailindex, localtail;
unsigned long rest, len, finalcount;
char *nextdata;
finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
nextdata = data;
rest = finalcount;
while (rest > 0) {
tailindex = eerb->tail / PAGE_SIZE;
localtail = eerb->tail % PAGE_SIZE;
len = min(rest, PAGE_SIZE - localtail);
memcpy(nextdata, eerb->buffer[tailindex] + localtail, len);
nextdata += len;
rest -= len;
eerb->tail += len;
if (eerb->tail == eerb->buffersize)
eerb->tail = 0; /* wrap around */
BUG_ON(eerb->tail > eerb->buffersize);
}
return finalcount;
}
/*
* Whenever you want to write a blob of data to the internal buffer you
* have to start by using this function first. It will write the number
* of bytes that will be written to the buffer. If necessary it will remove
* old records to make room for the new one.
* Needs to be called with bufferlock held.
*/
static int dasd_eer_start_record(struct eerbuffer *eerb, int count)
{
int tailcount;
if (count + sizeof(count) > eerb->buffersize)
return -ENOMEM;
while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
if (eerb->residual > 0) {
eerb->tail += eerb->residual;
if (eerb->tail >= eerb->buffersize)
eerb->tail -= eerb->buffersize;
eerb->residual = -1;
}
dasd_eer_read_buffer(eerb, (char *) &tailcount,
sizeof(tailcount));
eerb->tail += tailcount;
if (eerb->tail >= eerb->buffersize)
eerb->tail -= eerb->buffersize;
}
	dasd_eer_write_buffer(eerb, (char *) &count, sizeof(count));
return 0;
}
/*
* Release pages that are not used anymore.
*/
static void dasd_eer_free_buffer_pages(char **buf, int no_pages)
{
int i;
for (i = 0; i < no_pages; i++)
free_page((unsigned long) buf[i]);
}
/*
* Allocate a new set of memory pages.
*/
static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
{
int i;
for (i = 0; i < no_pages; i++) {
buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
if (!buf[i]) {
dasd_eer_free_buffer_pages(buf, i);
return -ENOMEM;
}
}
return 0;
}
/*
* SECTION: The extended error reporting functionality
*/
/*
* When a DASD device driver wants to report an error, it calls the
* function dasd_eer_write and gives the respective trigger ID as
 * parameter. Currently there are several kinds of triggers, for example:
*
* DASD_EER_FATALERROR: all kinds of unrecoverable I/O problems
* DASD_EER_PPRCSUSPEND: PPRC was suspended
* DASD_EER_NOPATH: There is no path to the device left.
* DASD_EER_STATECHANGE: The state of the device has changed.
*
* For the first three triggers all required information can be supplied by
* the caller. For these triggers a record is written by the function
* dasd_eer_write_standard_trigger.
*
* The DASD_EER_STATECHANGE trigger is special since a sense subsystem
 * status ccw needs to be executed to gather the necessary sense data first.
* The dasd_eer_snss function will queue the SNSS request and the request
 * callback will then call dasd_eer_write with the DASD_EER_STATECHANGE
* trigger.
*
* To avoid memory allocations at runtime, the necessary memory is allocated
* when the extended error reporting is enabled for a device (by
* dasd_eer_probe). There is one sense subsystem status request for each
* eer enabled DASD device. The presence of the cqr in device->eer_cqr
 * indicates that eer is enabled for the device. The use of the snss request
* is protected by the DASD_FLAG_EER_IN_USE bit. When this flag indicates
* that the cqr is currently in use, dasd_eer_snss cannot start a second
* request but sets the DASD_FLAG_EER_SNSS flag instead. The callback of
* the SNSS request will check the bit and call dasd_eer_snss again.
*/
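/*
 * Illustrative sketch (not part of the driver, compiled out): the record
 * layout a hypothetical userspace reader of the misc device would see,
 * based on the writer functions below. Each record starts with a
 * struct dasd_eer_header (34 bytes when packed), followed by the trigger
 * specific data (0..n 32-byte sense data sets, or SNSS_DATA_SIZE bytes of
 * SNSS data) and the four bytes "EOR\0". The internal length prefix is
 * consumed by dasd_eer_read and never copied to userspace.
 */
#if 0
	char record[4096];
	struct dasd_eer_header *hdr;
	ssize_t n;
	/* fd is assumed to be open on the dasd_eer misc device */
	n = read(fd, record, sizeof(record));
	if (n >= (ssize_t) sizeof(*hdr)) {
		hdr = (struct dasd_eer_header *) record;
		/* hdr->total_size covers header, data and "EOR" */
	}
#endif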
#define SNSS_DATA_SIZE 44
#define DASD_EER_BUSID_SIZE 10
struct dasd_eer_header {
__u32 total_size;
__u32 trigger;
__u64 tv_sec;
__u64 tv_usec;
char busid[DASD_EER_BUSID_SIZE];
} __attribute__ ((packed));
/*
* The following function can be used for those triggers that have
* all necessary data available when the function is called.
* If the parameter cqr is not NULL, the chain of requests will be searched
* for valid sense data, and all valid sense data sets will be added to
* the triggers data.
*/
static void dasd_eer_write_standard_trigger(struct dasd_device *device,
struct dasd_ccw_req *cqr,
int trigger)
{
struct dasd_ccw_req *temp_cqr;
int data_size;
struct timespec64 ts;
struct dasd_eer_header header;
unsigned long flags;
struct eerbuffer *eerb;
char *sense;
/* go through cqr chain and count the valid sense data sets */
data_size = 0;
for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
if (dasd_get_sense(&temp_cqr->irb))
data_size += 32;
header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
header.trigger = trigger;
ktime_get_real_ts64(&ts);
header.tv_sec = ts.tv_sec;
header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
strscpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
spin_lock_irqsave(&bufferlock, flags);
list_for_each_entry(eerb, &bufferlist, list) {
dasd_eer_start_record(eerb, header.total_size);
dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) {
sense = dasd_get_sense(&temp_cqr->irb);
if (sense)
dasd_eer_write_buffer(eerb, sense, 32);
}
dasd_eer_write_buffer(eerb, "EOR", 4);
}
spin_unlock_irqrestore(&bufferlock, flags);
wake_up_interruptible(&dasd_eer_read_wait_queue);
}
/*
* This function writes a DASD_EER_STATECHANGE trigger.
*/
static void dasd_eer_write_snss_trigger(struct dasd_device *device,
struct dasd_ccw_req *cqr,
int trigger)
{
int data_size;
int snss_rc;
struct timespec64 ts;
struct dasd_eer_header header;
unsigned long flags;
struct eerbuffer *eerb;
snss_rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
if (snss_rc)
data_size = 0;
else
data_size = SNSS_DATA_SIZE;
header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
header.trigger = DASD_EER_STATECHANGE;
ktime_get_real_ts64(&ts);
header.tv_sec = ts.tv_sec;
header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
strscpy(header.busid, dev_name(&device->cdev->dev),
DASD_EER_BUSID_SIZE);
spin_lock_irqsave(&bufferlock, flags);
list_for_each_entry(eerb, &bufferlist, list) {
dasd_eer_start_record(eerb, header.total_size);
		dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
if (!snss_rc)
dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE);
dasd_eer_write_buffer(eerb, "EOR", 4);
}
spin_unlock_irqrestore(&bufferlock, flags);
wake_up_interruptible(&dasd_eer_read_wait_queue);
}
/*
* This function is called for all triggers. It calls the appropriate
* function that writes the actual trigger records.
*/
void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr,
unsigned int id)
{
if (!device->eer_cqr)
return;
switch (id) {
case DASD_EER_FATALERROR:
case DASD_EER_PPRCSUSPEND:
dasd_eer_write_standard_trigger(device, cqr, id);
break;
case DASD_EER_NOPATH:
case DASD_EER_NOSPC:
case DASD_EER_AUTOQUIESCE:
dasd_eer_write_standard_trigger(device, NULL, id);
break;
case DASD_EER_STATECHANGE:
dasd_eer_write_snss_trigger(device, cqr, id);
break;
default: /* unknown trigger, so we write it without any sense data */
dasd_eer_write_standard_trigger(device, NULL, id);
break;
}
}
EXPORT_SYMBOL(dasd_eer_write);
/*
* Start a sense subsystem status request.
* Needs to be called with the device held.
*/
void dasd_eer_snss(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
cqr = device->eer_cqr;
if (!cqr) /* Device not eer enabled. */
return;
if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) {
/* Sense subsystem status request in use. */
set_bit(DASD_FLAG_EER_SNSS, &device->flags);
return;
}
/* cdev is already locked, can't use dasd_add_request_head */
clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
cqr->status = DASD_CQR_QUEUED;
list_add(&cqr->devlist, &device->ccw_queue);
dasd_schedule_device_bh(device);
}
/*
* Callback function for use with sense subsystem status request.
*/
static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
{
struct dasd_device *device = cqr->startdev;
unsigned long flags;
dasd_eer_write(device, cqr, DASD_EER_STATECHANGE);
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (device->eer_cqr == cqr) {
clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
if (test_bit(DASD_FLAG_EER_SNSS, &device->flags))
/* Another SNSS has been requested in the meantime. */
dasd_eer_snss(device);
cqr = NULL;
}
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
if (cqr)
/*
* Extended error recovery has been switched off while
* the SNSS request was running. It could even have
* been switched off and on again in which case there
* is a new ccw in device->eer_cqr. Free the "old"
* snss request now.
*/
dasd_sfree_request(cqr, device);
}
/*
* Enable error reporting on a given device.
*/
int dasd_eer_enable(struct dasd_device *device)
{
struct dasd_ccw_req *cqr = NULL;
unsigned long flags;
struct ccw1 *ccw;
int rc = 0;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (device->eer_cqr)
goto out;
else if (!device->discipline ||
strcmp(device->discipline->name, "ECKD"))
rc = -EMEDIUMTYPE;
else if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
rc = -EBUSY;
if (rc)
goto out;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
SNSS_DATA_SIZE, device, NULL);
if (IS_ERR(cqr)) {
rc = -ENOMEM;
cqr = NULL;
goto out;
}
cqr->startdev = device;
cqr->retries = 255;
cqr->expires = 10 * HZ;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_SNSS;
ccw->count = SNSS_DATA_SIZE;
ccw->flags = 0;
ccw->cda = (__u32)virt_to_phys(cqr->data);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
cqr->callback = dasd_eer_snss_cb;
if (!device->eer_cqr) {
device->eer_cqr = cqr;
cqr = NULL;
}
out:
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
if (cqr)
dasd_sfree_request(cqr, device);
return rc;
}
/*
* Disable error reporting on a given device.
*/
void dasd_eer_disable(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
unsigned long flags;
int in_use;
if (!device->eer_cqr)
return;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
cqr = device->eer_cqr;
device->eer_cqr = NULL;
clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
if (cqr && !in_use)
dasd_sfree_request(cqr, device);
}
/*
* SECTION: the device operations
*/
/*
* On the one side we need a lock to access our internal buffer, on the
* other side a copy_to_user can sleep. So we need to copy the data we have
* to transfer in a readbuffer, which is protected by the readbuffer_mutex.
*/
static char readbuffer[PAGE_SIZE];
static DEFINE_MUTEX(readbuffer_mutex);
static int dasd_eer_open(struct inode *inp, struct file *filp)
{
struct eerbuffer *eerb;
unsigned long flags;
eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
if (!eerb)
return -ENOMEM;
eerb->buffer_page_count = eer_pages;
if (eerb->buffer_page_count < 1 ||
eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
kfree(eerb);
DBF_EVENT(DBF_WARNING, "can't open device since module "
"parameter eer_pages is smaller than 1 or"
" bigger than %d", (int)(INT_MAX / PAGE_SIZE));
return -EINVAL;
}
eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
eerb->buffer = kmalloc_array(eerb->buffer_page_count, sizeof(char *),
GFP_KERNEL);
if (!eerb->buffer) {
kfree(eerb);
return -ENOMEM;
}
if (dasd_eer_allocate_buffer_pages(eerb->buffer,
eerb->buffer_page_count)) {
kfree(eerb->buffer);
kfree(eerb);
return -ENOMEM;
}
filp->private_data = eerb;
spin_lock_irqsave(&bufferlock, flags);
list_add(&eerb->list, &bufferlist);
spin_unlock_irqrestore(&bufferlock, flags);
	return nonseekable_open(inp, filp);
}
static int dasd_eer_close(struct inode *inp, struct file *filp)
{
struct eerbuffer *eerb;
unsigned long flags;
eerb = (struct eerbuffer *) filp->private_data;
spin_lock_irqsave(&bufferlock, flags);
list_del(&eerb->list);
spin_unlock_irqrestore(&bufferlock, flags);
dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
kfree(eerb->buffer);
kfree(eerb);
return 0;
}
static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
	int tc, rc;
	int tailcount, effective_count;
unsigned long flags;
struct eerbuffer *eerb;
eerb = (struct eerbuffer *) filp->private_data;
if (mutex_lock_interruptible(&readbuffer_mutex))
return -ERESTARTSYS;
spin_lock_irqsave(&bufferlock, flags);
if (eerb->residual < 0) { /* the remainder of this record */
/* has been deleted */
eerb->residual = 0;
spin_unlock_irqrestore(&bufferlock, flags);
mutex_unlock(&readbuffer_mutex);
return -EIO;
} else if (eerb->residual > 0) {
/* OK we still have a second half of a record to deliver */
effective_count = min(eerb->residual, (int) count);
eerb->residual -= effective_count;
} else {
tc = 0;
while (!tc) {
tc = dasd_eer_read_buffer(eerb, (char *) &tailcount,
sizeof(tailcount));
if (!tc) {
/* no data available */
spin_unlock_irqrestore(&bufferlock, flags);
mutex_unlock(&readbuffer_mutex);
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
rc = wait_event_interruptible(
dasd_eer_read_wait_queue,
eerb->head != eerb->tail);
if (rc)
return rc;
if (mutex_lock_interruptible(&readbuffer_mutex))
return -ERESTARTSYS;
spin_lock_irqsave(&bufferlock, flags);
}
}
WARN_ON(tc != sizeof(tailcount));
		effective_count = min(tailcount, (int) count);
eerb->residual = tailcount - effective_count;
}
tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count);
WARN_ON(tc != effective_count);
spin_unlock_irqrestore(&bufferlock, flags);
if (copy_to_user(buf, readbuffer, effective_count)) {
mutex_unlock(&readbuffer_mutex);
return -EFAULT;
}
mutex_unlock(&readbuffer_mutex);
return effective_count;
}
static __poll_t dasd_eer_poll(struct file *filp, poll_table *ptable)
{
__poll_t mask;
unsigned long flags;
struct eerbuffer *eerb;
eerb = (struct eerbuffer *) filp->private_data;
poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
spin_lock_irqsave(&bufferlock, flags);
if (eerb->head != eerb->tail)
		mask = EPOLLIN | EPOLLRDNORM;
else
mask = 0;
spin_unlock_irqrestore(&bufferlock, flags);
return mask;
}
static const struct file_operations dasd_eer_fops = {
.open = &dasd_eer_open,
.release = &dasd_eer_close,
.read = &dasd_eer_read,
.poll = &dasd_eer_poll,
.owner = THIS_MODULE,
.llseek = noop_llseek,
};
static struct miscdevice *dasd_eer_dev = NULL;
int __init dasd_eer_init(void)
{
int rc;
dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
if (!dasd_eer_dev)
return -ENOMEM;
dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
dasd_eer_dev->name = "dasd_eer";
dasd_eer_dev->fops = &dasd_eer_fops;
rc = misc_register(dasd_eer_dev);
if (rc) {
kfree(dasd_eer_dev);
dasd_eer_dev = NULL;
DBF_EVENT(DBF_ERR, "%s", "dasd_eer_init could not "
"register misc device");
return rc;
}
return 0;
}
void dasd_eer_exit(void)
{
if (dasd_eer_dev) {
misc_deregister(dasd_eer_dev);
kfree(dasd_eer_dev);
dasd_eer_dev = NULL;
}
}
| linux-master | drivers/s390/block/dasd_eer.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <[email protected]>
* Horst Hummel <[email protected]>
* Carsten Otte <[email protected]>
* Martin Schwidefsky <[email protected]>
* Bugreports.to..: <[email protected]>
* Copyright IBM Corp. 1999, 2001
*
*/
#define KMSG_COMPONENT "dasd"
#include <linux/ctype.h>
#include <linux/init.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <linux/uaccess.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd_erp:"
#include "dasd_int.h"
struct dasd_ccw_req *
dasd_alloc_erp_request(unsigned int magic, int cplength, int datasize,
struct dasd_device * device)
{
unsigned long flags;
struct dasd_ccw_req *cqr;
char *data;
int size;
/* Sanity checks */
BUG_ON(datasize > PAGE_SIZE ||
(cplength*sizeof(struct ccw1)) > PAGE_SIZE);
size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
if (cplength > 0)
size += cplength * sizeof(struct ccw1);
if (datasize > 0)
size += datasize;
spin_lock_irqsave(&device->mem_lock, flags);
cqr = (struct dasd_ccw_req *)
dasd_alloc_chunk(&device->erp_chunks, size);
spin_unlock_irqrestore(&device->mem_lock, flags);
if (cqr == NULL)
return ERR_PTR(-ENOMEM);
memset(cqr, 0, sizeof(struct dasd_ccw_req));
INIT_LIST_HEAD(&cqr->devlist);
INIT_LIST_HEAD(&cqr->blocklist);
data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
cqr->cpaddr = NULL;
if (cplength > 0) {
cqr->cpaddr = (struct ccw1 *) data;
data += cplength*sizeof(struct ccw1);
memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
}
cqr->data = NULL;
if (datasize > 0) {
cqr->data = data;
memset(cqr->data, 0, datasize);
}
cqr->magic = magic;
ASCEBC((char *) &cqr->magic, 4);
set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
dasd_get_device(device);
return cqr;
}
void
dasd_free_erp_request(struct dasd_ccw_req *cqr, struct dasd_device * device)
{
unsigned long flags;
spin_lock_irqsave(&device->mem_lock, flags);
dasd_free_chunk(&device->erp_chunks, cqr);
spin_unlock_irqrestore(&device->mem_lock, flags);
atomic_dec(&device->ref_count);
}
/*
* dasd_default_erp_action just retries the current cqr
*/
struct dasd_ccw_req *
dasd_default_erp_action(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
device = cqr->startdev;
/* just retry - there is nothing to save ... I got no sense data.... */
if (cqr->retries > 0) {
DBF_DEV_EVENT(DBF_DEBUG, device,
"default ERP called (%i retries left)",
cqr->retries);
if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
cqr->lpm = dasd_path_get_opm(device);
cqr->status = DASD_CQR_FILLED;
} else {
pr_err("%s: default ERP has run out of retries and failed\n",
dev_name(&device->cdev->dev));
cqr->status = DASD_CQR_FAILED;
cqr->stopclk = get_tod_clock();
}
return cqr;
} /* end dasd_default_erp_action */
/*
* DESCRIPTION
 * Frees all ERPs of the current ERP chain and sets the status
* of the original CQR either to DASD_CQR_DONE if ERP was successful
* or to DASD_CQR_FAILED if ERP was NOT successful.
* NOTE: This function is only called if no discipline postaction
* is available
*
* PARAMETER
* erp current erp_head
*
* RETURN VALUES
* cqr pointer to the original CQR
*/
struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr)
{
int success;
unsigned long startclk, stopclk;
struct dasd_device *startdev;
BUG_ON(cqr->refers == NULL || cqr->function == NULL);
success = cqr->status == DASD_CQR_DONE;
startclk = cqr->startclk;
stopclk = cqr->stopclk;
startdev = cqr->startdev;
/* free all ERPs - but NOT the original cqr */
while (cqr->refers != NULL) {
struct dasd_ccw_req *refers;
refers = cqr->refers;
/* remove the request from the block queue */
list_del(&cqr->blocklist);
/* free the finished erp request */
dasd_free_erp_request(cqr, cqr->memdev);
cqr = refers;
}
/* set corresponding status to original cqr */
cqr->startclk = startclk;
cqr->stopclk = stopclk;
cqr->startdev = startdev;
if (success)
cqr->status = DASD_CQR_DONE;
else {
cqr->status = DASD_CQR_FAILED;
cqr->stopclk = get_tod_clock();
}
return cqr;
} /* end default_erp_postaction */
void
dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
{
struct dasd_device *device;
device = cqr->startdev;
if (cqr->intrc == -ETIMEDOUT) {
dev_err(&device->cdev->dev,
"A timeout error occurred for cqr %p\n", cqr);
return;
}
if (cqr->intrc == -ENOLINK) {
dev_err(&device->cdev->dev,
"A transport error occurred for cqr %p\n", cqr);
return;
}
/* dump sense data */
if (device->discipline && device->discipline->dump_sense)
device->discipline->dump_sense(device, cqr, irb);
}
void
dasd_log_sense_dbf(struct dasd_ccw_req *cqr, struct irb *irb)
{
struct dasd_device *device;
device = cqr->startdev;
	/* dump sense data to the s390 debug feature */
if (device->discipline && device->discipline->dump_sense_dbf)
device->discipline->dump_sense_dbf(device, irb, "log");
}
EXPORT_SYMBOL(dasd_log_sense_dbf);
EXPORT_SYMBOL(dasd_default_erp_action);
EXPORT_SYMBOL(dasd_default_erp_postaction);
EXPORT_SYMBOL(dasd_alloc_erp_request);
EXPORT_SYMBOL(dasd_free_erp_request);
EXPORT_SYMBOL(dasd_log_sense);
| linux-master | drivers/s390/block/dasd_erp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <[email protected]>
* Horst Hummel <[email protected]>
* Carsten Otte <[email protected]>
* Martin Schwidefsky <[email protected]>
* Bugreports.to..: <[email protected]>
* Copyright IBM Corp. 1999,2001
*
* Device mapping and dasd= parameter parsing functions. All devmap
* functions may not be called from interrupt context. In particular
* dasd_get_device is a no-no from interrupt context.
*
*/
#define KMSG_COMPONENT "dasd"
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include <linux/uaccess.h>
#include <asm/ipl.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd_devmap:"
#define DASD_MAX_PARAMS 256
#include "dasd_int.h"
struct kmem_cache *dasd_page_cache;
EXPORT_SYMBOL_GPL(dasd_page_cache);
/*
* dasd_devmap_t is used to store the features and the relation
* between device number and device index. To find a dasd_devmap_t
* that corresponds to a device number of a device index each
* dasd_devmap_t is added to two linked lists, one to search by
* the device number and one to search by the device index. As
* soon as big minor numbers are available the device index list
* can be removed since the device number will then be identical
* to the device index.
*/
struct dasd_devmap {
struct list_head list;
char bus_id[DASD_BUS_ID_SIZE];
unsigned int devindex;
unsigned short features;
struct dasd_device *device;
struct dasd_copy_relation *copy;
unsigned int aq_mask;
};
/*
* Parameter parsing functions for dasd= parameter. The syntax is:
* <devno> : (0x)?[0-9a-fA-F]+
 * <busid> : [0-9a-f]\.[0-9a-f]\.(0x)?[0-9a-fA-F]+
* <feature> : ro
* <feature_list> : \(<feature>(:<feature>)*\)
* <devno-range> : <devno>(-<devno>)?<feature_list>?
* <busid-range> : <busid>(-<busid>)?<feature_list>?
* <devices> : <devno-range>|<busid-range>
* <dasd_module> : dasd_diag_mod|dasd_eckd_mod|dasd_fba_mod
*
* <dasd> : autodetect|probeonly|<devices>(,<devices>)*
*/
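/*
 * Examples (illustrative) of parameter strings accepted by this grammar:
 *	dasd=probeonly
 *	dasd=0x4711
 *	dasd=0.0.1234-0.0.1237(ro:erplog),0.0.8765(diag)
 */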
int dasd_probeonly = 0; /* is true, when probeonly mode is active */
int dasd_autodetect = 0; /* is true, when autodetection is active */
int dasd_nopav = 0; /* is true, when PAV is disabled */
EXPORT_SYMBOL_GPL(dasd_nopav);
int dasd_nofcx; /* disable High Performance Ficon */
EXPORT_SYMBOL_GPL(dasd_nofcx);
/*
 * char *dasd[] is intended to hold the ranges supplied by the dasd= statement.
 * It is named 'dasd' so that it can be filled directly by insmod with the
 * comma separated strings when running as a module.
*/
static char *dasd[DASD_MAX_PARAMS];
module_param_array(dasd, charp, NULL, S_IRUGO);
/*
* Single spinlock to protect devmap and servermap structures and lists.
*/
static DEFINE_SPINLOCK(dasd_devmap_lock);
/*
* Hash lists for devmap structures.
*/
static struct list_head dasd_hashlists[256];
int dasd_max_devindex;
static struct dasd_devmap *dasd_add_busid(const char *, int);
static inline int
dasd_hash_busid(const char *bus_id)
{
int hash, i;
hash = 0;
for (i = 0; (i < DASD_BUS_ID_SIZE) && *bus_id; i++, bus_id++)
hash += *bus_id;
return hash & 0xff;
}
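/*
 * Worked example (illustrative): for bus_id "0.0.1234" the byte sum is
 * '0' + '.' + '0' + '.' + '1' + '2' + '3' + '4'
 * = 48 + 46 + 48 + 46 + 49 + 50 + 51 + 52 = 390, and 390 & 0xff = 134,
 * so this device lands on hash list 134.
 */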
#ifndef MODULE
static int __init dasd_call_setup(char *opt)
{
static int i __initdata;
char *tmp;
while (i < DASD_MAX_PARAMS) {
tmp = strsep(&opt, ",");
if (!tmp)
break;
dasd[i++] = tmp;
}
return 1;
}
__setup("dasd=", dasd_call_setup);
#endif /* #ifndef MODULE */
#define DASD_IPLDEV "ipldev"
/*
* Read a device busid/devno from a string.
*/
static int dasd_busid(char *str, int *id0, int *id1, int *devno)
{
unsigned int val;
char *tok;
/* Interpret ipldev busid */
if (strncmp(DASD_IPLDEV, str, strlen(DASD_IPLDEV)) == 0) {
if (ipl_info.type != IPL_TYPE_CCW) {
pr_err("The IPL device is not a CCW device\n");
return -EINVAL;
}
*id0 = 0;
*id1 = ipl_info.data.ccw.dev_id.ssid;
*devno = ipl_info.data.ccw.dev_id.devno;
return 0;
}
/* Old style 0xXXXX or XXXX */
if (!kstrtouint(str, 16, &val)) {
*id0 = *id1 = 0;
if (val > 0xffff)
return -EINVAL;
*devno = val;
return 0;
}
/* New style x.y.z busid */
tok = strsep(&str, ".");
if (kstrtouint(tok, 16, &val) || val > 0xff)
return -EINVAL;
*id0 = val;
tok = strsep(&str, ".");
if (kstrtouint(tok, 16, &val) || val > 0xff)
return -EINVAL;
*id1 = val;
tok = strsep(&str, ".");
if (kstrtouint(tok, 16, &val) || val > 0xffff)
return -EINVAL;
*devno = val;
return 0;
}
/*
* Read colon separated list of dasd features.
*/
static int __init dasd_feature_list(char *str)
{
int features, len, rc;
features = 0;
rc = 0;
if (!str)
return DASD_FEATURE_DEFAULT;
while (1) {
for (len = 0;
str[len] && str[len] != ':' && str[len] != ')'; len++);
if (len == 2 && !strncmp(str, "ro", 2))
features |= DASD_FEATURE_READONLY;
else if (len == 4 && !strncmp(str, "diag", 4))
features |= DASD_FEATURE_USEDIAG;
else if (len == 3 && !strncmp(str, "raw", 3))
features |= DASD_FEATURE_USERAW;
else if (len == 6 && !strncmp(str, "erplog", 6))
features |= DASD_FEATURE_ERPLOG;
else if (len == 8 && !strncmp(str, "failfast", 8))
features |= DASD_FEATURE_FAILFAST;
else {
pr_warn("%.*s is not a supported device option\n",
len, str);
rc = -EINVAL;
}
str += len;
if (*str != ':')
break;
str++;
}
return rc ? : features;
}
/*
* Try to match the first element on the comma separated parse string
 * with one of the known keywords. If a keyword is found, take the appropriate
* action and return a pointer to the residual string. If the first element
* could not be matched to any keyword then return an error code.
*/
static int __init dasd_parse_keyword(char *keyword)
{
int length = strlen(keyword);
if (strncmp("autodetect", keyword, length) == 0) {
dasd_autodetect = 1;
pr_info("The autodetection mode has been activated\n");
return 0;
}
if (strncmp("probeonly", keyword, length) == 0) {
dasd_probeonly = 1;
pr_info("The probeonly mode has been activated\n");
return 0;
}
if (strncmp("nopav", keyword, length) == 0) {
if (MACHINE_IS_VM)
pr_info("'nopav' is not supported on z/VM\n");
else {
dasd_nopav = 1;
pr_info("PAV support has be deactivated\n");
}
return 0;
}
if (strncmp("nofcx", keyword, length) == 0) {
dasd_nofcx = 1;
pr_info("High Performance FICON support has been "
"deactivated\n");
return 0;
}
if (strncmp("fixedbuffers", keyword, length) == 0) {
if (dasd_page_cache)
return 0;
dasd_page_cache =
kmem_cache_create("dasd_page_cache", PAGE_SIZE,
PAGE_SIZE, SLAB_CACHE_DMA,
NULL);
if (!dasd_page_cache)
DBF_EVENT(DBF_WARNING, "%s", "Failed to create slab, "
"fixed buffer mode disabled.");
else
DBF_EVENT(DBF_INFO, "%s",
"turning on fixed buffer mode");
return 0;
}
return -EINVAL;
}
/*
* Split a string of a device range into its pieces and return the from, to, and
* feature parts separately.
* e.g.:
* 0.0.1234-0.0.5678(ro:erplog) -> from: 0.0.1234 to: 0.0.5678 features: ro:erplog
* 0.0.8765(raw) -> from: 0.0.8765 to: null features: raw
* 0x4321 -> from: 0x4321 to: null features: null
*/
static int __init dasd_evaluate_range_param(char *range, char **from_str,
char **to_str, char **features_str)
{
int rc = 0;
/* Do we have a range or a single device? */
if (strchr(range, '-')) {
*from_str = strsep(&range, "-");
*to_str = strsep(&range, "(");
*features_str = strsep(&range, ")");
} else {
*from_str = strsep(&range, "(");
*features_str = strsep(&range, ")");
}
if (*features_str && !range) {
pr_warn("A closing parenthesis ')' is missing in the dasd= parameter\n");
rc = -EINVAL;
}
return rc;
}
/*
 * Try to interpret the range string as a device number or a range of devices.
* If the interpretation is successful, create the matching dasd_devmap entries.
* If interpretation fails or in case of an error, return an error code.
*/
static int __init dasd_parse_range(const char *range)
{
struct dasd_devmap *devmap;
int from, from_id0, from_id1;
int to, to_id0, to_id1;
int features;
char bus_id[DASD_BUS_ID_SIZE + 1];
char *features_str = NULL;
char *from_str = NULL;
char *to_str = NULL;
int rc = 0;
char *tmp;
tmp = kstrdup(range, GFP_KERNEL);
if (!tmp)
return -ENOMEM;
if (dasd_evaluate_range_param(tmp, &from_str, &to_str, &features_str)) {
rc = -EINVAL;
goto out;
}
if (dasd_busid(from_str, &from_id0, &from_id1, &from)) {
rc = -EINVAL;
goto out;
}
to = from;
to_id0 = from_id0;
to_id1 = from_id1;
if (to_str) {
if (dasd_busid(to_str, &to_id0, &to_id1, &to)) {
rc = -EINVAL;
goto out;
}
if (from_id0 != to_id0 || from_id1 != to_id1 || from > to) {
pr_err("%s is not a valid device range\n", range);
rc = -EINVAL;
goto out;
}
}
features = dasd_feature_list(features_str);
if (features < 0) {
rc = -EINVAL;
goto out;
}
/* each device in dasd= parameter should be set initially online */
features |= DASD_FEATURE_INITIAL_ONLINE;
while (from <= to) {
sprintf(bus_id, "%01x.%01x.%04x", from_id0, from_id1, from++);
devmap = dasd_add_busid(bus_id, features);
if (IS_ERR(devmap)) {
rc = PTR_ERR(devmap);
goto out;
}
}
out:
kfree(tmp);
return rc;
}
/*
* Parse parameters stored in dasd[]
 * The 'dasd=...' parameter allows specifying a comma separated list of
 * keywords and device ranges. The parameters in that list will be stored as
 * separate elements in dasd[].
*/
int __init dasd_parse(void)
{
int rc, i;
char *cur;
rc = 0;
for (i = 0; i < DASD_MAX_PARAMS; i++) {
cur = dasd[i];
if (!cur)
break;
if (*cur == '\0')
continue;
rc = dasd_parse_keyword(cur);
if (rc)
rc = dasd_parse_range(cur);
if (rc)
break;
}
return rc;
}
/*
* Add a devmap for the device specified by busid. It is possible that
* the devmap already exists (dasd= parameter). The order of the devices
* added through this function will define the kdevs for the individual
* devices.
*/
static struct dasd_devmap *
dasd_add_busid(const char *bus_id, int features)
{
struct dasd_devmap *devmap, *new, *tmp;
int hash;
new = kzalloc(sizeof(struct dasd_devmap), GFP_KERNEL);
if (!new)
return ERR_PTR(-ENOMEM);
spin_lock(&dasd_devmap_lock);
devmap = NULL;
hash = dasd_hash_busid(bus_id);
list_for_each_entry(tmp, &dasd_hashlists[hash], list)
if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) {
devmap = tmp;
break;
}
if (!devmap) {
/* This bus_id is new. */
new->devindex = dasd_max_devindex++;
strscpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
new->features = features;
new->device = NULL;
list_add(&new->list, &dasd_hashlists[hash]);
devmap = new;
new = NULL;
}
spin_unlock(&dasd_devmap_lock);
kfree(new);
return devmap;
}
static struct dasd_devmap *
dasd_find_busid_locked(const char *bus_id)
{
struct dasd_devmap *devmap, *tmp;
int hash;
devmap = ERR_PTR(-ENODEV);
hash = dasd_hash_busid(bus_id);
list_for_each_entry(tmp, &dasd_hashlists[hash], list) {
if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) {
devmap = tmp;
break;
}
}
return devmap;
}
/*
* Find devmap for device with given bus_id.
*/
static struct dasd_devmap *
dasd_find_busid(const char *bus_id)
{
struct dasd_devmap *devmap;
spin_lock(&dasd_devmap_lock);
devmap = dasd_find_busid_locked(bus_id);
spin_unlock(&dasd_devmap_lock);
return devmap;
}
/*
* Check if busid has been added to the list of dasd ranges.
*/
int
dasd_busid_known(const char *bus_id)
{
return IS_ERR(dasd_find_busid(bus_id)) ? -ENOENT : 0;
}
/*
* Forget all about the device numbers added so far.
* This may only be called at module unload or system shutdown.
*/
static void
dasd_forget_ranges(void)
{
struct dasd_devmap *devmap, *n;
int i;
spin_lock(&dasd_devmap_lock);
for (i = 0; i < 256; i++) {
list_for_each_entry_safe(devmap, n, &dasd_hashlists[i], list) {
BUG_ON(devmap->device != NULL);
list_del(&devmap->list);
kfree(devmap);
}
}
spin_unlock(&dasd_devmap_lock);
}
/*
* Find the device struct by its device index.
*/
struct dasd_device *
dasd_device_from_devindex(int devindex)
{
struct dasd_devmap *devmap, *tmp;
struct dasd_device *device;
int i;
spin_lock(&dasd_devmap_lock);
devmap = NULL;
for (i = 0; (i < 256) && !devmap; i++)
list_for_each_entry(tmp, &dasd_hashlists[i], list)
if (tmp->devindex == devindex) {
/* Found the devmap for the device. */
devmap = tmp;
break;
}
if (devmap && devmap->device) {
device = devmap->device;
dasd_get_device(device);
} else
device = ERR_PTR(-ENODEV);
spin_unlock(&dasd_devmap_lock);
return device;
}
/*
* Return devmap for cdev. If no devmap exists yet, create one and
* connect it to the cdev.
*/
static struct dasd_devmap *
dasd_devmap_from_cdev(struct ccw_device *cdev)
{
struct dasd_devmap *devmap;
devmap = dasd_find_busid(dev_name(&cdev->dev));
if (IS_ERR(devmap))
devmap = dasd_add_busid(dev_name(&cdev->dev),
DASD_FEATURE_DEFAULT);
return devmap;
}
/*
* Create a dasd device structure for cdev.
*/
struct dasd_device *
dasd_create_device(struct ccw_device *cdev)
{
struct dasd_devmap *devmap;
struct dasd_device *device;
unsigned long flags;
int rc;
devmap = dasd_devmap_from_cdev(cdev);
if (IS_ERR(devmap))
return (void *) devmap;
device = dasd_alloc_device();
if (IS_ERR(device))
return device;
atomic_set(&device->ref_count, 3);
spin_lock(&dasd_devmap_lock);
if (!devmap->device) {
devmap->device = device;
device->devindex = devmap->devindex;
device->features = devmap->features;
get_device(&cdev->dev);
device->cdev = cdev;
rc = 0;
} else
/* Someone else was faster. */
rc = -EBUSY;
spin_unlock(&dasd_devmap_lock);
if (rc) {
dasd_free_device(device);
return ERR_PTR(rc);
}
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
dev_set_drvdata(&cdev->dev, device);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
device->paths_info = kset_create_and_add("paths_info", NULL,
&device->cdev->dev.kobj);
if (!device->paths_info)
dev_warn(&cdev->dev, "Could not create paths_info kset\n");
return device;
}
/*
 * Allocate a PPRC data structure and call the discipline function to fill it.
*/
static int dasd_devmap_get_pprc_status(struct dasd_device *device,
struct dasd_pprc_data_sc4 **data)
{
struct dasd_pprc_data_sc4 *temp;
if (!device->discipline || !device->discipline->pprc_status) {
dev_warn(&device->cdev->dev, "Unable to query copy relation status\n");
return -EOPNOTSUPP;
}
temp = kzalloc(sizeof(*temp), GFP_KERNEL);
if (!temp)
return -ENOMEM;
/* get PPRC information from storage */
if (device->discipline->pprc_status(device, temp)) {
dev_warn(&device->cdev->dev, "Error during copy relation status query\n");
kfree(temp);
return -EINVAL;
}
*data = temp;
return 0;
}
/*
* find an entry in a PPRC device_info array by a given UID
* depending on the primary/secondary state of the device it has to be
* matched with the respective fields
*/
static int dasd_devmap_entry_from_pprc_data(struct dasd_pprc_data_sc4 *data,
struct dasd_uid uid,
bool primary)
{
int i;
for (i = 0; i < DASD_CP_ENTRIES; i++) {
if (primary) {
if (data->dev_info[i].prim_cu_ssid == uid.ssid &&
data->dev_info[i].primary == uid.real_unit_addr)
return i;
} else {
if (data->dev_info[i].sec_cu_ssid == uid.ssid &&
data->dev_info[i].secondary == uid.real_unit_addr)
return i;
}
}
return -1;
}
/*
* check the consistency of a specified copy relation by checking
* the following things:
*
* - is the given device part of a copy pair setup
* - does the state of the device match the state in the PPRC status data
* - does the device UID match with the UID in the PPRC status data
* - to prevent misrouted IO check if the given device is present in all
* related PPRC status data
*/
static int dasd_devmap_check_copy_relation(struct dasd_device *device,
struct dasd_copy_entry *entry,
struct dasd_pprc_data_sc4 *data,
struct dasd_copy_relation *copy)
{
struct dasd_pprc_data_sc4 *tmp_dat;
struct dasd_device *tmp_dev;
struct dasd_uid uid;
int i, j;
if (!device->discipline || !device->discipline->get_uid ||
device->discipline->get_uid(device, &uid))
return 1;
i = dasd_devmap_entry_from_pprc_data(data, uid, entry->primary);
if (i < 0) {
dev_warn(&device->cdev->dev, "Device not part of a copy relation\n");
return 1;
}
/* double check which role the current device has */
if (entry->primary) {
if (data->dev_info[i].flags & 0x80) {
dev_warn(&device->cdev->dev, "Copy pair secondary is setup as primary\n");
return 1;
}
if (data->dev_info[i].prim_cu_ssid != uid.ssid ||
data->dev_info[i].primary != uid.real_unit_addr) {
dev_warn(&device->cdev->dev,
"Primary device %s does not match copy pair status primary device %04x\n",
dev_name(&device->cdev->dev),
data->dev_info[i].prim_cu_ssid |
data->dev_info[i].primary);
return 1;
}
} else {
if (!(data->dev_info[i].flags & 0x80)) {
dev_warn(&device->cdev->dev, "Copy pair primary is setup as secondary\n");
return 1;
}
if (data->dev_info[i].sec_cu_ssid != uid.ssid ||
data->dev_info[i].secondary != uid.real_unit_addr) {
dev_warn(&device->cdev->dev,
"Secondary device %s does not match copy pair status secondary device %04x\n",
dev_name(&device->cdev->dev),
data->dev_info[i].sec_cu_ssid |
data->dev_info[i].secondary);
return 1;
}
}
/*
* the current device has to be part of the copy relation of all
* entries to prevent misrouted IO to another copy pair
*/
for (j = 0; j < DASD_CP_ENTRIES; j++) {
		if (entry == &copy->entry[j])
tmp_dev = device;
else
tmp_dev = copy->entry[j].device;
if (!tmp_dev)
continue;
if (dasd_devmap_get_pprc_status(tmp_dev, &tmp_dat))
return 1;
if (dasd_devmap_entry_from_pprc_data(tmp_dat, uid, entry->primary) < 0) {
dev_warn(&tmp_dev->cdev->dev,
"Copy pair relation does not contain device: %s\n",
dev_name(&device->cdev->dev));
kfree(tmp_dat);
return 1;
}
kfree(tmp_dat);
}
return 0;
}
/* delete device from copy relation entry */
static void dasd_devmap_delete_copy_relation_device(struct dasd_device *device)
{
struct dasd_copy_relation *copy;
int i;
if (!device->copy)
return;
copy = device->copy;
for (i = 0; i < DASD_CP_ENTRIES; i++) {
if (copy->entry[i].device == device)
copy->entry[i].device = NULL;
}
dasd_put_device(device);
device->copy = NULL;
}
/*
* read all required information for a copy relation setup and setup the device
* accordingly
*/
int dasd_devmap_set_device_copy_relation(struct ccw_device *cdev,
bool pprc_enabled)
{
struct dasd_pprc_data_sc4 *data = NULL;
struct dasd_copy_entry *entry = NULL;
struct dasd_copy_relation *copy;
struct dasd_devmap *devmap;
struct dasd_device *device;
int i, rc = 0;
devmap = dasd_devmap_from_cdev(cdev);
if (IS_ERR(devmap))
return PTR_ERR(devmap);
device = devmap->device;
if (!device)
return -ENODEV;
copy = devmap->copy;
/* no copy pair setup for this device */
if (!copy)
goto out;
rc = dasd_devmap_get_pprc_status(device, &data);
if (rc)
return rc;
/* print error if PPRC is requested but not enabled on storage server */
if (!pprc_enabled) {
dev_err(&cdev->dev, "Copy relation not enabled on storage server\n");
rc = -EINVAL;
goto out;
}
if (!data->dev_info[0].state) {
dev_warn(&device->cdev->dev, "Copy pair setup requested for device not in copy relation\n");
rc = -EINVAL;
goto out;
}
/* find entry */
for (i = 0; i < DASD_CP_ENTRIES; i++) {
if (copy->entry[i].configured &&
strncmp(dev_name(&cdev->dev),
copy->entry[i].busid, DASD_BUS_ID_SIZE) == 0) {
			entry = &copy->entry[i];
break;
}
}
if (!entry) {
dev_warn(&device->cdev->dev, "Copy relation entry not found\n");
rc = -EINVAL;
goto out;
}
/* check if the copy relation is valid */
if (dasd_devmap_check_copy_relation(device, entry, data, copy)) {
dev_warn(&device->cdev->dev, "Copy relation faulty\n");
rc = -EINVAL;
goto out;
}
dasd_get_device(device);
copy->entry[i].device = device;
device->copy = copy;
out:
kfree(data);
return rc;
}
EXPORT_SYMBOL_GPL(dasd_devmap_set_device_copy_relation);
/*
* Wait queue for dasd_delete_device waits.
*/
static DECLARE_WAIT_QUEUE_HEAD(dasd_delete_wq);
/*
 * Remove a dasd device structure. The passed reference
 * is dropped.
*/
void
dasd_delete_device(struct dasd_device *device)
{
struct ccw_device *cdev;
struct dasd_devmap *devmap;
unsigned long flags;
/* First remove device pointer from devmap. */
devmap = dasd_find_busid(dev_name(&device->cdev->dev));
BUG_ON(IS_ERR(devmap));
spin_lock(&dasd_devmap_lock);
if (devmap->device != device) {
spin_unlock(&dasd_devmap_lock);
dasd_put_device(device);
return;
}
devmap->device = NULL;
spin_unlock(&dasd_devmap_lock);
/* Disconnect dasd_device structure from ccw_device structure. */
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
dev_set_drvdata(&device->cdev->dev, NULL);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	/* Remove copy relation */
dasd_devmap_delete_copy_relation_device(device);
/*
* Drop ref_count by 3, one for the devmap reference, one for
* the cdev reference and one for the passed reference.
*/
atomic_sub(3, &device->ref_count);
/* Wait for reference counter to drop to zero. */
wait_event(dasd_delete_wq, atomic_read(&device->ref_count) == 0);
dasd_generic_free_discipline(device);
kset_unregister(device->paths_info);
/* Disconnect dasd_device structure from ccw_device structure. */
cdev = device->cdev;
device->cdev = NULL;
/* Put ccw_device structure. */
put_device(&cdev->dev);
/* Now the device structure can be freed. */
dasd_free_device(device);
}
/*
* Reference counter dropped to zero. Wake up waiter
* in dasd_delete_device.
*/
void
dasd_put_device_wake(struct dasd_device *device)
{
wake_up(&dasd_delete_wq);
}
EXPORT_SYMBOL_GPL(dasd_put_device_wake);
/*
* Return dasd_device structure associated with cdev.
* This function needs to be called with the ccw device
* lock held. It can be used from interrupt context.
*/
struct dasd_device *
dasd_device_from_cdev_locked(struct ccw_device *cdev)
{
struct dasd_device *device = dev_get_drvdata(&cdev->dev);
if (!device)
return ERR_PTR(-ENODEV);
dasd_get_device(device);
return device;
}
/*
* Return dasd_device structure associated with cdev.
*/
struct dasd_device *
dasd_device_from_cdev(struct ccw_device *cdev)
{
struct dasd_device *device;
unsigned long flags;
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
device = dasd_device_from_cdev_locked(cdev);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
return device;
}
void dasd_add_link_to_gendisk(struct gendisk *gdp, struct dasd_device *device)
{
struct dasd_devmap *devmap;
devmap = dasd_find_busid(dev_name(&device->cdev->dev));
if (IS_ERR(devmap))
return;
spin_lock(&dasd_devmap_lock);
gdp->private_data = devmap;
spin_unlock(&dasd_devmap_lock);
}
EXPORT_SYMBOL(dasd_add_link_to_gendisk);
struct dasd_device *dasd_device_from_gendisk(struct gendisk *gdp)
{
struct dasd_device *device;
struct dasd_devmap *devmap;
if (!gdp->private_data)
return NULL;
device = NULL;
spin_lock(&dasd_devmap_lock);
devmap = gdp->private_data;
if (devmap && devmap->device) {
device = devmap->device;
dasd_get_device(device);
}
spin_unlock(&dasd_devmap_lock);
return device;
}
/*
* SECTION: files in sysfs
*/
/*
* failfast controls the behaviour, if no path is available
*/
static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct dasd_devmap *devmap;
int ff_flag;
devmap = dasd_find_busid(dev_name(dev));
if (!IS_ERR(devmap))
ff_flag = (devmap->features & DASD_FEATURE_FAILFAST) != 0;
else
ff_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_FAILFAST) != 0;
return sysfs_emit(buf, ff_flag ? "1\n" : "0\n");
}
static ssize_t dasd_ff_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned int val;
int rc;
if (kstrtouint(buf, 0, &val) || val > 1)
return -EINVAL;
rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_FAILFAST, val);
return rc ? : count;
}
static DEVICE_ATTR(failfast, 0644, dasd_ff_show, dasd_ff_store);
/*
* readonly controls the readonly status of a dasd
*/
static ssize_t
dasd_ro_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dasd_devmap *devmap;
struct dasd_device *device;
int ro_flag = 0;
devmap = dasd_find_busid(dev_name(dev));
if (IS_ERR(devmap))
goto out;
ro_flag = !!(devmap->features & DASD_FEATURE_READONLY);
spin_lock(&dasd_devmap_lock);
device = devmap->device;
if (device)
ro_flag |= test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
spin_unlock(&dasd_devmap_lock);
out:
return sysfs_emit(buf, ro_flag ? "1\n" : "0\n");
}
static ssize_t
dasd_ro_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct dasd_device *device;
unsigned long flags;
unsigned int val;
int rc;
if (kstrtouint(buf, 0, &val) || val > 1)
return -EINVAL;
rc = dasd_set_feature(cdev, DASD_FEATURE_READONLY, val);
if (rc)
return rc;
device = dasd_device_from_cdev(cdev);
if (IS_ERR(device))
return count;
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
val = val || test_bit(DASD_FLAG_DEVICE_RO, &device->flags);
if (!device->block || !device->block->gdp ||
test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
goto out;
}
/* Increase open_count to avoid losing the block device */
atomic_inc(&device->block->open_count);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
set_disk_ro(device->block->gdp, val);
atomic_dec(&device->block->open_count);
out:
dasd_put_device(device);
return count;
}
static DEVICE_ATTR(readonly, 0644, dasd_ro_show, dasd_ro_store);
/*
* erplog controls the logging of ERP related data
* (e.g. failing channel programs).
*/
static ssize_t
dasd_erplog_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dasd_devmap *devmap;
int erplog;
devmap = dasd_find_busid(dev_name(dev));
if (!IS_ERR(devmap))
erplog = (devmap->features & DASD_FEATURE_ERPLOG) != 0;
else
erplog = (DASD_FEATURE_DEFAULT & DASD_FEATURE_ERPLOG) != 0;
return sysfs_emit(buf, erplog ? "1\n" : "0\n");
}
static ssize_t
dasd_erplog_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned int val;
int rc;
if (kstrtouint(buf, 0, &val) || val > 1)
return -EINVAL;
rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_ERPLOG, val);
return rc ? : count;
}
static DEVICE_ATTR(erplog, 0644, dasd_erplog_show, dasd_erplog_store);
/*
* use_diag controls whether the driver should use diag rather than ssch
* to talk to the device
*/
static ssize_t
dasd_use_diag_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dasd_devmap *devmap;
int use_diag;
devmap = dasd_find_busid(dev_name(dev));
if (!IS_ERR(devmap))
use_diag = (devmap->features & DASD_FEATURE_USEDIAG) != 0;
else
use_diag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USEDIAG) != 0;
return sprintf(buf, use_diag ? "1\n" : "0\n");
}
static ssize_t
dasd_use_diag_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_devmap *devmap;
unsigned int val;
ssize_t rc;
devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
if (kstrtouint(buf, 0, &val) || val > 1)
return -EINVAL;
spin_lock(&dasd_devmap_lock);
/* Changing diag discipline flag is only allowed in offline state. */
rc = count;
if (!devmap->device && !(devmap->features & DASD_FEATURE_USERAW)) {
if (val)
devmap->features |= DASD_FEATURE_USEDIAG;
else
devmap->features &= ~DASD_FEATURE_USEDIAG;
} else
rc = -EPERM;
spin_unlock(&dasd_devmap_lock);
return rc;
}
static DEVICE_ATTR(use_diag, 0644, dasd_use_diag_show, dasd_use_diag_store);
/*
* use_raw controls whether the driver should give access to raw eckd data or
* operate in standard mode
*/
static ssize_t
dasd_use_raw_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dasd_devmap *devmap;
int use_raw;
devmap = dasd_find_busid(dev_name(dev));
if (!IS_ERR(devmap))
use_raw = (devmap->features & DASD_FEATURE_USERAW) != 0;
else
use_raw = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USERAW) != 0;
return sprintf(buf, use_raw ? "1\n" : "0\n");
}
static ssize_t
dasd_use_raw_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_devmap *devmap;
ssize_t rc;
unsigned long val;
devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
if ((kstrtoul(buf, 10, &val) != 0) || val > 1)
return -EINVAL;
spin_lock(&dasd_devmap_lock);
	/* Changing the raw track access flag is only allowed in offline state. */
rc = count;
if (!devmap->device && !(devmap->features & DASD_FEATURE_USEDIAG)) {
if (val)
devmap->features |= DASD_FEATURE_USERAW;
else
devmap->features &= ~DASD_FEATURE_USERAW;
} else
rc = -EPERM;
spin_unlock(&dasd_devmap_lock);
return rc;
}
static DEVICE_ATTR(raw_track_access, 0644, dasd_use_raw_show,
dasd_use_raw_store);
static ssize_t
dasd_safe_offline_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct dasd_device *device;
unsigned long flags;
int rc;
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
device = dasd_device_from_cdev_locked(cdev);
if (IS_ERR(device)) {
rc = PTR_ERR(device);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
goto out;
}
if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
/* Already doing offline processing */
dasd_put_device(device);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
rc = -EBUSY;
goto out;
}
set_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
dasd_put_device(device);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
rc = ccw_device_set_offline(cdev);
out:
return rc ? rc : count;
}
static DEVICE_ATTR(safe_offline, 0200, NULL, dasd_safe_offline_store);
static ssize_t
dasd_access_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct ccw_device *cdev = to_ccwdev(dev);
struct dasd_device *device;
int count;
device = dasd_device_from_cdev(cdev);
if (IS_ERR(device))
return PTR_ERR(device);
if (!device->discipline)
count = -ENODEV;
else if (!device->discipline->host_access_count)
count = -EOPNOTSUPP;
else
count = device->discipline->host_access_count(device);
dasd_put_device(device);
if (count < 0)
return count;
return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR(host_access_count, 0444, dasd_access_show, NULL);
static ssize_t
dasd_discipline_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct dasd_device *device;
ssize_t len;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
goto out;
else if (!device->discipline) {
dasd_put_device(device);
goto out;
} else {
len = sysfs_emit(buf, "%s\n",
device->discipline->name);
dasd_put_device(device);
return len;
}
out:
len = sysfs_emit(buf, "none\n");
return len;
}
static DEVICE_ATTR(discipline, 0444, dasd_discipline_show, NULL);
static ssize_t
dasd_device_status_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct dasd_device *device;
ssize_t len;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (!IS_ERR(device)) {
switch (device->state) {
case DASD_STATE_NEW:
len = sysfs_emit(buf, "new\n");
break;
case DASD_STATE_KNOWN:
len = sysfs_emit(buf, "detected\n");
break;
case DASD_STATE_BASIC:
len = sysfs_emit(buf, "basic\n");
break;
case DASD_STATE_UNFMT:
len = sysfs_emit(buf, "unformatted\n");
break;
case DASD_STATE_READY:
len = sysfs_emit(buf, "ready\n");
break;
case DASD_STATE_ONLINE:
len = sysfs_emit(buf, "online\n");
break;
default:
len = sysfs_emit(buf, "no stat\n");
break;
}
dasd_put_device(device);
} else
len = sysfs_emit(buf, "unknown\n");
return len;
}
static DEVICE_ATTR(status, 0444, dasd_device_status_show, NULL);
static ssize_t dasd_alias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dasd_device *device;
struct dasd_uid uid;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return sprintf(buf, "0\n");
if (device->discipline && device->discipline->get_uid &&
!device->discipline->get_uid(device, &uid)) {
if (uid.type == UA_BASE_PAV_ALIAS ||
uid.type == UA_HYPER_PAV_ALIAS) {
dasd_put_device(device);
return sprintf(buf, "1\n");
}
}
dasd_put_device(device);
return sprintf(buf, "0\n");
}
static DEVICE_ATTR(alias, 0444, dasd_alias_show, NULL);
static ssize_t dasd_vendor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dasd_device *device;
struct dasd_uid uid;
char *vendor;
device = dasd_device_from_cdev(to_ccwdev(dev));
vendor = "";
if (IS_ERR(device))
return sysfs_emit(buf, "%s\n", vendor);
if (device->discipline && device->discipline->get_uid &&
!device->discipline->get_uid(device, &uid))
vendor = uid.vendor;
dasd_put_device(device);
return sysfs_emit(buf, "%s\n", vendor);
}
static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL);
static ssize_t
dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
char uid_string[DASD_UID_STRLEN];
struct dasd_device *device;
struct dasd_uid uid;
char ua_string[3];
device = dasd_device_from_cdev(to_ccwdev(dev));
uid_string[0] = 0;
if (IS_ERR(device))
return sysfs_emit(buf, "%s\n", uid_string);
if (device->discipline && device->discipline->get_uid &&
!device->discipline->get_uid(device, &uid)) {
switch (uid.type) {
case UA_BASE_DEVICE:
snprintf(ua_string, sizeof(ua_string), "%02x",
uid.real_unit_addr);
break;
case UA_BASE_PAV_ALIAS:
snprintf(ua_string, sizeof(ua_string), "%02x",
uid.base_unit_addr);
break;
case UA_HYPER_PAV_ALIAS:
snprintf(ua_string, sizeof(ua_string), "xx");
break;
default:
/* should not happen, treat like base device */
snprintf(ua_string, sizeof(ua_string), "%02x",
uid.real_unit_addr);
break;
}
if (strlen(uid.vduit) > 0)
snprintf(uid_string, sizeof(uid_string),
"%s.%s.%04x.%s.%s",
uid.vendor, uid.serial, uid.ssid, ua_string,
uid.vduit);
else
snprintf(uid_string, sizeof(uid_string),
"%s.%s.%04x.%s",
uid.vendor, uid.serial, uid.ssid, ua_string);
}
dasd_put_device(device);
return sysfs_emit(buf, "%s\n", uid_string);
}
static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL);
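/*
 * Illustrative only: with the format strings above, dasd_uid_show renders
 * a UID as "<vendor>.<serial>.<ssid>.<ua>" plus an optional ".<vduit>"
 * suffix, e.g. (hypothetical values) "IBM.750000000ABC01.1234.0a" for a
 * base device with unit address 0x0a.
 */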
/*
* extended error-reporting
*/
static ssize_t
dasd_eer_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dasd_devmap *devmap;
int eer_flag;
devmap = dasd_find_busid(dev_name(dev));
if (!IS_ERR(devmap) && devmap->device)
eer_flag = dasd_eer_enabled(devmap->device);
else
eer_flag = 0;
return sysfs_emit(buf, eer_flag ? "1\n" : "0\n");
}
static ssize_t
dasd_eer_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_device *device;
unsigned int val;
int rc = 0;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return PTR_ERR(device);
	if (kstrtouint(buf, 0, &val) || val > 1) {
		dasd_put_device(device);
		return -EINVAL;
	}
if (val)
rc = dasd_eer_enable(device);
else
dasd_eer_disable(device);
dasd_put_device(device);
return rc ? : count;
}
static DEVICE_ATTR(eer_enabled, 0644, dasd_eer_show, dasd_eer_store);
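/*
 * Illustrative usage from user space (hypothetical bus ID); the store
 * function above accepts 0 or 1 via kstrtouint with base 0, so decimal
 * and "0x"-prefixed values both work:
 *
 *	echo 1 > /sys/bus/ccw/devices/0.0.1234/eer_enabled
 *	cat /sys/bus/ccw/devices/0.0.1234/eer_enabled
 */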
/*
 * aq_mask controls whether the DASD should be quiesced on certain triggers.
 * The aq_mask attribute is interpreted as a bitmap of the DASD_EER_* triggers.
 */
static ssize_t dasd_aq_mask_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct dasd_devmap *devmap;
unsigned int aq_mask = 0;
devmap = dasd_find_busid(dev_name(dev));
if (!IS_ERR(devmap))
aq_mask = devmap->aq_mask;
return sysfs_emit(buf, "%d\n", aq_mask);
}
static ssize_t dasd_aq_mask_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_devmap *devmap;
unsigned int val;
if (kstrtouint(buf, 0, &val) || val > DASD_EER_VALID)
return -EINVAL;
devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
spin_lock(&dasd_devmap_lock);
devmap->aq_mask = val;
if (devmap->device)
devmap->device->aq_mask = devmap->aq_mask;
spin_unlock(&dasd_devmap_lock);
return count;
}
static DEVICE_ATTR(aq_mask, 0644, dasd_aq_mask_show, dasd_aq_mask_store);
/*
 * aq_requeue controls whether requests are returned to the block layer
 * on quiesce or whether they are merely not started
 */
static ssize_t dasd_aqr_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct dasd_devmap *devmap;
int flag;
devmap = dasd_find_busid(dev_name(dev));
if (!IS_ERR(devmap))
flag = (devmap->features & DASD_FEATURE_REQUEUEQUIESCE) != 0;
else
flag = (DASD_FEATURE_DEFAULT &
DASD_FEATURE_REQUEUEQUIESCE) != 0;
return sysfs_emit(buf, "%d\n", flag);
}
static ssize_t dasd_aqr_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
bool val;
int rc;
if (kstrtobool(buf, &val))
return -EINVAL;
rc = dasd_set_feature(to_ccwdev(dev), DASD_FEATURE_REQUEUEQUIESCE, val);
return rc ? : count;
}
static DEVICE_ATTR(aq_requeue, 0644, dasd_aqr_show, dasd_aqr_store);
/*
 * aq_timeouts controls how many retries have to time out until
 * a device gets autoquiesced
 */
static ssize_t
dasd_aq_timeouts_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct dasd_device *device;
int len;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
len = sysfs_emit(buf, "%u\n", device->aq_timeouts);
dasd_put_device(device);
return len;
}
static ssize_t
dasd_aq_timeouts_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_device *device;
unsigned int val;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
if ((kstrtouint(buf, 10, &val) != 0) ||
val > DASD_RETRIES_MAX || val == 0) {
dasd_put_device(device);
return -EINVAL;
}
if (val)
device->aq_timeouts = val;
dasd_put_device(device);
return count;
}
static DEVICE_ATTR(aq_timeouts, 0644, dasd_aq_timeouts_show,
dasd_aq_timeouts_store);
/*
* expiration time for default requests
*/
static ssize_t
dasd_expires_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dasd_device *device;
int len;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
len = sysfs_emit(buf, "%lu\n", device->default_expires);
dasd_put_device(device);
return len;
}
static ssize_t
dasd_expires_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_device *device;
unsigned long val;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
if ((kstrtoul(buf, 10, &val) != 0) ||
(val > DASD_EXPIRES_MAX) || val == 0) {
dasd_put_device(device);
return -EINVAL;
}
if (val)
device->default_expires = val;
dasd_put_device(device);
return count;
}
static DEVICE_ATTR(expires, 0644, dasd_expires_show, dasd_expires_store);
static ssize_t
dasd_retries_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dasd_device *device;
int len;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
len = sysfs_emit(buf, "%lu\n", device->default_retries);
dasd_put_device(device);
return len;
}
static ssize_t
dasd_retries_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_device *device;
unsigned long val;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
if ((kstrtoul(buf, 10, &val) != 0) ||
(val > DASD_RETRIES_MAX)) {
dasd_put_device(device);
return -EINVAL;
}
if (val)
device->default_retries = val;
dasd_put_device(device);
return count;
}
static DEVICE_ATTR(retries, 0644, dasd_retries_show, dasd_retries_store);
static ssize_t
dasd_timeout_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct dasd_device *device;
int len;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
len = sysfs_emit(buf, "%lu\n", device->blk_timeout);
dasd_put_device(device);
return len;
}
static ssize_t
dasd_timeout_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_device *device;
unsigned long val;
device = dasd_device_from_cdev(to_ccwdev(dev));
	if (IS_ERR(device))
		return -ENODEV;
	if (!device->block) {
		dasd_put_device(device);
		return -ENODEV;
	}
if ((kstrtoul(buf, 10, &val) != 0) ||
val > UINT_MAX / HZ) {
dasd_put_device(device);
return -EINVAL;
}
if (!device->block->gdp) {
dasd_put_device(device);
return -ENODEV;
}
device->blk_timeout = val;
blk_queue_rq_timeout(device->block->gdp->queue, val * HZ);
dasd_put_device(device);
return count;
}
static DEVICE_ATTR(timeout, 0644,
dasd_timeout_show, dasd_timeout_store);
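/*
 * Illustrative usage (hypothetical bus ID): the value is in seconds and
 * is multiplied by HZ before being handed to blk_queue_rq_timeout(), so
 * the following sets a 30 second block layer request timeout:
 *
 *	echo 30 > /sys/bus/ccw/devices/0.0.1234/timeout
 */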
static ssize_t
dasd_path_reset_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_device *device;
unsigned int val;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
if ((kstrtouint(buf, 16, &val) != 0) || val > 0xff)
val = 0;
if (device->discipline && device->discipline->reset_path)
device->discipline->reset_path(device, (__u8) val);
dasd_put_device(device);
return count;
}
static DEVICE_ATTR(path_reset, 0200, NULL, dasd_path_reset_store);
static ssize_t dasd_hpf_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct dasd_device *device;
int hpf;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
if (!device->discipline || !device->discipline->hpf_enabled) {
dasd_put_device(device);
return sysfs_emit(buf, "%d\n", dasd_nofcx);
}
hpf = device->discipline->hpf_enabled(device);
dasd_put_device(device);
return sysfs_emit(buf, "%d\n", hpf);
}
static DEVICE_ATTR(hpf, 0444, dasd_hpf_show, NULL);
static ssize_t dasd_reservation_policy_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct dasd_devmap *devmap;
int rc = 0;
devmap = dasd_find_busid(dev_name(dev));
if (IS_ERR(devmap)) {
rc = sysfs_emit(buf, "ignore\n");
} else {
spin_lock(&dasd_devmap_lock);
if (devmap->features & DASD_FEATURE_FAILONSLCK)
rc = sysfs_emit(buf, "fail\n");
else
rc = sysfs_emit(buf, "ignore\n");
spin_unlock(&dasd_devmap_lock);
}
return rc;
}
static ssize_t dasd_reservation_policy_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ccw_device *cdev = to_ccwdev(dev);
int rc;
if (sysfs_streq("ignore", buf))
rc = dasd_set_feature(cdev, DASD_FEATURE_FAILONSLCK, 0);
else if (sysfs_streq("fail", buf))
rc = dasd_set_feature(cdev, DASD_FEATURE_FAILONSLCK, 1);
else
rc = -EINVAL;
return rc ? : count;
}
static DEVICE_ATTR(reservation_policy, 0644,
dasd_reservation_policy_show, dasd_reservation_policy_store);
static ssize_t dasd_reservation_state_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct dasd_device *device;
int rc = 0;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return sysfs_emit(buf, "none\n");
if (test_bit(DASD_FLAG_IS_RESERVED, &device->flags))
rc = sysfs_emit(buf, "reserved\n");
else if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags))
rc = sysfs_emit(buf, "lost\n");
else
rc = sysfs_emit(buf, "none\n");
dasd_put_device(device);
return rc;
}
static ssize_t dasd_reservation_state_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_device *device;
int rc = 0;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
if (sysfs_streq("reset", buf))
clear_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
else
rc = -EINVAL;
dasd_put_device(device);
if (rc)
return rc;
else
return count;
}
static DEVICE_ATTR(last_known_reservation_state, 0644,
dasd_reservation_state_show, dasd_reservation_state_store);
static ssize_t dasd_pm_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dasd_device *device;
u8 opm, nppm, cablepm, cuirpm, hpfpm, ifccpm;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return sprintf(buf, "0\n");
opm = dasd_path_get_opm(device);
nppm = dasd_path_get_nppm(device);
cablepm = dasd_path_get_cablepm(device);
cuirpm = dasd_path_get_cuirpm(device);
hpfpm = dasd_path_get_hpfpm(device);
ifccpm = dasd_path_get_ifccpm(device);
dasd_put_device(device);
return sprintf(buf, "%02x %02x %02x %02x %02x %02x\n", opm, nppm,
cablepm, cuirpm, hpfpm, ifccpm);
}
static DEVICE_ATTR(path_masks, 0444, dasd_pm_show, NULL);
/*
* threshold value for IFCC/CCC errors
*/
static ssize_t
dasd_path_threshold_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dasd_device *device;
int len;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
len = sysfs_emit(buf, "%lu\n", device->path_thrhld);
dasd_put_device(device);
return len;
}
static ssize_t
dasd_path_threshold_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_device *device;
unsigned long flags;
unsigned long val;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
if (kstrtoul(buf, 10, &val) != 0 || val > DASD_THRHLD_MAX) {
dasd_put_device(device);
return -EINVAL;
}
spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags);
device->path_thrhld = val;
spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags);
dasd_put_device(device);
return count;
}
static DEVICE_ATTR(path_threshold, 0644, dasd_path_threshold_show,
dasd_path_threshold_store);
/*
 * configure whether a path is disabled after the IFCC/CCC error threshold
 * is exceeded
 */
static ssize_t
dasd_path_autodisable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dasd_devmap *devmap;
int flag;
devmap = dasd_find_busid(dev_name(dev));
if (!IS_ERR(devmap))
flag = (devmap->features & DASD_FEATURE_PATH_AUTODISABLE) != 0;
else
flag = (DASD_FEATURE_DEFAULT &
DASD_FEATURE_PATH_AUTODISABLE) != 0;
return sysfs_emit(buf, flag ? "1\n" : "0\n");
}
static ssize_t
dasd_path_autodisable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned int val;
int rc;
if (kstrtouint(buf, 0, &val) || val > 1)
return -EINVAL;
rc = dasd_set_feature(to_ccwdev(dev),
DASD_FEATURE_PATH_AUTODISABLE, val);
return rc ? : count;
}
static DEVICE_ATTR(path_autodisable, 0644,
dasd_path_autodisable_show,
dasd_path_autodisable_store);
/*
 * interval for IFCC/CCC checks, i.e. the time without an IFCC/CCC error
 * before the error counter is reset
 */
static ssize_t
dasd_path_interval_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dasd_device *device;
int len;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
len = sysfs_emit(buf, "%lu\n", device->path_interval);
dasd_put_device(device);
return len;
}
static ssize_t
dasd_path_interval_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_device *device;
unsigned long flags;
unsigned long val;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
if ((kstrtoul(buf, 10, &val) != 0) ||
(val > DASD_INTERVAL_MAX) || val == 0) {
dasd_put_device(device);
return -EINVAL;
}
spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags);
if (val)
device->path_interval = val;
spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags);
dasd_put_device(device);
return count;
}
static DEVICE_ATTR(path_interval, 0644, dasd_path_interval_show,
dasd_path_interval_store);
static ssize_t
dasd_device_fcs_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct dasd_device *device;
int fc_sec;
int rc;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
fc_sec = dasd_path_get_fcs_device(device);
if (fc_sec == -EINVAL)
rc = sysfs_emit(buf, "Inconsistent\n");
else
rc = sysfs_emit(buf, "%s\n", dasd_path_get_fcs_str(fc_sec));
dasd_put_device(device);
return rc;
}
static DEVICE_ATTR(fc_security, 0444, dasd_device_fcs_show, NULL);
static ssize_t
dasd_path_fcs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
struct dasd_path *path = to_dasd_path(kobj);
unsigned int fc_sec = path->fc_security;
return sysfs_emit(buf, "%s\n", dasd_path_get_fcs_str(fc_sec));
}
static struct kobj_attribute path_fcs_attribute =
__ATTR(fc_security, 0444, dasd_path_fcs_show, NULL);
/*
* print copy relation in the form
* primary,secondary[1] primary,secondary[2], ...
*/
static ssize_t
dasd_copy_pair_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
char prim_busid[DASD_BUS_ID_SIZE];
struct dasd_copy_relation *copy;
struct dasd_devmap *devmap;
int len = 0;
int i;
devmap = dasd_find_busid(dev_name(dev));
if (IS_ERR(devmap))
return -ENODEV;
if (!devmap->copy)
return -ENODEV;
copy = devmap->copy;
/* find primary */
for (i = 0; i < DASD_CP_ENTRIES; i++) {
if (copy->entry[i].configured && copy->entry[i].primary) {
strscpy(prim_busid, copy->entry[i].busid,
DASD_BUS_ID_SIZE);
break;
}
}
if (i == DASD_CP_ENTRIES)
goto out;
/* print all secondary */
for (i = 0; i < DASD_CP_ENTRIES; i++) {
if (copy->entry[i].configured && !copy->entry[i].primary)
len += sysfs_emit_at(buf, len, "%s,%s ", prim_busid,
copy->entry[i].busid);
}
len += sysfs_emit_at(buf, len, "\n");
out:
return len;
}
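/*
 * Illustrative output of the show function above for one primary with
 * two secondaries (hypothetical bus IDs):
 *
 *	0.0.1234,0.0.4321 0.0.1234,0.0.5678
 */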
static int dasd_devmap_set_copy_relation(struct dasd_devmap *devmap,
struct dasd_copy_relation *copy,
char *busid, bool primary)
{
int i;
/* find free entry */
for (i = 0; i < DASD_CP_ENTRIES; i++) {
/* current bus_id already included, nothing to do */
if (copy->entry[i].configured &&
strncmp(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE) == 0)
return 0;
if (!copy->entry[i].configured)
break;
}
if (i == DASD_CP_ENTRIES)
return -EINVAL;
copy->entry[i].configured = true;
strscpy(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE);
if (primary) {
		copy->active = &copy->entry[i];
copy->entry[i].primary = true;
}
if (!devmap->copy)
devmap->copy = copy;
return 0;
}
static void dasd_devmap_del_copy_relation(struct dasd_copy_relation *copy,
char *busid)
{
int i;
spin_lock(&dasd_devmap_lock);
/* find entry */
for (i = 0; i < DASD_CP_ENTRIES; i++) {
if (copy->entry[i].configured &&
strncmp(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE) == 0)
break;
}
if (i == DASD_CP_ENTRIES || !copy->entry[i].configured) {
spin_unlock(&dasd_devmap_lock);
return;
}
copy->entry[i].configured = false;
memset(copy->entry[i].busid, 0, DASD_BUS_ID_SIZE);
	if (copy->active == &copy->entry[i]) {
copy->active = NULL;
copy->entry[i].primary = false;
}
spin_unlock(&dasd_devmap_lock);
}
static int dasd_devmap_clear_copy_relation(struct device *dev)
{
struct dasd_copy_relation *copy;
struct dasd_devmap *devmap;
int i, rc = 1;
devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
if (IS_ERR(devmap))
return 1;
spin_lock(&dasd_devmap_lock);
if (!devmap->copy)
goto out;
copy = devmap->copy;
	/* first check if all secondary devices are offline */
for (i = 0; i < DASD_CP_ENTRIES; i++) {
if (!copy->entry[i].configured)
continue;
if (copy->entry[i].device == copy->active->device)
continue;
if (copy->entry[i].device)
goto out;
}
/* clear all devmap entries */
for (i = 0; i < DASD_CP_ENTRIES; i++) {
if (strlen(copy->entry[i].busid) == 0)
continue;
if (copy->entry[i].device) {
dasd_put_device(copy->entry[i].device);
copy->entry[i].device->copy = NULL;
copy->entry[i].device = NULL;
}
devmap = dasd_find_busid_locked(copy->entry[i].busid);
devmap->copy = NULL;
memset(copy->entry[i].busid, 0, DASD_BUS_ID_SIZE);
}
kfree(copy);
rc = 0;
out:
spin_unlock(&dasd_devmap_lock);
return rc;
}
/*
* parse BUSIDs from a copy pair
*/
static int dasd_devmap_parse_busid(const char *buf, char *prim_busid,
char *sec_busid)
{
char *primary, *secondary, *tmp, *pt;
int id0, id1, id2;
pt = kstrdup(buf, GFP_KERNEL);
tmp = pt;
if (!tmp)
return -ENOMEM;
primary = strsep(&tmp, ",");
if (!primary) {
kfree(pt);
return -EINVAL;
}
secondary = strsep(&tmp, ",");
if (!secondary) {
kfree(pt);
return -EINVAL;
}
if (dasd_busid(primary, &id0, &id1, &id2)) {
kfree(pt);
return -EINVAL;
}
sprintf(prim_busid, "%01x.%01x.%04x", id0, id1, id2);
if (dasd_busid(secondary, &id0, &id1, &id2)) {
kfree(pt);
return -EINVAL;
}
sprintf(sec_busid, "%01x.%01x.%04x", id0, id1, id2);
kfree(pt);
return 0;
}
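/*
 * Illustrative only: for an input of "0.0.1234,0.0.4321" the function
 * above fills prim_busid with "0.0.1234" and sec_busid with "0.0.4321";
 * anything that dasd_busid() cannot parse yields -EINVAL.
 */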
static ssize_t dasd_copy_pair_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_devmap *prim_devmap, *sec_devmap;
char prim_busid[DASD_BUS_ID_SIZE];
char sec_busid[DASD_BUS_ID_SIZE];
struct dasd_copy_relation *copy;
struct dasd_device *device;
bool pprc_enabled;
int rc;
if (strncmp(buf, "clear", strlen("clear")) == 0) {
if (dasd_devmap_clear_copy_relation(dev))
return -EINVAL;
return count;
}
rc = dasd_devmap_parse_busid(buf, prim_busid, sec_busid);
if (rc)
return rc;
if (strncmp(dev_name(dev), prim_busid, DASD_BUS_ID_SIZE) != 0 &&
strncmp(dev_name(dev), sec_busid, DASD_BUS_ID_SIZE) != 0)
return -EINVAL;
	/* allocate primary devmap if needed */
	prim_devmap = dasd_find_busid(prim_busid);
	if (IS_ERR(prim_devmap)) {
		prim_devmap = dasd_add_busid(prim_busid, DASD_FEATURE_DEFAULT);
		if (IS_ERR(prim_devmap))
			return PTR_ERR(prim_devmap);
	}
	/* allocate secondary devmap if needed */
	sec_devmap = dasd_find_busid(sec_busid);
	if (IS_ERR(sec_devmap)) {
		sec_devmap = dasd_add_busid(sec_busid, DASD_FEATURE_DEFAULT);
		if (IS_ERR(sec_devmap))
			return PTR_ERR(sec_devmap);
	}
/* setting copy relation is only allowed for offline secondary */
if (sec_devmap->device)
return -EINVAL;
if (prim_devmap->copy) {
copy = prim_devmap->copy;
} else if (sec_devmap->copy) {
copy = sec_devmap->copy;
} else {
copy = kzalloc(sizeof(*copy), GFP_KERNEL);
if (!copy)
return -ENOMEM;
}
spin_lock(&dasd_devmap_lock);
rc = dasd_devmap_set_copy_relation(prim_devmap, copy, prim_busid, true);
if (rc) {
spin_unlock(&dasd_devmap_lock);
return rc;
}
rc = dasd_devmap_set_copy_relation(sec_devmap, copy, sec_busid, false);
if (rc) {
spin_unlock(&dasd_devmap_lock);
return rc;
}
spin_unlock(&dasd_devmap_lock);
/* if primary device is already online call device setup directly */
if (prim_devmap->device && !prim_devmap->device->copy) {
device = prim_devmap->device;
if (device->discipline->pprc_enabled) {
pprc_enabled = device->discipline->pprc_enabled(device);
rc = dasd_devmap_set_device_copy_relation(device->cdev,
pprc_enabled);
} else {
rc = -EOPNOTSUPP;
}
}
if (rc) {
dasd_devmap_del_copy_relation(copy, prim_busid);
dasd_devmap_del_copy_relation(copy, sec_busid);
count = rc;
}
return count;
}
static DEVICE_ATTR(copy_pair, 0644, dasd_copy_pair_show,
dasd_copy_pair_store);
static ssize_t
dasd_copy_role_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dasd_copy_relation *copy;
struct dasd_device *device;
int len, i;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
if (!device->copy) {
len = sysfs_emit(buf, "none\n");
goto out;
}
copy = device->copy;
/* only the active device is primary */
if (copy->active->device == device) {
len = sysfs_emit(buf, "primary\n");
goto out;
}
for (i = 0; i < DASD_CP_ENTRIES; i++) {
if (copy->entry[i].device == device) {
len = sysfs_emit(buf, "secondary\n");
goto out;
}
}
/* not in the list, no COPY role */
len = sysfs_emit(buf, "none\n");
out:
dasd_put_device(device);
return len;
}
static DEVICE_ATTR(copy_role, 0444, dasd_copy_role_show, NULL);
static ssize_t dasd_device_ping(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct dasd_device *device;
size_t rc;
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
	/*
	 * Do not try during offline processing. This is an early check
	 * only; the sleep_on function itself checks for offline
	 * processing again.
	 */
if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
rc = -EBUSY;
goto out;
}
if (!device->discipline || !device->discipline->device_ping) {
rc = -EOPNOTSUPP;
goto out;
}
rc = device->discipline->device_ping(device);
if (!rc)
rc = count;
out:
dasd_put_device(device);
return rc;
}
static DEVICE_ATTR(ping, 0200, NULL, dasd_device_ping);
#define DASD_DEFINE_ATTR(_name, _func) \
static ssize_t dasd_##_name##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct ccw_device *cdev = to_ccwdev(dev); \
struct dasd_device *device = dasd_device_from_cdev(cdev); \
int val = 0; \
\
if (IS_ERR(device)) \
return -ENODEV; \
if (device->discipline && _func) \
val = _func(device); \
dasd_put_device(device); \
\
return sysfs_emit(buf, "%d\n", val); \
} \
static DEVICE_ATTR(_name, 0444, dasd_##_name##_show, NULL);
DASD_DEFINE_ATTR(ese, device->discipline->is_ese);
DASD_DEFINE_ATTR(extent_size, device->discipline->ext_size);
DASD_DEFINE_ATTR(pool_id, device->discipline->ext_pool_id);
DASD_DEFINE_ATTR(space_configured, device->discipline->space_configured);
DASD_DEFINE_ATTR(space_allocated, device->discipline->space_allocated);
DASD_DEFINE_ATTR(logical_capacity, device->discipline->logical_capacity);
DASD_DEFINE_ATTR(warn_threshold, device->discipline->ext_pool_warn_thrshld);
DASD_DEFINE_ATTR(cap_at_warnlevel, device->discipline->ext_pool_cap_at_warnlevel);
DASD_DEFINE_ATTR(pool_oos, device->discipline->ext_pool_oos);
static struct attribute * dasd_attrs[] = {
&dev_attr_readonly.attr,
&dev_attr_discipline.attr,
&dev_attr_status.attr,
&dev_attr_alias.attr,
&dev_attr_vendor.attr,
&dev_attr_uid.attr,
&dev_attr_use_diag.attr,
&dev_attr_raw_track_access.attr,
&dev_attr_eer_enabled.attr,
&dev_attr_erplog.attr,
&dev_attr_failfast.attr,
&dev_attr_expires.attr,
&dev_attr_retries.attr,
&dev_attr_timeout.attr,
&dev_attr_reservation_policy.attr,
&dev_attr_last_known_reservation_state.attr,
&dev_attr_safe_offline.attr,
&dev_attr_host_access_count.attr,
&dev_attr_path_masks.attr,
&dev_attr_path_threshold.attr,
&dev_attr_path_autodisable.attr,
&dev_attr_path_interval.attr,
&dev_attr_path_reset.attr,
&dev_attr_hpf.attr,
&dev_attr_ese.attr,
&dev_attr_fc_security.attr,
&dev_attr_copy_pair.attr,
&dev_attr_copy_role.attr,
&dev_attr_ping.attr,
&dev_attr_aq_mask.attr,
&dev_attr_aq_requeue.attr,
&dev_attr_aq_timeouts.attr,
NULL,
};
static const struct attribute_group dasd_attr_group = {
.attrs = dasd_attrs,
};
static struct attribute *capacity_attrs[] = {
&dev_attr_space_configured.attr,
&dev_attr_space_allocated.attr,
&dev_attr_logical_capacity.attr,
NULL,
};
static const struct attribute_group capacity_attr_group = {
.name = "capacity",
.attrs = capacity_attrs,
};
static struct attribute *ext_pool_attrs[] = {
&dev_attr_pool_id.attr,
&dev_attr_extent_size.attr,
&dev_attr_warn_threshold.attr,
&dev_attr_cap_at_warnlevel.attr,
&dev_attr_pool_oos.attr,
NULL,
};
static const struct attribute_group ext_pool_attr_group = {
.name = "extent_pool",
.attrs = ext_pool_attrs,
};
const struct attribute_group *dasd_dev_groups[] = {
&dasd_attr_group,
&capacity_attr_group,
&ext_pool_attr_group,
NULL,
};
EXPORT_SYMBOL_GPL(dasd_dev_groups);
/*
* Return value of the specified feature.
*/
int
dasd_get_feature(struct ccw_device *cdev, int feature)
{
struct dasd_devmap *devmap;
devmap = dasd_find_busid(dev_name(&cdev->dev));
if (IS_ERR(devmap))
return PTR_ERR(devmap);
return ((devmap->features & feature) != 0);
}
/*
* Set / reset given feature.
 * Flag indicates whether to set (!=0) or reset (=0) the feature.
*/
int
dasd_set_feature(struct ccw_device *cdev, int feature, int flag)
{
struct dasd_devmap *devmap;
devmap = dasd_devmap_from_cdev(cdev);
if (IS_ERR(devmap))
return PTR_ERR(devmap);
spin_lock(&dasd_devmap_lock);
if (flag)
devmap->features |= feature;
else
devmap->features &= ~feature;
if (devmap->device)
devmap->device->features = devmap->features;
spin_unlock(&dasd_devmap_lock);
return 0;
}
EXPORT_SYMBOL(dasd_set_feature);
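/*
 * Illustrative (hypothetical) caller of the feature API above, not part
 * of the driver: mark a device read-only, then read the flag back.
 *
 *	rc = dasd_set_feature(cdev, DASD_FEATURE_READONLY, 1);
 *	if (!rc)
 *		rc = dasd_get_feature(cdev, DASD_FEATURE_READONLY);
 *
 * rc then is 1 if the feature is set, 0 if it is not, or a negative
 * errno if no devmap exists for the device.
 */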
static struct attribute *paths_info_attrs[] = {
&path_fcs_attribute.attr,
NULL,
};
ATTRIBUTE_GROUPS(paths_info);
static struct kobj_type path_attr_type = {
.release = dasd_path_release,
.default_groups = paths_info_groups,
.sysfs_ops = &kobj_sysfs_ops,
};
static void dasd_path_init_kobj(struct dasd_device *device, int chp)
{
device->path[chp].kobj.kset = device->paths_info;
kobject_init(&device->path[chp].kobj, &path_attr_type);
}
void dasd_path_create_kobj(struct dasd_device *device, int chp)
{
int rc;
if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
return;
if (!device->paths_info) {
dev_warn(&device->cdev->dev, "Unable to create paths objects\n");
return;
}
if (device->path[chp].in_sysfs)
return;
if (!device->path[chp].conf_data)
return;
dasd_path_init_kobj(device, chp);
rc = kobject_add(&device->path[chp].kobj, NULL, "%x.%02x",
device->path[chp].cssid, device->path[chp].chpid);
	if (rc) {
		kobject_put(&device->path[chp].kobj);
		return;
	}
	device->path[chp].in_sysfs = true;
}
EXPORT_SYMBOL(dasd_path_create_kobj);
void dasd_path_create_kobjects(struct dasd_device *device)
{
u8 lpm, opm;
opm = dasd_path_get_opm(device);
for (lpm = 0x80; lpm; lpm >>= 1) {
if (!(lpm & opm))
continue;
dasd_path_create_kobj(device, pathmask_to_pos(lpm));
}
}
EXPORT_SYMBOL(dasd_path_create_kobjects);
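/*
 * Illustrative only: the loop above walks the path mask from the MSB
 * down, so an opm of 0xa0 (hypothetical) creates kobjects for the paths
 * at positions 0 (mask 0x80) and 2 (mask 0x20) only.
 */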
static void dasd_path_remove_kobj(struct dasd_device *device, int chp)
{
if (device->path[chp].in_sysfs) {
kobject_put(&device->path[chp].kobj);
device->path[chp].in_sysfs = false;
}
}
/*
* As we keep kobjects for the lifetime of a device, this function must not be
* called anywhere but in the context of offlining a device.
*/
void dasd_path_remove_kobjects(struct dasd_device *device)
{
int i;
for (i = 0; i < 8; i++)
dasd_path_remove_kobj(device, i);
}
EXPORT_SYMBOL(dasd_path_remove_kobjects);
int
dasd_devmap_init(void)
{
int i;
/* Initialize devmap structures. */
dasd_max_devindex = 0;
for (i = 0; i < 256; i++)
INIT_LIST_HEAD(&dasd_hashlists[i]);
return 0;
}
void
dasd_devmap_exit(void)
{
dasd_forget_ranges();
}
| linux-master | drivers/s390/block/dasd_devmap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <[email protected]>
* Horst Hummel <[email protected]>
* Carsten Otte <[email protected]>
* Martin Schwidefsky <[email protected]>
* Bugreports.to..: <[email protected]>
* Copyright IBM Corp. 1999, 2002
*
* /proc interface for the dasd driver.
*
*/
#define KMSG_COMPONENT "dasd"
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <asm/debug.h>
#include <linux/uaccess.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd_proc:"
#include "dasd_int.h"
static struct proc_dir_entry *dasd_proc_root_entry = NULL;
static struct proc_dir_entry *dasd_devices_entry = NULL;
static struct proc_dir_entry *dasd_statistics_entry = NULL;
static int
dasd_devices_show(struct seq_file *m, void *v)
{
struct dasd_device *device;
struct dasd_block *block;
char *substr;
device = dasd_device_from_devindex((unsigned long) v - 1);
if (IS_ERR(device))
return 0;
if (device->block)
block = device->block;
else {
dasd_put_device(device);
return 0;
}
/* Print device number. */
seq_printf(m, "%s", dev_name(&device->cdev->dev));
/* Print discipline string. */
if (device->discipline != NULL)
seq_printf(m, "(%s)", device->discipline->name);
else
seq_printf(m, "(none)");
/* Print kdev. */
if (block->gdp)
seq_printf(m, " at (%3d:%6d)",
MAJOR(disk_devt(block->gdp)),
MINOR(disk_devt(block->gdp)));
else
seq_printf(m, " at (???:??????)");
/* Print device name. */
if (block->gdp)
seq_printf(m, " is %-8s", block->gdp->disk_name);
else
seq_printf(m, " is ????????");
/* Print devices features. */
substr = (device->features & DASD_FEATURE_READONLY) ? "(ro)" : " ";
seq_printf(m, "%4s: ", substr);
/* Print device status information. */
switch (device->state) {
case DASD_STATE_NEW:
seq_printf(m, "new");
break;
case DASD_STATE_KNOWN:
seq_printf(m, "detected");
break;
case DASD_STATE_BASIC:
seq_printf(m, "basic");
break;
case DASD_STATE_UNFMT:
seq_printf(m, "unformatted");
break;
case DASD_STATE_READY:
case DASD_STATE_ONLINE:
seq_printf(m, "active ");
if (dasd_check_blocksize(block->bp_block))
seq_printf(m, "n/f ");
else
seq_printf(m,
"at blocksize: %u, %lu blocks, %lu MB",
block->bp_block, block->blocks,
((block->bp_block >> 9) *
block->blocks) >> 11);
break;
default:
seq_printf(m, "no stat");
break;
}
dasd_put_device(device);
if (dasd_probeonly)
seq_printf(m, "(probeonly)");
seq_printf(m, "\n");
return 0;
}
static void *dasd_devices_start(struct seq_file *m, loff_t *pos)
{
if (*pos >= dasd_max_devindex)
return NULL;
return (void *)((unsigned long) *pos + 1);
}
static void *dasd_devices_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return dasd_devices_start(m, pos);
}
static void dasd_devices_stop(struct seq_file *m, void *v)
{
}
static const struct seq_operations dasd_devices_seq_ops = {
.start = dasd_devices_start,
.next = dasd_devices_next,
.stop = dasd_devices_stop,
.show = dasd_devices_show,
};
#ifdef CONFIG_DASD_PROFILE
static int dasd_stats_all_block_on(void)
{
int i, rc;
struct dasd_device *device;
rc = 0;
for (i = 0; i < dasd_max_devindex; ++i) {
device = dasd_device_from_devindex(i);
if (IS_ERR(device))
continue;
if (device->block)
rc = dasd_profile_on(&device->block->profile);
dasd_put_device(device);
if (rc)
return rc;
}
return 0;
}
static void dasd_stats_all_block_off(void)
{
int i;
struct dasd_device *device;
for (i = 0; i < dasd_max_devindex; ++i) {
device = dasd_device_from_devindex(i);
if (IS_ERR(device))
continue;
if (device->block)
dasd_profile_off(&device->block->profile);
dasd_put_device(device);
}
}
static void dasd_stats_all_block_reset(void)
{
int i;
struct dasd_device *device;
for (i = 0; i < dasd_max_devindex; ++i) {
device = dasd_device_from_devindex(i);
if (IS_ERR(device))
continue;
if (device->block)
dasd_profile_reset(&device->block->profile);
dasd_put_device(device);
}
}
static void dasd_statistics_array(struct seq_file *m, unsigned int *array, int factor)
{
int i;
for (i = 0; i < 32; i++) {
seq_printf(m, "%7d ", array[i] / factor);
if (i == 15)
seq_putc(m, '\n');
}
seq_putc(m, '\n');
}
#endif /* CONFIG_DASD_PROFILE */
static int dasd_stats_proc_show(struct seq_file *m, void *v)
{
#ifdef CONFIG_DASD_PROFILE
struct dasd_profile_info *prof;
int factor;
spin_lock_bh(&dasd_global_profile.lock);
prof = dasd_global_profile.data;
if (!prof) {
spin_unlock_bh(&dasd_global_profile.lock);
seq_printf(m, "Statistics are off - they might be "
"switched on using 'echo set on > "
"/proc/dasd/statistics'\n");
return 0;
}
/* prevent counter 'overflow' on output */
for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999;
factor *= 10);
seq_printf(m, "%d dasd I/O requests\n", prof->dasd_io_reqs);
seq_printf(m, "with %u sectors(512B each)\n",
prof->dasd_io_sects);
seq_printf(m, "Scale Factor is %d\n", factor);
seq_printf(m,
" __<4 ___8 __16 __32 __64 _128 "
" _256 _512 __1k __2k __4k __8k "
" _16k _32k _64k 128k\n");
seq_printf(m,
" _256 _512 __1M __2M __4M __8M "
" _16M _32M _64M 128M 256M 512M "
" __1G __2G __4G " " _>4G\n");
seq_printf(m, "Histogram of sizes (512B secs)\n");
dasd_statistics_array(m, prof->dasd_io_secs, factor);
seq_printf(m, "Histogram of I/O times (microseconds)\n");
dasd_statistics_array(m, prof->dasd_io_times, factor);
seq_printf(m, "Histogram of I/O times per sector\n");
dasd_statistics_array(m, prof->dasd_io_timps, factor);
seq_printf(m, "Histogram of I/O time till ssch\n");
dasd_statistics_array(m, prof->dasd_io_time1, factor);
seq_printf(m, "Histogram of I/O time between ssch and irq\n");
dasd_statistics_array(m, prof->dasd_io_time2, factor);
seq_printf(m, "Histogram of I/O time between ssch "
"and irq per sector\n");
dasd_statistics_array(m, prof->dasd_io_time2ps, factor);
seq_printf(m, "Histogram of I/O time between irq and end\n");
dasd_statistics_array(m, prof->dasd_io_time3, factor);
seq_printf(m, "# of req in chanq at enqueuing (1..32) \n");
dasd_statistics_array(m, prof->dasd_io_nr_req, factor);
spin_unlock_bh(&dasd_global_profile.lock);
#else
seq_printf(m, "Statistics are not activated in this kernel\n");
#endif
return 0;
}
static int dasd_stats_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, dasd_stats_proc_show, NULL);
}
static ssize_t dasd_stats_proc_write(struct file *file,
const char __user *user_buf, size_t user_len, loff_t *pos)
{
#ifdef CONFIG_DASD_PROFILE
char *buffer, *str;
int rc;
if (user_len > 65536)
user_len = 65536;
buffer = dasd_get_user_string(user_buf, user_len);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
/* check for valid verbs */
str = skip_spaces(buffer);
if (strncmp(str, "set", 3) == 0 && isspace(str[3])) {
/* 'set xxx' was given */
str = skip_spaces(str + 4);
if (strcmp(str, "on") == 0) {
/* switch on statistics profiling */
rc = dasd_stats_all_block_on();
if (rc) {
dasd_stats_all_block_off();
goto out_error;
}
rc = dasd_profile_on(&dasd_global_profile);
if (rc) {
dasd_stats_all_block_off();
goto out_error;
}
dasd_profile_reset(&dasd_global_profile);
dasd_global_profile_level = DASD_PROFILE_ON;
pr_info("The statistics feature has been switched "
"on\n");
} else if (strcmp(str, "off") == 0) {
/* switch off statistics profiling */
dasd_global_profile_level = DASD_PROFILE_OFF;
dasd_profile_off(&dasd_global_profile);
dasd_stats_all_block_off();
pr_info("The statistics feature has been switched "
"off\n");
} else
goto out_parse_error;
} else if (strncmp(str, "reset", 5) == 0) {
/* reset the statistics */
dasd_profile_reset(&dasd_global_profile);
dasd_stats_all_block_reset();
pr_info("The statistics have been reset\n");
} else
goto out_parse_error;
vfree(buffer);
return user_len;
out_parse_error:
rc = -EINVAL;
pr_warn("%s is not a supported value for /proc/dasd/statistics\n", str);
out_error:
vfree(buffer);
return rc;
#else
pr_warn("/proc/dasd/statistics: is not activated in this kernel\n");
return user_len;
#endif /* CONFIG_DASD_PROFILE */
}
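/*
 * Illustrative usage of the verbs parsed above:
 *
 *	echo set on > /proc/dasd/statistics
 *	echo set off > /proc/dasd/statistics
 *	echo reset > /proc/dasd/statistics
 */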
static const struct proc_ops dasd_stats_proc_ops = {
.proc_open = dasd_stats_proc_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = single_release,
.proc_write = dasd_stats_proc_write,
};
/*
* Create dasd proc-fs entries.
* In case creation failed, cleanup and return -ENOENT.
*/
int
dasd_proc_init(void)
{
dasd_proc_root_entry = proc_mkdir("dasd", NULL);
if (!dasd_proc_root_entry)
goto out_nodasd;
dasd_devices_entry = proc_create_seq("devices", 0444,
dasd_proc_root_entry,
&dasd_devices_seq_ops);
if (!dasd_devices_entry)
goto out_nodevices;
dasd_statistics_entry = proc_create("statistics",
S_IFREG | S_IRUGO | S_IWUSR,
dasd_proc_root_entry,
&dasd_stats_proc_ops);
if (!dasd_statistics_entry)
goto out_nostatistics;
return 0;
out_nostatistics:
remove_proc_entry("devices", dasd_proc_root_entry);
out_nodevices:
remove_proc_entry("dasd", NULL);
out_nodasd:
return -ENOENT;
}
void
dasd_proc_exit(void)
{
remove_proc_entry("devices", dasd_proc_root_entry);
remove_proc_entry("statistics", dasd_proc_root_entry);
remove_proc_entry("dasd", NULL);
}
| linux-master | drivers/s390/block/dasd_proc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <[email protected]>
* Horst Hummel <[email protected]>
* Carsten Otte <[email protected]>
* Martin Schwidefsky <[email protected]>
* Bugreports.to..: <[email protected]>
* Copyright IBM Corp. 1999, 2009
* EMC Symmetrix ioctl Copyright EMC Corporation, 2008
* Author.........: Nigel Hislop <[email protected]>
*/
#define KMSG_COMPONENT "dasd-eckd"
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h> /* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>
#include <asm/schid.h>
#include <asm/chpid.h>
#include "dasd_int.h"
#include "dasd_eckd.h"
#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"
/*
 * raw track access always maps to 64k in memory,
 * i.e. 16 blocks of 4k per track
 */
#define DASD_RAW_BLOCK_PER_TRACK 16
#define DASD_RAW_BLOCKSIZE 4096
/* 64k are 128 x 512 byte sectors */
#define DASD_RAW_SECTORS_PER_TRACK 128
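/*
 * Sanity check on the numbers above: 16 blocks * 4096 bytes and
 * 128 sectors * 512 bytes both equal the 64k per-track buffer.
 */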
MODULE_LICENSE("GPL");
static struct dasd_discipline dasd_eckd_discipline;
/* The ccw bus type uses this table to find devices that it sends to
* dasd_eckd_probe */
static struct ccw_device_id dasd_eckd_ids[] = {
{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
static struct ccw_driver dasd_eckd_driver; /* see below */
static void *rawpadpage;
#define INIT_CQR_OK 0
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2
/* emergency request for reserve/release */
static struct {
struct dasd_ccw_req cqr;
struct ccw1 ccw;
char data[32];
} *dasd_reserve_req;
static DEFINE_MUTEX(dasd_reserve_mutex);
static struct {
struct dasd_ccw_req cqr;
struct ccw1 ccw[2];
char data[40];
} *dasd_vol_info_req;
static DEFINE_MUTEX(dasd_vol_info_mutex);
struct ext_pool_exhaust_work_data {
struct work_struct worker;
struct dasd_device *device;
struct dasd_device *base;
};
/* definitions for the path verification worker */
struct pe_handler_work_data {
struct work_struct worker;
struct dasd_device *device;
struct dasd_ccw_req cqr;
struct ccw1 ccw;
__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
int isglobal;
__u8 tbvpm;
__u8 fcsecpm;
};
static struct pe_handler_work_data *pe_handler_worker;
static DEFINE_MUTEX(dasd_pe_handler_mutex);
struct check_attention_work_data {
struct work_struct worker;
struct dasd_device *device;
__u8 lpum;
};
static int dasd_eckd_ext_pool_id(struct dasd_device *);
static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
struct dasd_device *, struct dasd_device *,
unsigned int, int, unsigned int, unsigned int,
unsigned int, unsigned int);
static int dasd_eckd_query_pprc_status(struct dasd_device *,
struct dasd_pprc_data_sc4 *);
/* initial attempt at a probe function. this can be simplified once
* the other detection code is gone */
static int
dasd_eckd_probe (struct ccw_device *cdev)
{
int ret;
/* set ECKD specific ccw-device options */
ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
if (ret) {
DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
"dasd_eckd_probe: could not set "
"ccw-device options");
return ret;
}
ret = dasd_generic_probe(cdev);
return ret;
}
static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}
static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140
/* head and record addresses of count_area read in analysis ccw */
static const int count_area_head[] = { 0, 0, 0, 0, 1 };
static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
return (d1 + (d2 - 1)) / d2;
}
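/* e.g. ceil_quot(10, 4) == 3 and ceil_quot(8, 4) == 2 */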
static unsigned int
recs_per_track(struct dasd_eckd_characteristics * rdc,
unsigned int kl, unsigned int dl)
{
int dn, kn;
switch (rdc->dev_type) {
case 0x3380:
if (kl)
return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
ceil_quot(dl + 12, 32));
else
return 1499 / (15 + ceil_quot(dl + 12, 32));
case 0x3390:
dn = ceil_quot(dl + 6, 232) + 1;
if (kl) {
kn = ceil_quot(kl + 6, 232) + 1;
return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
9 + ceil_quot(dl + 6 * dn, 34));
} else
return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
case 0x9345:
dn = ceil_quot(dl + 6, 232) + 1;
if (kl) {
kn = ceil_quot(kl + 6, 232) + 1;
return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
ceil_quot(dl + 6 * dn, 34));
} else
return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
}
return 0;
}
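/*
 * Worked example for the 3390 branch above with kl = 0 and dl = 4096:
 * dn = ceil_quot(4102, 232) + 1 = 19, so the function returns
 * 1729 / (10 + 9 + ceil_quot(4096 + 6 * 19, 34)) = 1729 / 143 = 12
 * records per track, matching the usual 12 x 4k blocks on a 3390 track.
 */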
static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
{
geo->cyl = (__u16) cyl;
geo->head = cyl >> 16;
geo->head <<= 4;
geo->head |= head;
}
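/*
 * Worked example: for cyl = 0x12345 and head = 7 the 16-bit cylinder
 * field gets 0x2345 while the head field carries the cylinder overflow
 * in its high nibble: (0x1 << 4) | 7 = 0x17.
 */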
/*
* calculate failing track from sense data depending if
* it is an EAV device or not
*/
static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
sector_t *track)
{
struct dasd_eckd_private *private = device->private;
u8 *sense = NULL;
u32 cyl;
u8 head;
sense = dasd_get_sense(irb);
if (!sense) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"ESE error no sense data\n");
return -EINVAL;
}
if (!(sense[27] & DASD_SENSE_BIT_2)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"ESE error no valid track data\n");
return -EINVAL;
}
if (sense[27] & DASD_SENSE_BIT_3) {
/* enhanced addressing */
cyl = sense[30] << 20;
cyl |= (sense[31] & 0xF0) << 12;
cyl |= sense[28] << 8;
cyl |= sense[29];
} else {
cyl = sense[29] << 8;
cyl |= sense[30];
}
head = sense[31] & 0x0F;
*track = cyl * private->rdc_data.trk_per_cyl + head;
return 0;
}
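/*
 * Worked example for the enhanced addressing branch above, with the
 * (hypothetical) sense bytes 28..31 = 0x01 0x02 0x03 0x45:
 * cyl = 0x03 << 20 | (0x45 & 0xF0) << 12 | 0x01 << 8 | 0x02 = 0x340102,
 * head = 0x45 & 0x0F = 5.
 */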
static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
int rc;
rc = get_phys_clock(&data->ep_sys_time);
/*
* Ignore return code if XRC is not supported or
* sync clock is switched off
*/
if ((rc && !private->rdc_data.facilities.XRC_supported) ||
rc == -EOPNOTSUPP || rc == -EACCES)
return 0;
/* switch on System Time Stamp - needed for XRC Support */
data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
if (ccw) {
ccw->count = sizeof(struct DE_eckd_data);
ccw->flags |= CCW_FLAG_SLI;
}
return rc;
}
static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
unsigned int totrk, int cmd, struct dasd_device *device,
int blksize)
{
struct dasd_eckd_private *private = device->private;
u16 heads, beghead, endhead;
u32 begcyl, endcyl;
int rc = 0;
if (ccw) {
ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
ccw->flags = 0;
ccw->count = 16;
ccw->cda = (__u32)virt_to_phys(data);
}
memset(data, 0, sizeof(struct DE_eckd_data));
switch (cmd) {
case DASD_ECKD_CCW_READ_HOME_ADDRESS:
case DASD_ECKD_CCW_READ_RECORD_ZERO:
case DASD_ECKD_CCW_READ:
case DASD_ECKD_CCW_READ_MT:
case DASD_ECKD_CCW_READ_CKD:
case DASD_ECKD_CCW_READ_CKD_MT:
case DASD_ECKD_CCW_READ_KD:
case DASD_ECKD_CCW_READ_KD_MT:
data->mask.perm = 0x1;
data->attributes.operation = private->attrib.operation;
break;
case DASD_ECKD_CCW_READ_COUNT:
data->mask.perm = 0x1;
data->attributes.operation = DASD_BYPASS_CACHE;
break;
case DASD_ECKD_CCW_READ_TRACK:
case DASD_ECKD_CCW_READ_TRACK_DATA:
data->mask.perm = 0x1;
data->attributes.operation = private->attrib.operation;
data->blk_size = 0;
break;
case DASD_ECKD_CCW_WRITE:
case DASD_ECKD_CCW_WRITE_MT:
case DASD_ECKD_CCW_WRITE_KD:
case DASD_ECKD_CCW_WRITE_KD_MT:
data->mask.perm = 0x02;
data->attributes.operation = private->attrib.operation;
rc = set_timestamp(ccw, data, device);
break;
case DASD_ECKD_CCW_WRITE_CKD:
case DASD_ECKD_CCW_WRITE_CKD_MT:
data->attributes.operation = DASD_BYPASS_CACHE;
rc = set_timestamp(ccw, data, device);
break;
case DASD_ECKD_CCW_ERASE:
case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
data->mask.perm = 0x3;
data->mask.auth = 0x1;
data->attributes.operation = DASD_BYPASS_CACHE;
rc = set_timestamp(ccw, data, device);
break;
case DASD_ECKD_CCW_WRITE_FULL_TRACK:
data->mask.perm = 0x03;
data->attributes.operation = private->attrib.operation;
data->blk_size = 0;
break;
case DASD_ECKD_CCW_WRITE_TRACK_DATA:
data->mask.perm = 0x02;
data->attributes.operation = private->attrib.operation;
data->blk_size = blksize;
rc = set_timestamp(ccw, data, device);
break;
default:
dev_err(&device->cdev->dev,
"0x%x is not a known command\n", cmd);
break;
}
data->attributes.mode = 0x3; /* ECKD */
if ((private->rdc_data.cu_type == 0x2105 ||
private->rdc_data.cu_type == 0x2107 ||
private->rdc_data.cu_type == 0x1750)
&& !(private->uses_cdl && trk < 2))
data->ga_extended |= 0x40; /* Regular Data Format Mode */
heads = private->rdc_data.trk_per_cyl;
begcyl = trk / heads;
beghead = trk % heads;
endcyl = totrk / heads;
endhead = totrk % heads;
/* check for sequential prestage - enhance cylinder range */
if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
data->attributes.operation == DASD_SEQ_ACCESS) {
if (endcyl + private->attrib.nr_cyl < private->real_cyl)
endcyl += private->attrib.nr_cyl;
else
endcyl = (private->real_cyl - 1);
}
set_ch_t(&data->beg_ext, begcyl, beghead);
set_ch_t(&data->end_ext, endcyl, endhead);
return rc;
}
static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
unsigned int trk, unsigned int rec_on_trk,
int count, int cmd, struct dasd_device *device,
unsigned int reclen, unsigned int tlf)
{
struct dasd_eckd_private *private = device->private;
int sector;
int dn, d;
if (ccw) {
ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
ccw->flags = 0;
if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
ccw->count = 22;
else
ccw->count = 20;
ccw->cda = (__u32)virt_to_phys(data);
}
memset(data, 0, sizeof(*data));
sector = 0;
if (rec_on_trk) {
switch (private->rdc_data.dev_type) {
case 0x3390:
dn = ceil_quot(reclen + 6, 232);
d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
break;
case 0x3380:
d = 7 + ceil_quot(reclen + 12, 32);
sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
break;
}
}
data->sector = sector;
	/* note: the meaning of count depends on the operation; for
	 * record-based I/O it's the number of records, while for
	 * track-based I/O it's the number of tracks
	 */
data->count = count;
switch (cmd) {
case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
data->operation.orientation = 0x3;
data->operation.operation = 0x03;
break;
case DASD_ECKD_CCW_READ_HOME_ADDRESS:
data->operation.orientation = 0x3;
data->operation.operation = 0x16;
break;
case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
data->operation.orientation = 0x1;
data->operation.operation = 0x03;
data->count++;
break;
case DASD_ECKD_CCW_READ_RECORD_ZERO:
data->operation.orientation = 0x3;
data->operation.operation = 0x16;
data->count++;
break;
case DASD_ECKD_CCW_WRITE:
case DASD_ECKD_CCW_WRITE_MT:
case DASD_ECKD_CCW_WRITE_KD:
case DASD_ECKD_CCW_WRITE_KD_MT:
data->auxiliary.length_valid = 0x1;
data->length = reclen;
data->operation.operation = 0x01;
break;
case DASD_ECKD_CCW_WRITE_CKD:
case DASD_ECKD_CCW_WRITE_CKD_MT:
data->auxiliary.length_valid = 0x1;
data->length = reclen;
data->operation.operation = 0x03;
break;
case DASD_ECKD_CCW_WRITE_FULL_TRACK:
data->operation.orientation = 0x0;
data->operation.operation = 0x3F;
data->extended_operation = 0x11;
data->length = 0;
data->extended_parameter_length = 0x02;
if (data->count > 8) {
data->extended_parameter[0] = 0xFF;
data->extended_parameter[1] = 0xFF;
data->extended_parameter[1] <<= (16 - count);
} else {
data->extended_parameter[0] = 0xFF;
data->extended_parameter[0] <<= (8 - count);
data->extended_parameter[1] = 0x00;
}
data->sector = 0xFF;
break;
case DASD_ECKD_CCW_WRITE_TRACK_DATA:
data->auxiliary.length_valid = 0x1;
data->length = reclen; /* not tlf, as one might think */
data->operation.operation = 0x3F;
data->extended_operation = 0x23;
break;
case DASD_ECKD_CCW_READ:
case DASD_ECKD_CCW_READ_MT:
case DASD_ECKD_CCW_READ_KD:
case DASD_ECKD_CCW_READ_KD_MT:
data->auxiliary.length_valid = 0x1;
data->length = reclen;
data->operation.operation = 0x06;
break;
case DASD_ECKD_CCW_READ_CKD:
case DASD_ECKD_CCW_READ_CKD_MT:
data->auxiliary.length_valid = 0x1;
data->length = reclen;
data->operation.operation = 0x16;
break;
case DASD_ECKD_CCW_READ_COUNT:
data->operation.operation = 0x06;
break;
case DASD_ECKD_CCW_READ_TRACK:
data->operation.orientation = 0x1;
data->operation.operation = 0x0C;
data->extended_parameter_length = 0;
data->sector = 0xFF;
break;
case DASD_ECKD_CCW_READ_TRACK_DATA:
data->auxiliary.length_valid = 0x1;
data->length = tlf;
data->operation.operation = 0x0C;
break;
case DASD_ECKD_CCW_ERASE:
data->length = reclen;
data->auxiliary.length_valid = 0x1;
data->operation.operation = 0x0b;
break;
default:
DBF_DEV_EVENT(DBF_ERR, device,
"fill LRE unknown opcode 0x%x", cmd);
BUG();
}
set_ch_t(&data->seek_addr,
trk / private->rdc_data.trk_per_cyl,
trk % private->rdc_data.trk_per_cyl);
data->search_arg.cyl = data->seek_addr.cyl;
data->search_arg.head = data->seek_addr.head;
data->search_arg.record = rec_on_trk;
}
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
unsigned int trk, unsigned int totrk, int cmd,
struct dasd_device *basedev, struct dasd_device *startdev,
unsigned int format, unsigned int rec_on_trk, int count,
unsigned int blksize, unsigned int tlf)
{
struct dasd_eckd_private *basepriv, *startpriv;
struct LRE_eckd_data *lredata;
struct DE_eckd_data *dedata;
int rc = 0;
basepriv = basedev->private;
startpriv = startdev->private;
dedata = &pfxdata->define_extent;
lredata = &pfxdata->locate_record;
ccw->cmd_code = DASD_ECKD_CCW_PFX;
ccw->flags = 0;
if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
ccw->count = sizeof(*pfxdata) + 2;
ccw->cda = (__u32)virt_to_phys(pfxdata);
memset(pfxdata, 0, sizeof(*pfxdata) + 2);
} else {
ccw->count = sizeof(*pfxdata);
ccw->cda = (__u32)virt_to_phys(pfxdata);
memset(pfxdata, 0, sizeof(*pfxdata));
}
/* prefix data */
if (format > 1) {
DBF_DEV_EVENT(DBF_ERR, basedev,
"PFX LRE unknown format 0x%x", format);
BUG();
return -EINVAL;
}
pfxdata->format = format;
pfxdata->base_address = basepriv->conf.ned->unit_addr;
pfxdata->base_lss = basepriv->conf.ned->ID;
pfxdata->validity.define_extent = 1;
/* private uid is kept up to date, conf_data may be outdated */
if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
pfxdata->validity.verify_base = 1;
if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
pfxdata->validity.verify_base = 1;
pfxdata->validity.hyper_pav = 1;
}
rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);
/*
* For some commands the System Time Stamp is set in the define extent
* data when XRC is supported. The validity of the time stamp must be
* reflected in the prefix data as well.
*/
if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */
if (format == 1) {
locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
basedev, blksize, tlf);
}
return rc;
}
static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
unsigned int trk, unsigned int totrk, int cmd,
struct dasd_device *basedev, struct dasd_device *startdev)
{
return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
0, 0, 0, 0, 0);
}
static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
unsigned int rec_on_trk, int no_rec, int cmd,
struct dasd_device * device, int reclen)
{
struct dasd_eckd_private *private = device->private;
int sector;
int dn, d;
DBF_DEV_EVENT(DBF_INFO, device,
"Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
trk, rec_on_trk, no_rec, cmd, reclen);
ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
ccw->flags = 0;
ccw->count = 16;
ccw->cda = (__u32)virt_to_phys(data);
memset(data, 0, sizeof(struct LO_eckd_data));
sector = 0;
if (rec_on_trk) {
switch (private->rdc_data.dev_type) {
case 0x3390:
dn = ceil_quot(reclen + 6, 232);
d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
break;
case 0x3380:
d = 7 + ceil_quot(reclen + 12, 32);
sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
break;
}
}
data->sector = sector;
data->count = no_rec;
switch (cmd) {
case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
data->operation.orientation = 0x3;
data->operation.operation = 0x03;
break;
case DASD_ECKD_CCW_READ_HOME_ADDRESS:
data->operation.orientation = 0x3;
data->operation.operation = 0x16;
break;
case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
data->operation.orientation = 0x1;
data->operation.operation = 0x03;
data->count++;
break;
case DASD_ECKD_CCW_READ_RECORD_ZERO:
data->operation.orientation = 0x3;
data->operation.operation = 0x16;
data->count++;
break;
case DASD_ECKD_CCW_WRITE:
case DASD_ECKD_CCW_WRITE_MT:
case DASD_ECKD_CCW_WRITE_KD:
case DASD_ECKD_CCW_WRITE_KD_MT:
data->auxiliary.last_bytes_used = 0x1;
data->length = reclen;
data->operation.operation = 0x01;
break;
case DASD_ECKD_CCW_WRITE_CKD:
case DASD_ECKD_CCW_WRITE_CKD_MT:
data->auxiliary.last_bytes_used = 0x1;
data->length = reclen;
data->operation.operation = 0x03;
break;
case DASD_ECKD_CCW_READ:
case DASD_ECKD_CCW_READ_MT:
case DASD_ECKD_CCW_READ_KD:
case DASD_ECKD_CCW_READ_KD_MT:
data->auxiliary.last_bytes_used = 0x1;
data->length = reclen;
data->operation.operation = 0x06;
break;
case DASD_ECKD_CCW_READ_CKD:
case DASD_ECKD_CCW_READ_CKD_MT:
data->auxiliary.last_bytes_used = 0x1;
data->length = reclen;
data->operation.operation = 0x16;
break;
case DASD_ECKD_CCW_READ_COUNT:
data->operation.operation = 0x06;
break;
case DASD_ECKD_CCW_ERASE:
data->length = reclen;
data->auxiliary.last_bytes_used = 0x1;
data->operation.operation = 0x0b;
break;
default:
DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
"opcode 0x%x", cmd);
}
set_ch_t(&data->seek_addr,
trk / private->rdc_data.trk_per_cyl,
trk % private->rdc_data.trk_per_cyl);
data->search_arg.cyl = data->seek_addr.cyl;
data->search_arg.head = data->seek_addr.head;
data->search_arg.record = rec_on_trk;
}
/*
* Returns 1 if the block is one of the special blocks that needs
* to get read/written with the KD variant of the command.
* That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
* DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
* Luckily the KD variants differ only by one bit (0x08) from the
* normal variant. So don't wonder about code like:
* if (dasd_eckd_cdl_special(blk_per_trk, recid))
* ccw->cmd_code |= 0x8;
*/
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
if (recid < 3)
return 1;
if (recid < blk_per_trk)
return 0;
if (recid < 2 * blk_per_trk)
return 1;
return 0;
}
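/*
 * Example with a hypothetical blk_per_trk of 12: records 0-2 (the first
 * records on track 0) and records 12-23 (track 1, the label records) are
 * special, records 3-11 and everything from record 24 onwards are not.
 */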
/*
* Returns the record size for the special blocks of the cdl format.
* Only returns something useful if dasd_eckd_cdl_special is true
* for the recid.
*/
static inline int
dasd_eckd_cdl_reclen(int recid)
{
if (recid < 3)
return sizes_trk0[recid];
return LABEL_SIZE;
}
/* create unique id from private structure. */
static void create_uid(struct dasd_conf *conf, struct dasd_uid *uid)
{
int count;
memset(uid, 0, sizeof(struct dasd_uid));
memcpy(uid->vendor, conf->ned->HDA_manufacturer,
sizeof(uid->vendor) - 1);
EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
memcpy(uid->serial, &conf->ned->serial,
sizeof(uid->serial) - 1);
EBCASC(uid->serial, sizeof(uid->serial) - 1);
uid->ssid = conf->gneq->subsystemID;
uid->real_unit_addr = conf->ned->unit_addr;
if (conf->sneq) {
uid->type = conf->sneq->sua_flags;
if (uid->type == UA_BASE_PAV_ALIAS)
uid->base_unit_addr = conf->sneq->base_unit_addr;
} else {
uid->type = UA_BASE_DEVICE;
}
if (conf->vdsneq) {
for (count = 0; count < 16; count++) {
sprintf(uid->vduit+2*count, "%02x",
conf->vdsneq->uit[count]);
}
}
}
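/*
 * A UID assembled here prints (see dasd_eckd_get_uid_string()) as
 * vendor.serial.ssid.unit_addr, e.g. the made-up
 * "IBM.750000000ABC01.5600.2a", with the vduit appended for devices
 * that carry a virtual device SNEQ.
 */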
/*
* Generate device unique id that specifies the physical device.
*/
static int dasd_eckd_generate_uid(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
unsigned long flags;
if (!private)
return -ENODEV;
if (!private->conf.ned || !private->conf.gneq)
return -ENODEV;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
create_uid(&private->conf, &private->uid);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return 0;
}
static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
{
struct dasd_eckd_private *private = device->private;
unsigned long flags;
if (private) {
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
*uid = private->uid;
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return 0;
}
return -EINVAL;
}
/*
* compare device UID with data of a given dasd_eckd_private structure
* return 0 for match
*/
static int dasd_eckd_compare_path_uid(struct dasd_device *device,
struct dasd_conf *path_conf)
{
struct dasd_uid device_uid;
struct dasd_uid path_uid;
create_uid(path_conf, &path_uid);
dasd_eckd_get_uid(device, &device_uid);
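	/*
	 * memcmp() over the whole structure is safe here because
	 * create_uid() zeroes the uid, including any padding, before
	 * filling it in.
	 */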
return memcmp(&device_uid, &path_uid, sizeof(struct dasd_uid));
}
static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
struct dasd_ccw_req *cqr,
__u8 *rcd_buffer,
__u8 lpm)
{
struct ccw1 *ccw;
/*
* buffer has to start with EBCDIC "V1.0" to show
* support for virtual device SNEQ
*/
rcd_buffer[0] = 0xE5;
rcd_buffer[1] = 0xF1;
rcd_buffer[2] = 0x4B;
rcd_buffer[3] = 0xF0;
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RCD;
ccw->flags = 0;
ccw->cda = (__u32)virt_to_phys(rcd_buffer);
ccw->count = DASD_ECKD_RCD_DATA_SIZE;
cqr->magic = DASD_ECKD_MAGIC;
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->expires = 10*HZ;
cqr->lpm = lpm;
cqr->retries = 256;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}
/*
 * Wakeup helper for read_conf:
 * if the cqr is not done and needs some error recovery,
* the buffer has to be re-initialized with the EBCDIC "V1.0"
* to show support for virtual device SNEQ
*/
static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
{
struct ccw1 *ccw;
__u8 *rcd_buffer;
if (cqr->status != DASD_CQR_DONE) {
ccw = cqr->cpaddr;
rcd_buffer = phys_to_virt(ccw->cda);
		memset(rcd_buffer, 0, DASD_ECKD_RCD_DATA_SIZE);
rcd_buffer[0] = 0xE5;
rcd_buffer[1] = 0xF1;
rcd_buffer[2] = 0x4B;
rcd_buffer[3] = 0xF0;
}
dasd_wakeup_cb(cqr, data);
}
static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
struct dasd_ccw_req *cqr,
__u8 *rcd_buffer,
__u8 lpm)
{
struct ciw *ciw;
int rc;
/*
* sanity check: scan for RCD command in extended SenseID data
* some devices do not support RCD
*/
ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
return -EOPNOTSUPP;
dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
cqr->retries = 5;
cqr->callback = read_conf_cb;
rc = dasd_sleep_on_immediatly(cqr);
return rc;
}
static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
void **rcd_buffer,
int *rcd_buffer_size, __u8 lpm)
{
struct ciw *ciw;
char *rcd_buf = NULL;
int ret;
struct dasd_ccw_req *cqr;
/*
* sanity check: scan for RCD command in extended SenseID data
* some devices do not support RCD
*/
ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
ret = -EOPNOTSUPP;
goto out_error;
}
rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
if (!rcd_buf) {
ret = -ENOMEM;
goto out_error;
}
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
				   0, /* use rcd_buf as data area */
device, NULL);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate RCD request");
ret = -ENOMEM;
goto out_error;
}
dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
cqr->callback = read_conf_cb;
ret = dasd_sleep_on(cqr);
/*
	 * on success we update the caller's input parameters
*/
dasd_sfree_request(cqr, cqr->memdev);
if (ret)
goto out_error;
*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
*rcd_buffer = rcd_buf;
return 0;
out_error:
kfree(rcd_buf);
*rcd_buffer = NULL;
*rcd_buffer_size = 0;
return ret;
}
static int dasd_eckd_identify_conf_parts(struct dasd_conf *conf)
{
struct dasd_sneq *sneq;
int i, count;
conf->ned = NULL;
conf->sneq = NULL;
conf->vdsneq = NULL;
conf->gneq = NULL;
count = conf->len / sizeof(struct dasd_sneq);
sneq = (struct dasd_sneq *)conf->data;
for (i = 0; i < count; ++i) {
if (sneq->flags.identifier == 1 && sneq->format == 1)
conf->sneq = sneq;
else if (sneq->flags.identifier == 1 && sneq->format == 4)
conf->vdsneq = (struct vd_sneq *)sneq;
else if (sneq->flags.identifier == 2)
conf->gneq = (struct dasd_gneq *)sneq;
else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
conf->ned = (struct dasd_ned *)sneq;
sneq++;
}
if (!conf->ned || !conf->gneq) {
conf->ned = NULL;
conf->sneq = NULL;
conf->vdsneq = NULL;
conf->gneq = NULL;
return -EINVAL;
}
return 0;
}
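/*
 * Scan the configuration data for a generic NEQ and return its
 * path-access byte (the low three bits of byte 18); the callers map
 * a value of 0x02 to the nppm and 0x03 to the ppm path mask.
 */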
static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
{
struct dasd_gneq *gneq;
int i, count, found;
count = conf_len / sizeof(*gneq);
gneq = (struct dasd_gneq *)conf_data;
found = 0;
for (i = 0; i < count; ++i) {
if (gneq->flags.identifier == 2) {
found = 1;
break;
}
gneq++;
}
if (found)
return ((char *)gneq)[18] & 0x07;
else
return 0;
}
static void dasd_eckd_store_conf_data(struct dasd_device *device,
struct dasd_conf_data *conf_data, int chp)
{
struct dasd_eckd_private *private = device->private;
struct channel_path_desc_fmt0 *chp_desc;
struct subchannel_id sch_id;
void *cdp;
/*
* path handling and read_conf allocate data
* free it before replacing the pointer
* also replace the old private->conf_data pointer
* with the new one if this points to the same data
*/
cdp = device->path[chp].conf_data;
if (private->conf.data == cdp) {
private->conf.data = (void *)conf_data;
dasd_eckd_identify_conf_parts(&private->conf);
}
ccw_device_get_schid(device->cdev, &sch_id);
device->path[chp].conf_data = conf_data;
device->path[chp].cssid = sch_id.cssid;
device->path[chp].ssid = sch_id.ssid;
chp_desc = ccw_device_get_chp_desc(device->cdev, chp);
if (chp_desc)
device->path[chp].chpid = chp_desc->chpid;
kfree(chp_desc);
kfree(cdp);
}
static void dasd_eckd_clear_conf_data(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
int i;
private->conf.data = NULL;
private->conf.len = 0;
for (i = 0; i < 8; i++) {
kfree(device->path[i].conf_data);
device->path[i].conf_data = NULL;
device->path[i].cssid = 0;
device->path[i].ssid = 0;
device->path[i].chpid = 0;
dasd_path_notoper(device, i);
}
}
static void dasd_eckd_read_fc_security(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
u8 esm_valid;
u8 esm[8];
int chp;
int rc;
rc = chsc_scud(private->uid.ssid, (u64 *)esm, &esm_valid);
if (rc) {
for (chp = 0; chp < 8; chp++)
device->path[chp].fc_security = 0;
return;
}
for (chp = 0; chp < 8; chp++) {
if (esm_valid & (0x80 >> chp))
device->path[chp].fc_security = esm[chp];
else
device->path[chp].fc_security = 0;
}
}
static void dasd_eckd_get_uid_string(struct dasd_conf *conf,
char *print_uid)
{
struct dasd_uid uid;
create_uid(conf, &uid);
if (strlen(uid.vduit) > 0)
snprintf(print_uid, DASD_UID_STRLEN,
"%s.%s.%04x.%02x.%s",
uid.vendor, uid.serial, uid.ssid,
uid.real_unit_addr, uid.vduit);
else
snprintf(print_uid, DASD_UID_STRLEN,
"%s.%s.%04x.%02x",
uid.vendor, uid.serial, uid.ssid,
uid.real_unit_addr);
}
static int dasd_eckd_check_cabling(struct dasd_device *device,
void *conf_data, __u8 lpm)
{
char print_path_uid[DASD_UID_STRLEN], print_device_uid[DASD_UID_STRLEN];
struct dasd_eckd_private *private = device->private;
struct dasd_conf path_conf;
path_conf.data = conf_data;
path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
if (dasd_eckd_identify_conf_parts(&path_conf))
return 1;
if (dasd_eckd_compare_path_uid(device, &path_conf)) {
dasd_eckd_get_uid_string(&path_conf, print_path_uid);
dasd_eckd_get_uid_string(&private->conf, print_device_uid);
dev_err(&device->cdev->dev,
"Not all channel paths lead to the same device, path %02X leads to device %s instead of %s\n",
lpm, print_path_uid, print_device_uid);
return 1;
}
return 0;
}
static int dasd_eckd_read_conf(struct dasd_device *device)
{
void *conf_data;
int conf_len, conf_data_saved;
int rc, path_err, pos;
__u8 lpm, opm;
struct dasd_eckd_private *private;
private = device->private;
opm = ccw_device_get_path_mask(device->cdev);
conf_data_saved = 0;
path_err = 0;
/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm >>= 1) {
if (!(lpm & opm))
continue;
rc = dasd_eckd_read_conf_lpm(device, &conf_data,
&conf_len, lpm);
if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Read configuration data returned "
"error %d", rc);
return rc;
}
if (conf_data == NULL) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"No configuration data "
"retrieved");
/* no further analysis possible */
dasd_path_add_opm(device, opm);
continue; /* no error */
}
/* save first valid configuration data */
if (!conf_data_saved) {
/* initially clear previously stored conf_data */
dasd_eckd_clear_conf_data(device);
private->conf.data = conf_data;
private->conf.len = conf_len;
if (dasd_eckd_identify_conf_parts(&private->conf)) {
private->conf.data = NULL;
private->conf.len = 0;
kfree(conf_data);
continue;
}
/*
			 * build the device UID so that the data of
			 * other paths can be compared to it
*/
dasd_eckd_generate_uid(device);
conf_data_saved++;
} else if (dasd_eckd_check_cabling(device, conf_data, lpm)) {
dasd_path_add_cablepm(device, lpm);
path_err = -EINVAL;
kfree(conf_data);
continue;
}
pos = pathmask_to_pos(lpm);
dasd_eckd_store_conf_data(device, conf_data, pos);
switch (dasd_eckd_path_access(conf_data, conf_len)) {
case 0x02:
dasd_path_add_nppm(device, lpm);
break;
case 0x03:
dasd_path_add_ppm(device, lpm);
break;
}
if (!dasd_path_get_opm(device)) {
dasd_path_set_opm(device, lpm);
dasd_generic_path_operational(device);
} else {
dasd_path_add_opm(device, lpm);
}
}
return path_err;
}
static u32 get_fcx_max_data(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
int fcx_in_css, fcx_in_gneq, fcx_in_features;
unsigned int mdc;
int tpm;
if (dasd_nofcx)
return 0;
/* is transport mode supported? */
fcx_in_css = css_general_characteristics.fcx;
fcx_in_gneq = private->conf.gneq->reserved2[7] & 0x04;
fcx_in_features = private->features.feature[40] & 0x80;
tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
if (!tpm)
return 0;
mdc = ccw_device_get_mdc(device->cdev, 0);
if (mdc == 0) {
dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
return 0;
} else {
return (u32)mdc * FCX_MAX_DATA_FACTOR;
}
}
static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
struct dasd_eckd_private *private = device->private;
unsigned int mdc;
u32 fcx_max_data;
if (private->fcx_max_data) {
mdc = ccw_device_get_mdc(device->cdev, lpm);
if (mdc == 0) {
dev_warn(&device->cdev->dev,
"Detecting the maximum data size for zHPF "
"requests failed (rc=%d) for a new path %x\n",
mdc, lpm);
return mdc;
}
fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
if (fcx_max_data < private->fcx_max_data) {
dev_warn(&device->cdev->dev,
"The maximum data size for zHPF requests %u "
"on a new path %x is below the active maximum "
"%u\n", fcx_max_data, lpm,
private->fcx_max_data);
return -EACCES;
}
}
return 0;
}
static int rebuild_device_uid(struct dasd_device *device,
struct pe_handler_work_data *data)
{
struct dasd_eckd_private *private = device->private;
__u8 lpm, opm = dasd_path_get_opm(device);
int rc = -ENODEV;
for (lpm = 0x80; lpm; lpm >>= 1) {
if (!(lpm & opm))
continue;
memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
memset(&data->cqr, 0, sizeof(data->cqr));
data->cqr.cpaddr = &data->ccw;
rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
data->rcd_buffer,
lpm);
if (rc) {
if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
continue;
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Read configuration data "
"returned error %d", rc);
break;
}
memcpy(private->conf.data, data->rcd_buffer,
DASD_ECKD_RCD_DATA_SIZE);
if (dasd_eckd_identify_conf_parts(&private->conf)) {
rc = -ENODEV;
} else /* first valid path is enough */
break;
}
if (!rc)
rc = dasd_eckd_generate_uid(device);
return rc;
}
static void dasd_eckd_path_available_action(struct dasd_device *device,
struct pe_handler_work_data *data)
{
__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
struct dasd_conf_data *conf_data;
char print_uid[DASD_UID_STRLEN];
struct dasd_conf path_conf;
unsigned long flags;
int rc, pos;
opm = 0;
npm = 0;
ppm = 0;
epm = 0;
hpfpm = 0;
cablepm = 0;
for (lpm = 0x80; lpm; lpm >>= 1) {
if (!(lpm & data->tbvpm))
continue;
memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
memset(&data->cqr, 0, sizeof(data->cqr));
data->cqr.cpaddr = &data->ccw;
rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
data->rcd_buffer,
lpm);
if (!rc) {
switch (dasd_eckd_path_access(data->rcd_buffer,
DASD_ECKD_RCD_DATA_SIZE)
) {
case 0x02:
npm |= lpm;
break;
case 0x03:
ppm |= lpm;
break;
}
opm |= lpm;
} else if (rc == -EOPNOTSUPP) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"path verification: No configuration "
"data retrieved");
opm |= lpm;
} else if (rc == -EAGAIN) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"path verification: device is stopped,"
" try again later");
epm |= lpm;
} else {
dev_warn(&device->cdev->dev,
"Reading device feature codes failed "
"(rc=%d) for new path %x\n", rc, lpm);
continue;
}
if (verify_fcx_max_data(device, lpm)) {
opm &= ~lpm;
npm &= ~lpm;
ppm &= ~lpm;
hpfpm |= lpm;
continue;
}
/*
		 * save conf_data for the later comparison;
		 * rebuild_device_uid may have changed the
		 * original data
*/
memcpy(&path_rcd_buf, data->rcd_buffer,
DASD_ECKD_RCD_DATA_SIZE);
path_conf.data = (void *)&path_rcd_buf;
path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
if (dasd_eckd_identify_conf_parts(&path_conf)) {
path_conf.data = NULL;
path_conf.len = 0;
continue;
}
/*
* compare path UID with device UID only if at least
		 * one valid path is left;
		 * otherwise the device UID may have changed and
* the first working path UID will be used as device UID
*/
if (dasd_path_get_opm(device) &&
dasd_eckd_compare_path_uid(device, &path_conf)) {
/*
			 * the comparison was not successful;
			 * rebuild the device UID with at least one
			 * known path in case a z/VM hyperswap command
			 * has changed the device,
			 * then compare again;
			 * if either the rebuild or the recompare fails
			 * the path cannot be used
*/
if (rebuild_device_uid(device, data) ||
dasd_eckd_compare_path_uid(
device, &path_conf)) {
dasd_eckd_get_uid_string(&path_conf, print_uid);
dev_err(&device->cdev->dev,
"The newly added channel path %02X "
"will not be used because it leads "
"to a different device %s\n",
lpm, print_uid);
opm &= ~lpm;
npm &= ~lpm;
ppm &= ~lpm;
cablepm |= lpm;
continue;
}
}
conf_data = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL);
if (conf_data) {
memcpy(conf_data, data->rcd_buffer,
DASD_ECKD_RCD_DATA_SIZE);
} else {
/*
			 * the path is operational but its config data could
			 * not be stored due to a low memory condition;
			 * add it to the error path mask and schedule a path
			 * verification later so that it can be added again
*/
epm |= lpm;
}
pos = pathmask_to_pos(lpm);
dasd_eckd_store_conf_data(device, conf_data, pos);
/*
* There is a small chance that a path is lost again between
* above path verification and the following modification of
* the device opm mask. We could avoid that race here by using
* yet another path mask, but we rather deal with this unlikely
* situation in dasd_start_IO.
*/
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
if (!dasd_path_get_opm(device) && opm) {
dasd_path_set_opm(device, opm);
dasd_generic_path_operational(device);
} else {
dasd_path_add_opm(device, opm);
}
dasd_path_add_nppm(device, npm);
dasd_path_add_ppm(device, ppm);
if (epm) {
dasd_path_add_tbvpm(device, epm);
dasd_device_set_timer(device, 50);
}
dasd_path_add_cablepm(device, cablepm);
dasd_path_add_nohpfpm(device, hpfpm);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
dasd_path_create_kobj(device, pos);
}
}
static void do_pe_handler_work(struct work_struct *work)
{
struct pe_handler_work_data *data;
struct dasd_device *device;
data = container_of(work, struct pe_handler_work_data, worker);
device = data->device;
	/* delay path verification until the device is resumed */
if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
schedule_work(work);
return;
}
/* check if path verification already running and delay if so */
if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
schedule_work(work);
return;
}
if (data->tbvpm)
dasd_eckd_path_available_action(device, data);
if (data->fcsecpm)
dasd_eckd_read_fc_security(device);
clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
dasd_put_device(device);
if (data->isglobal)
mutex_unlock(&dasd_pe_handler_mutex);
else
kfree(data);
}
static int dasd_eckd_pe_handler(struct dasd_device *device,
__u8 tbvpm, __u8 fcsecpm)
{
struct pe_handler_work_data *data;
data = kzalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
if (!data) {
if (mutex_trylock(&dasd_pe_handler_mutex)) {
data = pe_handler_worker;
data->isglobal = 1;
} else {
return -ENOMEM;
}
}
INIT_WORK(&data->worker, do_pe_handler_work);
dasd_get_device(device);
data->device = device;
data->tbvpm = tbvpm;
data->fcsecpm = fcsecpm;
schedule_work(&data->worker);
return 0;
}
static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
{
struct dasd_eckd_private *private = device->private;
unsigned long flags;
if (!private->fcx_max_data)
private->fcx_max_data = get_fcx_max_data(device);
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
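	/* pm == 0 means: re-verify all currently not operational paths */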
dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
dasd_schedule_device_bh(device);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
static int dasd_eckd_read_features(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
struct dasd_psf_prssd_data *prssdp;
struct dasd_rssd_features *features;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
memset(&private->features, 0, sizeof(struct dasd_rssd_features));
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data) +
sizeof(struct dasd_rssd_features)),
device, NULL);
if (IS_ERR(cqr)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
"allocate initialization request");
return PTR_ERR(cqr);
}
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->retries = 256;
cqr->expires = 10 * HZ;
/* Prepare for Read Subsystem Data */
prssdp = (struct dasd_psf_prssd_data *) cqr->data;
memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = 0x41; /* Read Feature Codes */
/* all other bytes of prssdp must be zero */
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->cda = (__u32)virt_to_phys(prssdp);
/* Read Subsystem Data - feature codes */
features = (struct dasd_rssd_features *) (prssdp + 1);
memset(features, 0, sizeof(struct dasd_rssd_features));
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_rssd_features);
ccw->cda = (__u32)virt_to_phys(features);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on(cqr);
if (rc == 0) {
prssdp = (struct dasd_psf_prssd_data *) cqr->data;
features = (struct dasd_rssd_features *) (prssdp + 1);
memcpy(&private->features, features,
sizeof(struct dasd_rssd_features));
} else
dev_warn(&device->cdev->dev, "Reading device feature codes"
" failed with rc=%d\n", rc);
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
/* Read Volume Information - Volume Storage Query */
static int dasd_eckd_read_vol_info(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
struct dasd_psf_prssd_data *prssdp;
struct dasd_rssd_vsq *vsq;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int useglobal;
int rc;
/* This command cannot be executed on an alias device */
if (private->uid.type == UA_BASE_PAV_ALIAS ||
private->uid.type == UA_HYPER_PAV_ALIAS)
return 0;
useglobal = 0;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
sizeof(*prssdp) + sizeof(*vsq), device, NULL);
if (IS_ERR(cqr)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate initialization request");
mutex_lock(&dasd_vol_info_mutex);
useglobal = 1;
cqr = &dasd_vol_info_req->cqr;
memset(cqr, 0, sizeof(*cqr));
memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req));
cqr->cpaddr = &dasd_vol_info_req->ccw;
cqr->data = &dasd_vol_info_req->data;
cqr->magic = DASD_ECKD_MAGIC;
}
/* Prepare for Read Subsystem Data */
prssdp = cqr->data;
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = PSF_SUBORDER_VSQ; /* Volume Storage Query */
prssdp->lss = private->conf.ned->ID;
prssdp->volume = private->conf.ned->unit_addr;
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(*prssdp);
ccw->flags |= CCW_FLAG_CC;
ccw->cda = (__u32)virt_to_phys(prssdp);
/* Read Subsystem Data - Volume Storage Query */
vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
memset(vsq, 0, sizeof(*vsq));
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*vsq);
ccw->flags |= CCW_FLAG_SLI;
ccw->cda = (__u32)virt_to_phys(vsq);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->retries = 256;
cqr->expires = device->default_expires * HZ;
/* The command might not be supported. Suppress the error output */
__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
rc = dasd_sleep_on_interruptible(cqr);
if (rc == 0) {
memcpy(&private->vsq, vsq, sizeof(*vsq));
} else {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Reading the volume storage information failed with rc=%d", rc);
}
if (useglobal)
mutex_unlock(&dasd_vol_info_mutex);
else
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
static int dasd_eckd_is_ese(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
return private->vsq.vol_info.ese;
}
static int dasd_eckd_ext_pool_id(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
return private->vsq.extent_pool_id;
}
/*
* This value represents the total amount of available space. As more space is
* allocated by ESE volumes, this value will decrease.
* The data for this value is therefore updated on any call.
*/
static int dasd_eckd_space_configured(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
int rc;
rc = dasd_eckd_read_vol_info(device);
return rc ? : private->vsq.space_configured;
}
/*
* The value of space allocated by an ESE volume may have changed and is
* therefore updated on any call.
*/
static int dasd_eckd_space_allocated(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
int rc;
rc = dasd_eckd_read_vol_info(device);
return rc ? : private->vsq.space_allocated;
}
static int dasd_eckd_logical_capacity(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
return private->vsq.logical_capacity;
}
static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
{
struct ext_pool_exhaust_work_data *data;
struct dasd_device *device;
struct dasd_device *base;
data = container_of(work, struct ext_pool_exhaust_work_data, worker);
device = data->device;
base = data->base;
if (!base)
base = device;
if (dasd_eckd_space_configured(base) != 0) {
dasd_generic_space_avail(device);
} else {
dev_warn(&device->cdev->dev, "No space left in the extent pool\n");
DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space");
}
dasd_put_device(device);
kfree(data);
}
static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device,
struct dasd_ccw_req *cqr)
{
struct ext_pool_exhaust_work_data *data;
data = kzalloc(sizeof(*data), GFP_ATOMIC);
if (!data)
return -ENOMEM;
INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
dasd_get_device(device);
data->device = device;
if (cqr->block)
data->base = cqr->block->base;
else if (cqr->basedev)
data->base = cqr->basedev;
else
data->base = NULL;
schedule_work(&data->worker);
return 0;
}
static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
struct dasd_rssd_lcq *lcq)
{
struct dasd_eckd_private *private = device->private;
int pool_id = dasd_eckd_ext_pool_id(device);
struct dasd_ext_pool_sum eps;
int i;
for (i = 0; i < lcq->pool_count; i++) {
eps = lcq->ext_pool_sum[i];
if (eps.pool_id == pool_id) {
memcpy(&private->eps, &eps,
sizeof(struct dasd_ext_pool_sum));
}
}
}
/* Read Extent Pool Information - Logical Configuration Query */
static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
struct dasd_psf_prssd_data *prssdp;
struct dasd_rssd_lcq *lcq;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
/* This command cannot be executed on an alias device */
if (private->uid.type == UA_BASE_PAV_ALIAS ||
private->uid.type == UA_HYPER_PAV_ALIAS)
return 0;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
sizeof(*prssdp) + sizeof(*lcq), device, NULL);
if (IS_ERR(cqr)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate initialization request");
return PTR_ERR(cqr);
}
/* Prepare for Read Subsystem Data */
prssdp = cqr->data;
memset(prssdp, 0, sizeof(*prssdp));
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = PSF_SUBORDER_LCQ; /* Logical Configuration Query */
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(*prssdp);
ccw->flags |= CCW_FLAG_CC;
ccw->cda = (__u32)virt_to_phys(prssdp);
lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
memset(lcq, 0, sizeof(*lcq));
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*lcq);
ccw->flags |= CCW_FLAG_SLI;
ccw->cda = (__u32)virt_to_phys(lcq);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->retries = 256;
cqr->expires = device->default_expires * HZ;
/* The command might not be supported. Suppress the error output */
__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
rc = dasd_sleep_on_interruptible(cqr);
if (rc == 0) {
dasd_eckd_cpy_ext_pool_data(device, lcq);
} else {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Reading the logical configuration failed with rc=%d", rc);
}
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
/*
* Depending on the device type, the extent size is specified either as
* cylinders per extent (CKD) or size per extent (FBA)
 * A 1GB size corresponds to 1113 cyl, and 16MB to 21 cyl.
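 * (Assuming classic 3390 geometry of 15 tracks per cylinder and
 * 56664 bytes per track, 1113 cyl comes to roughly 0.95 * 10^9
 * bytes, i.e. the advertised "1GB".)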
*/
static int dasd_eckd_ext_size(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
struct dasd_ext_pool_sum eps = private->eps;
if (!eps.flags.extent_size_valid)
return 0;
if (eps.extent_size.size_1G)
return 1113;
if (eps.extent_size.size_16M)
return 21;
return 0;
}
static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
return private->eps.warn_thrshld;
}
static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
return private->eps.flags.capacity_at_warnlevel;
}
/*
* Extent Pool out of space
*/
static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
return private->eps.flags.pool_oos;
}
/*
* Build CP for Perform Subsystem Function - SSC.
*/
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
int enable_pav)
{
struct dasd_ccw_req *cqr;
struct dasd_psf_ssc_data *psf_ssc_data;
struct ccw1 *ccw;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
sizeof(struct dasd_psf_ssc_data),
device, NULL);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate PSF-SSC request");
return cqr;
}
psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
psf_ssc_data->order = PSF_ORDER_SSC;
psf_ssc_data->suborder = 0xc0;
if (enable_pav) {
psf_ssc_data->suborder |= 0x08;
psf_ssc_data->reserved[0] = 0x88;
}
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->cda = (__u32)virt_to_phys(psf_ssc_data);
ccw->count = 66;
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->retries = 256;
cqr->expires = 10*HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
/*
* Perform Subsystem Function.
* It is necessary to trigger CIO for channel revalidation since this
* call might change behaviour of DASD devices.
*/
static int
dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
unsigned long flags)
{
struct dasd_ccw_req *cqr;
int rc;
cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
if (IS_ERR(cqr))
return PTR_ERR(cqr);
/*
* set flags e.g. turn on failfast, to prevent blocking
* the calling function should handle failed requests
*/
cqr->flags |= flags;
rc = dasd_sleep_on(cqr);
if (!rc)
/* trigger CIO to reprobe devices */
css_schedule_reprobe();
else if (cqr->intrc == -EAGAIN)
rc = -EAGAIN;
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
/*
 * Validate the storage server of the current device.
*/
static int dasd_eckd_validate_server(struct dasd_device *device,
unsigned long flags)
{
struct dasd_eckd_private *private = device->private;
int enable_pav, rc;
if (private->uid.type == UA_BASE_PAV_ALIAS ||
private->uid.type == UA_HYPER_PAV_ALIAS)
return 0;
if (dasd_nopav || MACHINE_IS_VM)
enable_pav = 0;
else
enable_pav = 1;
rc = dasd_eckd_psf_ssc(device, enable_pav, flags);
	/* the requested feature may not be available on the server,
	 * therefore just report the error and go ahead */
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
"returned rc=%d", private->uid.ssid, rc);
return rc;
}
/*
* worker to do a validate server in case of a lost pathgroup
*/
static void dasd_eckd_do_validate_server(struct work_struct *work)
{
struct dasd_device *device = container_of(work, struct dasd_device,
kick_validate);
unsigned long flags = 0;
set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
if (dasd_eckd_validate_server(device, flags)
== -EAGAIN) {
/* schedule worker again if failed */
schedule_work(&device->kick_validate);
return;
}
dasd_put_device(device);
}
static void dasd_eckd_kick_validate_server(struct dasd_device *device)
{
dasd_get_device(device);
/* exit if device not online or in offline processing */
if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
device->state < DASD_STATE_ONLINE) {
dasd_put_device(device);
return;
}
/* queue call to do_validate_server to the kernel event daemon. */
if (!schedule_work(&device->kick_validate))
dasd_put_device(device);
}
/*
 * Return 1 if no copy relation is active or if the device is the
 * primary (active) device of the copy relation.
*/
static int dasd_device_is_primary(struct dasd_device *device)
{
if (!device->copy)
return 1;
if (device->copy->active->device == device)
return 1;
return 0;
}
static int dasd_eckd_alloc_block(struct dasd_device *device)
{
struct dasd_block *block;
struct dasd_uid temp_uid;
if (!dasd_device_is_primary(device))
return 0;
dasd_eckd_get_uid(device, &temp_uid);
if (temp_uid.type == UA_BASE_DEVICE) {
block = dasd_alloc_block();
if (IS_ERR(block)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"could not allocate dasd block structure");
return PTR_ERR(block);
}
device->block = block;
block->base = device;
}
return 0;
}
static bool dasd_eckd_pprc_enabled(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
return private->rdc_data.facilities.PPRC_enabled;
}
/*
* Check device characteristics.
* If the device is accessible using ECKD discipline, the device is enabled.
*/
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
int rc, i;
int readonly;
unsigned long value;
/* setup work queue for validate server*/
INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
/* setup work queue for summary unit check */
INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);
if (!ccw_device_is_pathgroup(device->cdev)) {
dev_warn(&device->cdev->dev,
"A channel path group could not be established\n");
return -EIO;
}
if (!ccw_device_is_multipath(device->cdev)) {
dev_info(&device->cdev->dev,
"The DASD is not operating in multipath mode\n");
}
if (!private) {
private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
if (!private) {
dev_warn(&device->cdev->dev,
"Allocating memory for private DASD data "
"failed\n");
return -ENOMEM;
}
device->private = private;
} else {
memset(private, 0, sizeof(*private));
}
/* Invalidate status of initial analysis. */
private->init_cqr_status = -1;
/* Set default cache operations. */
private->attrib.operation = DASD_NORMAL_CACHE;
private->attrib.nr_cyl = 0;
/* Read Configuration Data */
rc = dasd_eckd_read_conf(device);
if (rc)
goto out_err1;
/* set some default values */
device->default_expires = DASD_EXPIRES;
device->default_retries = DASD_RETRIES;
device->path_thrhld = DASD_ECKD_PATH_THRHLD;
device->path_interval = DASD_ECKD_PATH_INTERVAL;
device->aq_timeouts = DASD_RETRIES_MAX;
if (private->conf.gneq) {
value = 1;
for (i = 0; i < private->conf.gneq->timeout.value; i++)
value = 10 * value;
value = value * private->conf.gneq->timeout.number;
/* do not accept useless values */
if (value != 0 && value <= DASD_EXPIRES_MAX)
device->default_expires = value;
}
/* Read Device Characteristics */
rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
&private->rdc_data, 64);
if (rc) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Read device characteristic failed, rc=%d", rc);
goto out_err1;
}
/* setup PPRC for device from devmap */
rc = dasd_devmap_set_device_copy_relation(device->cdev,
dasd_eckd_pprc_enabled(device));
if (rc) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"copy relation setup failed, rc=%d", rc);
goto out_err1;
}
/* check if block device is needed and allocate in case */
rc = dasd_eckd_alloc_block(device);
if (rc)
goto out_err1;
/* register lcu with alias handling, enable PAV */
rc = dasd_alias_make_device_known_to_lcu(device);
if (rc)
goto out_err2;
dasd_eckd_validate_server(device, 0);
/* device may report different configuration data after LCU setup */
rc = dasd_eckd_read_conf(device);
if (rc)
goto out_err3;
dasd_eckd_read_fc_security(device);
dasd_path_create_kobjects(device);
/* Read Feature Codes */
dasd_eckd_read_features(device);
/* Read Volume Information */
dasd_eckd_read_vol_info(device);
/* Read Extent Pool Information */
dasd_eckd_read_ext_pool_info(device);
if ((device->features & DASD_FEATURE_USERAW) &&
!(private->rdc_data.facilities.RT_in_LR)) {
dev_err(&device->cdev->dev, "The storage server does not "
"support raw-track access\n");
rc = -EINVAL;
goto out_err3;
}
	/* find the valid number of cylinders */
if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
private->rdc_data.long_no_cyl)
private->real_cyl = private->rdc_data.long_no_cyl;
else
private->real_cyl = private->rdc_data.no_cyl;
private->fcx_max_data = get_fcx_max_data(device);
readonly = dasd_device_is_ro(device);
if (readonly)
set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
"with %d cylinders, %d heads, %d sectors%s\n",
private->rdc_data.dev_type,
private->rdc_data.dev_model,
private->rdc_data.cu_type,
private->rdc_data.cu_model.model,
private->real_cyl,
private->rdc_data.trk_per_cyl,
private->rdc_data.sec_per_trk,
readonly ? ", read-only device" : "");
return 0;
out_err3:
dasd_alias_disconnect_device_from_lcu(device);
out_err2:
dasd_free_block(device->block);
device->block = NULL;
out_err1:
dasd_eckd_clear_conf_data(device);
dasd_path_remove_kobjects(device);
kfree(device->private);
device->private = NULL;
return rc;
}
static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
if (!private)
return;
dasd_alias_disconnect_device_from_lcu(device);
private->conf.ned = NULL;
private->conf.sneq = NULL;
private->conf.vdsneq = NULL;
private->conf.gneq = NULL;
dasd_eckd_clear_conf_data(device);
dasd_path_remove_kobjects(device);
}
static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
struct eckd_count *count_data;
struct LO_eckd_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int cplength, datasize;
int i;
cplength = 8;
datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
NULL);
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
/* Define extent for the first 2 tracks. */
define_extent(ccw++, cqr->data, 0, 1,
DASD_ECKD_CCW_READ_COUNT, device, 0);
LO_data = cqr->data + sizeof(struct DE_eckd_data);
/* Locate record for the first 4 records on track 0. */
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, LO_data++, 0, 0, 4,
DASD_ECKD_CCW_READ_COUNT, device, 0);
count_data = private->count_area;
for (i = 0; i < 4; i++) {
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
ccw->flags = 0;
ccw->count = 8;
ccw->cda = (__u32)virt_to_phys(count_data);
ccw++;
count_data++;
}
/* Locate record for the first record on track 1. */
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, LO_data++, 1, 0, 1,
DASD_ECKD_CCW_READ_COUNT, device, 0);
/* Read count ccw. */
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
ccw->flags = 0;
ccw->count = 8;
ccw->cda = (__u32)virt_to_phys(count_data);
cqr->block = NULL;
cqr->startdev = device;
cqr->memdev = device;
cqr->retries = 255;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* Set flags to suppress output for expected errors */
set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
return cqr;
}
/* differentiate between 'no record found' and any other error */
static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
{
char *sense;
if (init_cqr->status == DASD_CQR_DONE)
return INIT_CQR_OK;
else if (init_cqr->status == DASD_CQR_NEED_ERP ||
init_cqr->status == DASD_CQR_FAILED) {
sense = dasd_get_sense(&init_cqr->irb);
if (sense && (sense[1] & SNS1_NO_REC_FOUND))
return INIT_CQR_UNFORMATTED;
else
return INIT_CQR_ERROR;
} else
return INIT_CQR_ERROR;
}
/*
* This is the callback function for the init_analysis cqr. It saves
* the status of the initial analysis ccw before it frees it and kicks
* the device to continue the startup sequence. This will call
 * dasd_eckd_do_analysis again (if the device has not been marked
* for deletion in the meantime).
*/
static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
void *data)
{
struct dasd_device *device = init_cqr->startdev;
struct dasd_eckd_private *private = device->private;
private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
dasd_sfree_request(init_cqr, device);
dasd_kick_device(device);
}
static int dasd_eckd_start_analysis(struct dasd_block *block)
{
struct dasd_ccw_req *init_cqr;
init_cqr = dasd_eckd_analysis_ccw(block->base);
if (IS_ERR(init_cqr))
return PTR_ERR(init_cqr);
init_cqr->callback = dasd_eckd_analysis_callback;
init_cqr->callback_data = NULL;
init_cqr->expires = 5*HZ;
/* first try without ERP, so we can later handle unformatted
	 * devices as a special case
*/
clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
init_cqr->retries = 0;
dasd_add_request_head(init_cqr);
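	/* analysis continues asynchronously; the callback kicks the device */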
return -EAGAIN;
}
static int dasd_eckd_end_analysis(struct dasd_block *block)
{
struct dasd_device *device = block->base;
struct dasd_eckd_private *private = device->private;
struct eckd_count *count_area;
unsigned int sb, blk_per_trk;
int status, i;
struct dasd_ccw_req *init_cqr;
status = private->init_cqr_status;
private->init_cqr_status = -1;
if (status == INIT_CQR_ERROR) {
/* try again, this time with full ERP */
init_cqr = dasd_eckd_analysis_ccw(device);
dasd_sleep_on(init_cqr);
status = dasd_eckd_analysis_evaluation(init_cqr);
dasd_sfree_request(init_cqr, device);
}
if (device->features & DASD_FEATURE_USERAW) {
block->bp_block = DASD_RAW_BLOCKSIZE;
blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
block->s2b_shift = 3;
goto raw;
}
if (status == INIT_CQR_UNFORMATTED) {
dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
return -EMEDIUMTYPE;
} else if (status == INIT_CQR_ERROR) {
dev_err(&device->cdev->dev,
"Detecting the DASD disk layout failed because "
"of an I/O error\n");
return -EIO;
}
private->uses_cdl = 1;
/* Check Track 0 for Compatible Disk Layout */
count_area = NULL;
for (i = 0; i < 3; i++) {
if (private->count_area[i].kl != 4 ||
private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
private->count_area[i].cyl != 0 ||
private->count_area[i].head != count_area_head[i] ||
private->count_area[i].record != count_area_rec[i]) {
private->uses_cdl = 0;
break;
}
}
if (i == 3)
count_area = &private->count_area[3];
if (private->uses_cdl == 0) {
for (i = 0; i < 5; i++) {
if ((private->count_area[i].kl != 0) ||
(private->count_area[i].dl !=
private->count_area[0].dl) ||
private->count_area[i].cyl != 0 ||
private->count_area[i].head != count_area_head[i] ||
private->count_area[i].record != count_area_rec[i])
break;
}
if (i == 5)
count_area = &private->count_area[0];
} else {
if (private->count_area[3].record == 1)
dev_warn(&device->cdev->dev,
"Track 0 has no records following the VTOC\n");
}
if (count_area != NULL && count_area->kl == 0) {
		/* we found nothing violating our disk layout */
if (dasd_check_blocksize(count_area->dl) == 0)
block->bp_block = count_area->dl;
}
if (block->bp_block == 0) {
dev_warn(&device->cdev->dev,
"The disk layout of the DASD is not supported\n");
return -EMEDIUMTYPE;
}
block->s2b_shift = 0; /* bits to shift 512 to get a block */
for (sb = 512; sb < block->bp_block; sb = sb << 1)
block->s2b_shift++;
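	/* e.g. bp_block == 4096: 512 -> 1024 -> 2048 -> 4096, s2b_shift = 3 */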
blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
raw:
block->blocks = ((unsigned long) private->real_cyl *
private->rdc_data.trk_per_cyl *
blk_per_trk);
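	/*
	 * e.g. a hypothetical 3390 with 3339 cylinders, 15 tracks per
	 * cylinder and 12 records of 4096 bytes per track yields
	 * 3339 * 15 * 12 = 601020 blocks, about 2.3 GiB.
	 */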
dev_info(&device->cdev->dev,
"DASD with %u KB/block, %lu KB total size, %u KB/track, "
"%s\n", (block->bp_block >> 10),
(((unsigned long) private->real_cyl *
private->rdc_data.trk_per_cyl *
blk_per_trk * (block->bp_block >> 9)) >> 1),
((blk_per_trk * block->bp_block) >> 10),
private->uses_cdl ?
"compatible disk layout" : "linux disk layout");
return 0;
}
static int dasd_eckd_do_analysis(struct dasd_block *block)
{
struct dasd_eckd_private *private = block->base->private;
if (private->init_cqr_status < 0)
return dasd_eckd_start_analysis(block);
else
return dasd_eckd_end_analysis(block);
}
static int dasd_eckd_basic_to_ready(struct dasd_device *device)
{
return dasd_alias_add_device(device);
}
static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
if (cancel_work_sync(&device->reload_device))
dasd_put_device(device);
if (cancel_work_sync(&device->kick_validate))
dasd_put_device(device);
return 0;
}
static int dasd_eckd_basic_to_known(struct dasd_device *device)
{
return dasd_alias_remove_device(device);
}
static int
dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
{
struct dasd_eckd_private *private = block->base->private;
if (dasd_check_blocksize(block->bp_block) == 0) {
geo->sectors = recs_per_track(&private->rdc_data,
0, block->bp_block);
}
geo->cylinders = private->rdc_data.no_cyl;
geo->heads = private->rdc_data.trk_per_cyl;
return 0;
}
/*
* Build the TCW request for the format check
*/
static struct dasd_ccw_req *
dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
int enable_pav, struct eckd_count *fmt_buffer,
int rpt)
{
struct dasd_eckd_private *start_priv;
struct dasd_device *startdev = NULL;
struct tidaw *last_tidaw = NULL;
struct dasd_ccw_req *cqr;
struct itcw *itcw;
int itcw_size;
int count;
int rc;
int i;
if (enable_pav)
startdev = dasd_alias_get_start_dev(base);
if (!startdev)
startdev = base;
start_priv = startdev->private;
count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
/*
	 * we're adding 'count' tidaws to the itcw;
* calculate the corresponding itcw_size
*/
itcw_size = itcw_calc_size(0, count, 0);
cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
if (IS_ERR(cqr))
return cqr;
start_priv->count++;
itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
if (IS_ERR(itcw)) {
rc = -EINVAL;
goto out_err;
}
cqr->cpaddr = itcw_get_tcw(itcw);
rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
sizeof(struct eckd_count),
count * sizeof(struct eckd_count), 0, rpt);
if (rc)
goto out_err;
for (i = 0; i < count; i++) {
last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
sizeof(struct eckd_count));
if (IS_ERR(last_tidaw)) {
rc = -EINVAL;
goto out_err;
}
}
last_tidaw->flags |= TIDAW_FLAGS_LAST;
itcw_finalize(itcw);
cqr->cpmode = 1;
cqr->startdev = startdev;
cqr->memdev = startdev;
cqr->basedev = base;
cqr->retries = startdev->default_retries;
cqr->expires = startdev->default_expires * HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* Set flags to suppress output for expected errors */
set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
return cqr;
out_err:
dasd_sfree_request(cqr, startdev);
return ERR_PTR(rc);
}
/*
* Build the CCW request for the format check
*/
static struct dasd_ccw_req *
dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
int enable_pav, struct eckd_count *fmt_buffer, int rpt)
{
struct dasd_eckd_private *start_priv;
struct dasd_eckd_private *base_priv;
struct dasd_device *startdev = NULL;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
void *data;
int cplength, datasize;
int use_prefix;
int count;
int i;
if (enable_pav)
startdev = dasd_alias_get_start_dev(base);
if (!startdev)
startdev = base;
start_priv = startdev->private;
base_priv = base->private;
count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
use_prefix = base_priv->features.feature[8] & 0x01;
if (use_prefix) {
cplength = 1;
datasize = sizeof(struct PFX_eckd_data);
} else {
cplength = 2;
datasize = sizeof(struct DE_eckd_data) +
sizeof(struct LO_eckd_data);
}
cplength += count;
cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
if (IS_ERR(cqr))
return cqr;
start_priv->count++;
data = cqr->data;
ccw = cqr->cpaddr;
if (use_prefix) {
prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
count, 0, 0);
} else {
define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_READ_COUNT, startdev, 0);
data += sizeof(struct DE_eckd_data);
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, data, fdata->start_unit, 0, count,
DASD_ECKD_CCW_READ_COUNT, base, 0);
}
for (i = 0; i < count; i++) {
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
ccw->cda = (__u32)virt_to_phys(fmt_buffer);
ccw++;
fmt_buffer++;
}
cqr->startdev = startdev;
cqr->memdev = startdev;
cqr->basedev = base;
cqr->retries = DASD_RETRIES;
cqr->expires = startdev->default_expires * HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* Set flags to suppress output for expected errors */
set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
return cqr;
}
static struct dasd_ccw_req *
dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
struct format_data_t *fdata, int enable_pav)
{
struct dasd_eckd_private *base_priv;
struct dasd_eckd_private *start_priv;
struct dasd_ccw_req *fcp;
struct eckd_count *ect;
struct ch_t address;
struct ccw1 *ccw;
void *data;
int rpt;
int cplength, datasize;
int i, j;
int intensity = 0;
int r0_perm;
int nr_tracks;
int use_prefix;
if (enable_pav)
startdev = dasd_alias_get_start_dev(base);
if (!startdev)
startdev = base;
start_priv = startdev->private;
base_priv = base->private;
rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);
nr_tracks = fdata->stop_unit - fdata->start_unit + 1;
/*
* fdata->intensity is a bit string that tells us what to do:
* Bit 0: write record zero
* Bit 1: write home address, currently not supported
* Bit 2: invalidate tracks
* Bit 3: use OS/390 compatible disk layout (cdl)
* Bit 4: do not allow storage subsystem to modify record zero
	 * Only some bit combinations make sense.
*/
if (fdata->intensity & 0x10) {
r0_perm = 0;
intensity = fdata->intensity & ~0x10;
} else {
r0_perm = 1;
intensity = fdata->intensity;
}
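	/*
	 * e.g. a hypothetical fdata->intensity of 0x18 requests a cdl
	 * format (0x08) with record zero protected from modification
	 * (0x10), leaving intensity == 0x08 and r0_perm == 0.
	 */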
use_prefix = base_priv->features.feature[8] & 0x01;
switch (intensity) {
case 0x00: /* Normal format */
case 0x08: /* Normal format, use cdl. */
cplength = 2 + (rpt*nr_tracks);
if (use_prefix)
datasize = sizeof(struct PFX_eckd_data) +
sizeof(struct LO_eckd_data) +
rpt * nr_tracks * sizeof(struct eckd_count);
else
datasize = sizeof(struct DE_eckd_data) +
sizeof(struct LO_eckd_data) +
rpt * nr_tracks * sizeof(struct eckd_count);
break;
case 0x01: /* Write record zero and format track. */
case 0x09: /* Write record zero and format track, use cdl. */
cplength = 2 + rpt * nr_tracks;
if (use_prefix)
datasize = sizeof(struct PFX_eckd_data) +
sizeof(struct LO_eckd_data) +
sizeof(struct eckd_count) +
rpt * nr_tracks * sizeof(struct eckd_count);
else
datasize = sizeof(struct DE_eckd_data) +
sizeof(struct LO_eckd_data) +
sizeof(struct eckd_count) +
rpt * nr_tracks * sizeof(struct eckd_count);
break;
case 0x04: /* Invalidate track. */
case 0x0c: /* Invalidate track, use cdl. */
cplength = 3;
if (use_prefix)
datasize = sizeof(struct PFX_eckd_data) +
sizeof(struct LO_eckd_data) +
sizeof(struct eckd_count);
else
datasize = sizeof(struct DE_eckd_data) +
sizeof(struct LO_eckd_data) +
sizeof(struct eckd_count);
break;
default:
dev_warn(&startdev->cdev->dev,
"An I/O control call used incorrect flags 0x%x\n",
fdata->intensity);
return ERR_PTR(-EINVAL);
}
fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
if (IS_ERR(fcp))
return fcp;
start_priv->count++;
data = fcp->data;
ccw = fcp->cpaddr;
switch (intensity & ~0x08) {
case 0x00: /* Normal format. */
if (use_prefix) {
prefix(ccw++, (struct PFX_eckd_data *) data,
fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_WRITE_CKD, base, startdev);
/* grant subsystem permission to format R0 */
if (r0_perm)
((struct PFX_eckd_data *)data)
->define_extent.ga_extended |= 0x04;
data += sizeof(struct PFX_eckd_data);
} else {
define_extent(ccw++, (struct DE_eckd_data *) data,
fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
/* grant subsystem permission to format R0 */
if (r0_perm)
((struct DE_eckd_data *) data)
->ga_extended |= 0x04;
data += sizeof(struct DE_eckd_data);
}
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, (struct LO_eckd_data *) data,
fdata->start_unit, 0, rpt*nr_tracks,
DASD_ECKD_CCW_WRITE_CKD, base,
fdata->blksize);
data += sizeof(struct LO_eckd_data);
break;
case 0x01: /* Write record zero + format track. */
if (use_prefix) {
prefix(ccw++, (struct PFX_eckd_data *) data,
fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_WRITE_RECORD_ZERO,
base, startdev);
data += sizeof(struct PFX_eckd_data);
} else {
define_extent(ccw++, (struct DE_eckd_data *) data,
fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
data += sizeof(struct DE_eckd_data);
}
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, (struct LO_eckd_data *) data,
fdata->start_unit, 0, rpt * nr_tracks + 1,
DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
base->block->bp_block);
data += sizeof(struct LO_eckd_data);
break;
case 0x04: /* Invalidate track. */
if (use_prefix) {
prefix(ccw++, (struct PFX_eckd_data *) data,
fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_WRITE_CKD, base, startdev);
data += sizeof(struct PFX_eckd_data);
} else {
define_extent(ccw++, (struct DE_eckd_data *) data,
fdata->start_unit, fdata->stop_unit,
DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
data += sizeof(struct DE_eckd_data);
}
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, (struct LO_eckd_data *) data,
fdata->start_unit, 0, 1,
DASD_ECKD_CCW_WRITE_CKD, base, 8);
data += sizeof(struct LO_eckd_data);
break;
}
for (j = 0; j < nr_tracks; j++) {
/* calculate cylinder and head for the current track */
set_ch_t(&address,
(fdata->start_unit + j) /
base_priv->rdc_data.trk_per_cyl,
(fdata->start_unit + j) %
base_priv->rdc_data.trk_per_cyl);
if (intensity & 0x01) { /* write record zero */
ect = (struct eckd_count *) data;
data += sizeof(struct eckd_count);
ect->cyl = address.cyl;
ect->head = address.head;
ect->record = 0;
ect->kl = 0;
ect->dl = 8;
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
ccw->cda = (__u32)virt_to_phys(ect);
ccw++;
}
if ((intensity & ~0x08) & 0x04) { /* erase track */
ect = (struct eckd_count *) data;
data += sizeof(struct eckd_count);
ect->cyl = address.cyl;
ect->head = address.head;
ect->record = 1;
ect->kl = 0;
ect->dl = 0;
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
ccw->cda = (__u32)virt_to_phys(ect);
} else { /* write remaining records */
for (i = 0; i < rpt; i++) {
ect = (struct eckd_count *) data;
data += sizeof(struct eckd_count);
ect->cyl = address.cyl;
ect->head = address.head;
ect->record = i + 1;
ect->kl = 0;
ect->dl = fdata->blksize;
/*
* Check for special tracks 0-1
* when formatting CDL
*/
if ((intensity & 0x08) &&
address.cyl == 0 && address.head == 0) {
if (i < 3) {
ect->kl = 4;
ect->dl = sizes_trk0[i] - 4;
}
}
if ((intensity & 0x08) &&
address.cyl == 0 && address.head == 1) {
ect->kl = 44;
ect->dl = LABEL_SIZE - 44;
}
ccw[-1].flags |= CCW_FLAG_CC;
if (i != 0 || j == 0)
ccw->cmd_code =
DASD_ECKD_CCW_WRITE_CKD;
else
ccw->cmd_code =
DASD_ECKD_CCW_WRITE_CKD_MT;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
ccw->cda = (__u32)virt_to_phys(ect);
ccw++;
}
}
}
fcp->startdev = startdev;
fcp->memdev = startdev;
fcp->basedev = base;
fcp->retries = 256;
fcp->expires = startdev->default_expires * HZ;
fcp->buildclk = get_tod_clock();
fcp->status = DASD_CQR_FILLED;
return fcp;
}
/*
* Wrapper function to build a CCW request depending on input data
*/
static struct dasd_ccw_req *
dasd_eckd_format_build_ccw_req(struct dasd_device *base,
struct format_data_t *fdata, int enable_pav,
int tpm, struct eckd_count *fmt_buffer, int rpt)
{
struct dasd_ccw_req *ccw_req;
if (!fmt_buffer) {
ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav);
} else {
if (tpm)
ccw_req = dasd_eckd_build_check_tcw(base, fdata,
enable_pav,
fmt_buffer, rpt);
else
ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
fmt_buffer, rpt);
}
return ccw_req;
}
/*
* Sanity checks on format_data
*/
static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
struct format_data_t *fdata)
{
struct dasd_eckd_private *private = base->private;
if (fdata->start_unit >=
(private->real_cyl * private->rdc_data.trk_per_cyl)) {
dev_warn(&base->cdev->dev,
"Start track number %u used in formatting is too big\n",
fdata->start_unit);
return -EINVAL;
}
if (fdata->stop_unit >=
(private->real_cyl * private->rdc_data.trk_per_cyl)) {
dev_warn(&base->cdev->dev,
"Stop track number %u used in formatting is too big\n",
fdata->stop_unit);
return -EINVAL;
}
if (fdata->start_unit > fdata->stop_unit) {
dev_warn(&base->cdev->dev,
"Start track %u used in formatting exceeds end track\n",
fdata->start_unit);
return -EINVAL;
}
if (dasd_check_blocksize(fdata->blksize) != 0) {
dev_warn(&base->cdev->dev,
"The DASD cannot be formatted with block size %u\n",
fdata->blksize);
return -EINVAL;
}
return 0;
}
/*
* This function will process format_data originally coming from an IOCTL
*/
static int dasd_eckd_format_process_data(struct dasd_device *base,
struct format_data_t *fdata,
int enable_pav, int tpm,
struct eckd_count *fmt_buffer, int rpt,
struct irb *irb)
{
struct dasd_eckd_private *private = base->private;
struct dasd_ccw_req *cqr, *n;
struct list_head format_queue;
struct dasd_device *device;
char *sense = NULL;
int old_start, old_stop, format_step;
int step, retry;
int rc;
rc = dasd_eckd_format_sanity_checks(base, fdata);
if (rc)
return rc;
INIT_LIST_HEAD(&format_queue);
old_start = fdata->start_unit;
old_stop = fdata->stop_unit;
if (!tpm && fmt_buffer != NULL) {
/* Command Mode / Format Check */
format_step = 1;
} else if (tpm && fmt_buffer != NULL) {
/* Transport Mode / Format Check */
format_step = DASD_CQR_MAX_CCW / rpt;
} else {
/* Normal Formatting */
format_step = DASD_CQR_MAX_CCW /
recs_per_track(&private->rdc_data, 0, fdata->blksize);
}
do {
retry = 0;
while (fdata->start_unit <= old_stop) {
step = fdata->stop_unit - fdata->start_unit + 1;
if (step > format_step) {
fdata->stop_unit =
fdata->start_unit + format_step - 1;
}
cqr = dasd_eckd_format_build_ccw_req(base, fdata,
enable_pav, tpm,
fmt_buffer, rpt);
if (IS_ERR(cqr)) {
rc = PTR_ERR(cqr);
if (rc == -ENOMEM) {
if (list_empty(&format_queue))
goto out;
				/*
				 * not enough memory available: start the
				 * queued requests and retry the remainder
				 * after they have finished
				 */
retry = 1;
break;
}
goto out_err;
}
list_add_tail(&cqr->blocklist, &format_queue);
if (fmt_buffer) {
step = fdata->stop_unit - fdata->start_unit + 1;
fmt_buffer += rpt * step;
}
fdata->start_unit = fdata->stop_unit + 1;
fdata->stop_unit = old_stop;
}
rc = dasd_sleep_on_queue(&format_queue);
out_err:
list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
device = cqr->startdev;
private = device->private;
if (cqr->status == DASD_CQR_FAILED) {
/*
* Only get sense data if called by format
* check
*/
if (fmt_buffer && irb) {
sense = dasd_get_sense(&cqr->irb);
memcpy(irb, &cqr->irb, sizeof(*irb));
}
rc = -EIO;
}
list_del_init(&cqr->blocklist);
dasd_ffree_request(cqr, device);
private->count--;
}
if (rc && rc != -EIO)
goto out;
if (rc == -EIO) {
			/*
			 * In case fewer than the expected records are on the
			 * track, we will most likely get a 'No Record Found'
			 * error (in command mode) or a 'File Protected' error
			 * (in transport mode). These particular cases should
			 * not propagate the -EIO to the IOCTL; therefore,
			 * reset rc and continue.
			 */
if (sense &&
(sense[1] & SNS1_NO_REC_FOUND ||
sense[1] & SNS1_FILE_PROTECTED))
retry = 1;
else
goto out;
}
} while (retry);
out:
fdata->start_unit = old_start;
fdata->stop_unit = old_stop;
return rc;
}
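/*
 * Illustrative sketch, not driver code: the chunking arithmetic used by
 * dasd_eckd_format_process_data() above. The track range is cut into
 * pieces of at most format_step tracks, one request per piece; queueing,
 * retries and error handling are omitted here.
 */
static int __maybe_unused
example_format_chunks(unsigned int start, unsigned int stop,
		      unsigned int format_step)
{
	unsigned int chunk_stop;
	int chunks = 0;

	while (start <= stop) {
		chunk_stop = start + format_step - 1;
		if (chunk_stop > stop)
			chunk_stop = stop;
		/* one CQR would be built for [start, chunk_stop] here */
		chunks++;
		start = chunk_stop + 1;
	}
	/* e.g. start = 0, stop = 99, format_step = 40 -> 3 chunks */
	return chunks;
}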
static int dasd_eckd_format_device(struct dasd_device *base,
struct format_data_t *fdata, int enable_pav)
{
return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
0, NULL);
}
static bool test_and_set_format_track(struct dasd_format_entry *to_format,
struct dasd_ccw_req *cqr)
{
struct dasd_block *block = cqr->block;
struct dasd_format_entry *format;
unsigned long flags;
bool rc = false;
spin_lock_irqsave(&block->format_lock, flags);
if (cqr->trkcount != atomic_read(&block->trkcount)) {
		/*
		 * The number of formatted tracks has changed after request
		 * start and we cannot tell if the current track was involved.
		 * To avoid data corruption, treat it as if the current track
		 * is involved.
		 */
rc = true;
goto out;
}
list_for_each_entry(format, &block->format_list, list) {
if (format->track == to_format->track) {
rc = true;
goto out;
}
}
list_add_tail(&to_format->list, &block->format_list);
out:
spin_unlock_irqrestore(&block->format_lock, flags);
return rc;
}
static void clear_format_track(struct dasd_format_entry *format,
struct dasd_block *block)
{
unsigned long flags;
spin_lock_irqsave(&block->format_lock, flags);
atomic_inc(&block->trkcount);
list_del_init(&format->list);
spin_unlock_irqrestore(&block->format_lock, flags);
}
/*
* Callback function to free ESE format requests.
*/
static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
{
struct dasd_device *device = cqr->startdev;
struct dasd_eckd_private *private = device->private;
struct dasd_format_entry *format = data;
clear_format_track(format, cqr->basedev->block);
private->count--;
dasd_ffree_request(cqr, device);
}
static struct dasd_ccw_req *
dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
struct irb *irb)
{
struct dasd_eckd_private *private;
struct dasd_format_entry *format;
struct format_data_t fdata;
unsigned int recs_per_trk;
struct dasd_ccw_req *fcqr;
struct dasd_device *base;
struct dasd_block *block;
unsigned int blksize;
struct request *req;
sector_t first_trk;
sector_t last_trk;
sector_t curr_trk;
int rc;
req = dasd_get_callback_data(cqr);
block = cqr->block;
base = block->base;
private = base->private;
blksize = block->bp_block;
recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
format = &startdev->format_entry;
first_trk = blk_rq_pos(req) >> block->s2b_shift;
sector_div(first_trk, recs_per_trk);
last_trk =
(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
sector_div(last_trk, recs_per_trk);
rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
if (rc)
return ERR_PTR(rc);
if (curr_trk < first_trk || curr_trk > last_trk) {
DBF_DEV_EVENT(DBF_WARNING, startdev,
"ESE error track %llu not within range %llu - %llu\n",
curr_trk, first_trk, last_trk);
return ERR_PTR(-EINVAL);
}
format->track = curr_trk;
	/* test if the track is already being formatted by another thread */
if (test_and_set_format_track(format, cqr)) {
		/* this is not a real error, so do not count down retries */
cqr->retries++;
return ERR_PTR(-EEXIST);
}
fdata.start_unit = curr_trk;
fdata.stop_unit = curr_trk;
fdata.blksize = blksize;
fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;
rc = dasd_eckd_format_sanity_checks(base, &fdata);
if (rc)
return ERR_PTR(-EINVAL);
/*
* We're building the request with PAV disabled as we're reusing
* the former startdev.
*/
fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0);
if (IS_ERR(fcqr))
return fcqr;
fcqr->callback = dasd_eckd_ese_format_cb;
fcqr->callback_data = (void *) format;
return fcqr;
}
/*
 * When data is read from an unformatted area of an ESE volume, this function
 * returns zeroed data and thereby mimics a read of zero data.
 *
 * The first unformatted track is the one that got the NRF error; its address
 * is encoded in the sense data.
 *
 * All tracks before it have returned valid data and should not be touched.
 * All tracks after the unformatted track might or might not be formatted. As
 * this is currently not known, remember the processed data and return the
 * remainder of the request to the block layer in __dasd_cleanup_cqr().
 */
static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
{
struct dasd_eckd_private *private;
sector_t first_trk, last_trk;
sector_t first_blk, last_blk;
unsigned int blksize, off;
unsigned int recs_per_trk;
struct dasd_device *base;
struct req_iterator iter;
struct dasd_block *block;
unsigned int skip_block;
unsigned int blk_count;
struct request *req;
struct bio_vec bv;
sector_t curr_trk;
sector_t end_blk;
char *dst;
int rc;
req = (struct request *) cqr->callback_data;
base = cqr->block->base;
blksize = base->block->bp_block;
block = cqr->block;
private = base->private;
skip_block = 0;
blk_count = 0;
recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
sector_div(first_trk, recs_per_trk);
last_trk = last_blk =
(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
sector_div(last_trk, recs_per_trk);
rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
if (rc)
return rc;
/* sanity check if the current track from sense data is valid */
if (curr_trk < first_trk || curr_trk > last_trk) {
DBF_DEV_EVENT(DBF_WARNING, base,
"ESE error track %llu not within range %llu - %llu\n",
curr_trk, first_trk, last_trk);
return -EINVAL;
}
	/*
	 * If a track other than the first got the NRF error, we have to skip
	 * over valid blocks.
	 */
if (curr_trk != first_trk)
skip_block = curr_trk * recs_per_trk - first_blk;
/* we have no information beyond the current track */
end_blk = (curr_trk + 1) * recs_per_trk;
rq_for_each_segment(bv, req, iter) {
dst = bvec_virt(&bv);
for (off = 0; off < bv.bv_len; off += blksize) {
if (first_blk + blk_count >= end_blk) {
cqr->proc_bytes = blk_count * blksize;
return 0;
}
if (dst && !skip_block)
memset(dst, 0, blksize);
else
skip_block--;
dst += blksize;
blk_count++;
}
}
return 0;
}
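/*
 * Worked example for the skip/zero-fill logic above, with hypothetical
 * numbers: given recs_per_trk = 12 and a request starting at
 * first_blk = 30 (first_trk = 2), an NRF error on curr_trk = 4 yields
 * skip_block = 4 * 12 - 30 = 18 valid blocks to skip and
 * end_blk = (4 + 1) * 12 = 60, so blocks 48..59 are zero-filled and
 * everything beyond is handed back to the block layer unprocessed.
 */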
/*
* Helper function to count consecutive records of a single track.
*/
static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
int max)
{
int head;
int i;
head = fmt_buffer[start].head;
/*
* There are 3 conditions where we stop counting:
* - if data reoccurs (same head and record may reoccur), which may
* happen due to the way DASD_ECKD_CCW_READ_COUNT works
* - when the head changes, because we're iterating over several tracks
* then (DASD_ECKD_CCW_READ_COUNT_MT)
* - when we've reached the end of sensible data in the buffer (the
* record will be 0 then)
*/
for (i = start; i < max; i++) {
if (i > start) {
if ((fmt_buffer[i].head == head &&
fmt_buffer[i].record == 1) ||
fmt_buffer[i].head != head ||
fmt_buffer[i].record == 0)
break;
}
}
return i - start;
}
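/*
 * Example of the stop conditions above, for a hypothetical count buffer
 * read from head 0: records 1,2,3 followed by another record 1 means the
 * data re-occurred; records 1,2,3 followed by a record with head 1 means
 * a track change; and a record number of 0 marks the end of sensible
 * data. In all three cases the function returns 3.
 */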
/*
* Evaluate a given range of tracks. Data like number of records, blocksize,
* record ids, and key length are compared with expected data.
*
* If a mismatch occurs, the corresponding error bit is set, as well as
* additional information, depending on the error.
*/
static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
struct format_check_t *cdata,
int rpt_max, int rpt_exp,
int trk_per_cyl, int tpm)
{
struct ch_t geo;
int max_entries;
int count = 0;
int trkcount;
int blksize;
int pos = 0;
int i, j;
int kl;
trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
max_entries = trkcount * rpt_max;
for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
/* Calculate the correct next starting position in the buffer */
if (tpm) {
while (fmt_buffer[pos].record == 0 &&
fmt_buffer[pos].dl == 0) {
if (pos++ > max_entries)
break;
}
} else {
if (i != cdata->expect.start_unit)
pos += rpt_max - count;
}
/* Calculate the expected geo values for the current track */
set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);
/* Count and check number of records */
count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);
if (count < rpt_exp) {
cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
break;
}
if (count > rpt_exp) {
cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
break;
}
for (j = 0; j < count; j++, pos++) {
blksize = cdata->expect.blksize;
kl = 0;
/*
* Set special values when checking CDL formatted
* devices.
*/
if ((cdata->expect.intensity & 0x08) &&
geo.cyl == 0 && geo.head == 0) {
if (j < 3) {
blksize = sizes_trk0[j] - 4;
kl = 4;
}
}
if ((cdata->expect.intensity & 0x08) &&
geo.cyl == 0 && geo.head == 1) {
blksize = LABEL_SIZE - 44;
kl = 44;
}
/* Check blocksize */
if (fmt_buffer[pos].dl != blksize) {
cdata->result = DASD_FMT_ERR_BLKSIZE;
goto out;
}
/* Check if key length is 0 */
if (fmt_buffer[pos].kl != kl) {
cdata->result = DASD_FMT_ERR_KEY_LENGTH;
goto out;
}
/* Check if record_id is correct */
if (fmt_buffer[pos].cyl != geo.cyl ||
fmt_buffer[pos].head != geo.head ||
fmt_buffer[pos].record != (j + 1)) {
cdata->result = DASD_FMT_ERR_RECORD_ID;
goto out;
}
}
}
out:
/*
* In case of no errors, we need to decrease by one
* to get the correct positions.
*/
if (!cdata->result) {
i--;
pos--;
}
cdata->unit = i;
cdata->num_records = count;
cdata->rec = fmt_buffer[pos].record;
cdata->blksize = fmt_buffer[pos].dl;
cdata->key_length = fmt_buffer[pos].kl;
}
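/*
 * Example of the CDL special cases above: on cylinder 0, head 0 the first
 * three records are expected with a 4-byte key and sizes_trk0[j] - 4 data
 * bytes; on cylinder 0, head 1 every record is expected with a 44-byte
 * key and LABEL_SIZE - 44 data bytes. All other tracks are checked
 * against the plain expected blocksize with key length 0.
 */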
/*
* Check the format of a range of tracks of a DASD.
*/
static int dasd_eckd_check_device_format(struct dasd_device *base,
struct format_check_t *cdata,
int enable_pav)
{
struct dasd_eckd_private *private = base->private;
struct eckd_count *fmt_buffer;
struct irb irb;
int rpt_max, rpt_exp;
int fmt_buffer_size;
int trk_per_cyl;
int trkcount;
int tpm = 0;
int rc;
trk_per_cyl = private->rdc_data.trk_per_cyl;
/* Get maximum and expected amount of records per track */
rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);
trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);
fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
if (!fmt_buffer)
return -ENOMEM;
	/*
	 * A certain FICON feature subset is needed to operate in transport
	 * mode. Additionally, the support for transport mode is implicitly
	 * checked by comparing the buffer size with fcx_max_data. As long as
	 * the buffer size is smaller, we can operate in transport mode and
	 * process multiple tracks. If not, only one track at a time is
	 * processed using command mode.
	 */
if ((private->features.feature[40] & 0x04) &&
fmt_buffer_size <= private->fcx_max_data)
tpm = 1;
rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
tpm, fmt_buffer, rpt_max, &irb);
if (rc && rc != -EIO)
goto out;
if (rc == -EIO) {
/*
* If our first attempt with transport mode enabled comes back
* with an incorrect length error, we're going to retry the
* check with command mode.
*/
if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
tpm = 0;
rc = dasd_eckd_format_process_data(base, &cdata->expect,
enable_pav, tpm,
fmt_buffer, rpt_max,
&irb);
if (rc)
goto out;
} else {
goto out;
}
}
dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
trk_per_cyl, tpm);
out:
kfree(fmt_buffer);
return rc;
}
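/*
 * Worked example for the buffer sizing above, with hypothetical numbers:
 * checking 10 tracks with rpt_max = 50 allocates
 * 10 * 50 * sizeof(struct eckd_count) bytes; whether transport mode is
 * used then depends on the feature bit and on this size fitting into
 * fcx_max_data.
 */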
static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
{
if (cqr->retries < 0) {
cqr->status = DASD_CQR_FAILED;
return;
}
cqr->status = DASD_CQR_FILLED;
if (cqr->block && (cqr->startdev != cqr->block->base)) {
dasd_eckd_reset_ccw_to_base_io(cqr);
cqr->startdev = cqr->block->base;
cqr->lpm = dasd_path_get_opm(cqr->block->base);
}
}
static dasd_erp_fn_t
dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
{
struct dasd_device *device = (struct dasd_device *) cqr->startdev;
struct ccw_device *cdev = device->cdev;
switch (cdev->id.cu_type) {
case 0x3990:
case 0x2105:
case 0x2107:
case 0x1750:
return dasd_3990_erp_action;
case 0x9343:
case 0x3880:
default:
return dasd_default_erp_action;
}
}
static dasd_erp_fn_t
dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
{
return dasd_default_erp_postaction;
}
static void dasd_eckd_check_for_device_change(struct dasd_device *device,
struct dasd_ccw_req *cqr,
struct irb *irb)
{
char mask;
char *sense = NULL;
struct dasd_eckd_private *private = device->private;
/* first of all check for state change pending interrupt */
mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
if ((scsw_dstat(&irb->scsw) & mask) == mask) {
/*
* for alias only, not in offline processing
* and only if not suspended
*/
if (!device->block && private->lcu &&
device->state == DASD_STATE_ONLINE &&
!test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
!test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
/* schedule worker to reload device */
dasd_reload_device(device);
}
dasd_generic_handle_state_change(device);
return;
}
sense = dasd_get_sense(irb);
if (!sense)
return;
/* summary unit check */
if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
(scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"eckd suc: device already notified");
return;
}
sense = dasd_get_sense(irb);
if (!sense) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"eckd suc: no reason code available");
clear_bit(DASD_FLAG_SUC, &device->flags);
return;
}
private->suc_reason = sense[8];
DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
"eckd handle summary unit check: reason",
private->suc_reason);
dasd_get_device(device);
if (!schedule_work(&device->suc_work))
dasd_put_device(device);
return;
}
/* service information message SIM */
if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
dasd_3990_erp_handle_sim(device, sense);
return;
}
/* loss of device reservation is handled via base devices only
* as alias devices may be used with several bases
*/
if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
(sense[7] == 0x3F) &&
(scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
if (device->features & DASD_FEATURE_FAILONSLCK)
set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
dev_err(&device->cdev->dev,
"The device reservation was lost\n");
}
}
static int dasd_eckd_ras_sanity_checks(struct dasd_device *device,
unsigned int first_trk,
unsigned int last_trk)
{
struct dasd_eckd_private *private = device->private;
unsigned int trks_per_vol;
int rc = 0;
trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl;
if (first_trk >= trks_per_vol) {
dev_warn(&device->cdev->dev,
"Start track number %u used in the space release command is too big\n",
first_trk);
rc = -EINVAL;
} else if (last_trk >= trks_per_vol) {
dev_warn(&device->cdev->dev,
"Stop track number %u used in the space release command is too big\n",
last_trk);
rc = -EINVAL;
} else if (first_trk > last_trk) {
dev_warn(&device->cdev->dev,
"Start track %u used in the space release command exceeds the end track\n",
first_trk);
rc = -EINVAL;
}
return rc;
}
/*
 * Helper function to count the number of involved extents within a given
 * range, with extent alignment in mind.
 */
static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
{
int cur_pos = 0;
int count = 0;
int tmp;
if (from == to)
return 1;
/* Count first partial extent */
if (from % trks_per_ext != 0) {
tmp = from + trks_per_ext - (from % trks_per_ext) - 1;
if (tmp > to)
tmp = to;
cur_pos = tmp - from + 1;
count++;
}
/* Count full extents */
if (to - (from + cur_pos) + 1 >= trks_per_ext) {
tmp = to - ((to - trks_per_ext + 1) % trks_per_ext);
count += (tmp - (from + cur_pos) + 1) / trks_per_ext;
cur_pos = tmp;
}
/* Count last partial extent */
if (cur_pos < to)
count++;
return count;
}
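/*
 * Worked example, with hypothetical numbers: for trks_per_ext = 30,
 * count_exts(10, 95, 30) sees a first partial extent [10..29], two full
 * extents [30..59] and [60..89], and a last partial extent [90..95], so
 * it returns 4.
 */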
static int dasd_in_copy_relation(struct dasd_device *device)
{
struct dasd_pprc_data_sc4 *temp;
int rc;
if (!dasd_eckd_pprc_enabled(device))
return 0;
temp = kzalloc(sizeof(*temp), GFP_KERNEL);
if (!temp)
return -ENOMEM;
rc = dasd_eckd_query_pprc_status(device, temp);
if (!rc)
rc = temp->dev_info[0].state;
kfree(temp);
return rc;
}
/*
* Release allocated space for a given range or an entire volume.
*/
static struct dasd_ccw_req *
dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
struct request *req, unsigned int first_trk,
unsigned int last_trk, int by_extent)
{
struct dasd_eckd_private *private = device->private;
struct dasd_dso_ras_ext_range *ras_range;
struct dasd_rssd_features *features;
struct dasd_dso_ras_data *ras_data;
u16 heads, beg_head, end_head;
int cur_to_trk, cur_from_trk;
struct dasd_ccw_req *cqr;
u32 beg_cyl, end_cyl;
int copy_relation;
struct ccw1 *ccw;
int trks_per_ext;
size_t ras_size;
size_t size;
int nr_exts;
void *rq;
int i;
if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
return ERR_PTR(-EINVAL);
copy_relation = dasd_in_copy_relation(device);
if (copy_relation < 0)
return ERR_PTR(copy_relation);
rq = req ? blk_mq_rq_to_pdu(req) : NULL;
features = &private->features;
trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
nr_exts = 0;
if (by_extent)
nr_exts = count_exts(first_trk, last_trk, trks_per_ext);
ras_size = sizeof(*ras_data);
size = ras_size + (nr_exts * sizeof(*ras_range));
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq);
if (IS_ERR(cqr)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate RAS request");
return cqr;
}
ras_data = cqr->data;
memset(ras_data, 0, size);
ras_data->order = DSO_ORDER_RAS;
ras_data->flags.vol_type = 0; /* CKD volume */
/* Release specified extents or entire volume */
ras_data->op_flags.by_extent = by_extent;
/*
* This bit guarantees initialisation of tracks within an extent that is
* not fully specified, but is only supported with a certain feature
* subset and for devices not in a copy relation.
*/
if (features->feature[56] & 0x01 && !copy_relation)
ras_data->op_flags.guarantee_init = 1;
ras_data->lss = private->conf.ned->ID;
ras_data->dev_addr = private->conf.ned->unit_addr;
ras_data->nr_exts = nr_exts;
if (by_extent) {
heads = private->rdc_data.trk_per_cyl;
cur_from_trk = first_trk;
cur_to_trk = first_trk + trks_per_ext -
(first_trk % trks_per_ext) - 1;
if (cur_to_trk > last_trk)
cur_to_trk = last_trk;
ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size);
for (i = 0; i < nr_exts; i++) {
beg_cyl = cur_from_trk / heads;
beg_head = cur_from_trk % heads;
end_cyl = cur_to_trk / heads;
end_head = cur_to_trk % heads;
set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head);
set_ch_t(&ras_range->end_ext, end_cyl, end_head);
cur_from_trk = cur_to_trk + 1;
cur_to_trk = cur_from_trk + trks_per_ext - 1;
if (cur_to_trk > last_trk)
cur_to_trk = last_trk;
ras_range++;
}
}
ccw = cqr->cpaddr;
ccw->cda = (__u32)virt_to_phys(cqr->data);
ccw->cmd_code = DASD_ECKD_CCW_DSO;
ccw->count = size;
cqr->startdev = device;
cqr->memdev = device;
cqr->block = block;
cqr->retries = 256;
cqr->expires = device->default_expires * HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
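/*
 * Worked example for the per-extent ranges above, with hypothetical
 * numbers: for trks_per_ext = 30, heads = 15, first_trk = 10 and
 * last_trk = 95, the loop emits the track ranges [10..29], [30..59],
 * [60..89] and [90..95], each converted to cylinder/head pairs via
 * track / 15 and track % 15.
 */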
static int dasd_eckd_release_space_full(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
int rc;
cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0);
if (IS_ERR(cqr))
return PTR_ERR(cqr);
rc = dasd_sleep_on_interruptible(cqr);
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
static int dasd_eckd_release_space_trks(struct dasd_device *device,
unsigned int from, unsigned int to)
{
struct dasd_eckd_private *private = device->private;
struct dasd_block *block = device->block;
struct dasd_ccw_req *cqr, *n;
struct list_head ras_queue;
unsigned int device_exts;
int trks_per_ext;
int stop, step;
int cur_pos;
int rc = 0;
int retry;
INIT_LIST_HEAD(&ras_queue);
device_exts = private->real_cyl / dasd_eckd_ext_size(device);
trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl;
/* Make sure device limits are not exceeded */
step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX);
cur_pos = from;
do {
retry = 0;
while (cur_pos < to) {
stop = cur_pos + step -
((cur_pos + step) % trks_per_ext) - 1;
if (stop > to)
stop = to;
cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1);
if (IS_ERR(cqr)) {
rc = PTR_ERR(cqr);
if (rc == -ENOMEM) {
if (list_empty(&ras_queue))
goto out;
retry = 1;
break;
}
goto err_out;
}
spin_lock_irq(&block->queue_lock);
list_add_tail(&cqr->blocklist, &ras_queue);
spin_unlock_irq(&block->queue_lock);
cur_pos = stop + 1;
}
rc = dasd_sleep_on_queue_interruptible(&ras_queue);
err_out:
list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) {
device = cqr->startdev;
private = device->private;
spin_lock_irq(&block->queue_lock);
list_del_init(&cqr->blocklist);
spin_unlock_irq(&block->queue_lock);
dasd_sfree_request(cqr, device);
private->count--;
}
} while (retry);
out:
return rc;
}
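/*
 * Worked example for the chunk alignment above, with hypothetical
 * numbers: for trks_per_ext = 30 and step = 90, a chunk starting at
 * cur_pos = 10 ends at stop = 10 + 90 - ((10 + 90) % 30) - 1 = 89, i.e.
 * on an extent boundary, and the next chunk starts at track 90.
 */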
static int dasd_eckd_release_space(struct dasd_device *device,
struct format_data_t *rdata)
{
if (rdata->intensity & DASD_FMT_INT_ESE_FULL)
return dasd_eckd_release_space_full(device);
else if (rdata->intensity == 0)
return dasd_eckd_release_space_trks(device, rdata->start_unit,
rdata->stop_unit);
else
return -EINVAL;
}
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
struct dasd_device *startdev,
struct dasd_block *block,
struct request *req,
sector_t first_rec,
sector_t last_rec,
sector_t first_trk,
sector_t last_trk,
unsigned int first_offs,
unsigned int last_offs,
unsigned int blk_per_trk,
unsigned int blksize)
{
struct dasd_eckd_private *private;
unsigned long *idaws;
struct LO_eckd_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
struct req_iterator iter;
struct bio_vec bv;
char *dst;
unsigned int off;
int count, cidaw, cplength, datasize;
sector_t recid;
unsigned char cmd, rcmd;
int use_prefix;
struct dasd_device *basedev;
basedev = block->base;
private = basedev->private;
if (rq_data_dir(req) == READ)
cmd = DASD_ECKD_CCW_READ_MT;
else if (rq_data_dir(req) == WRITE)
cmd = DASD_ECKD_CCW_WRITE_MT;
else
return ERR_PTR(-EINVAL);
/* Check struct bio and count the number of blocks for the request. */
count = 0;
cidaw = 0;
rq_for_each_segment(bv, req, iter) {
if (bv.bv_len & (blksize - 1))
/* Eckd can only do full blocks. */
return ERR_PTR(-EINVAL);
count += bv.bv_len >> (block->s2b_shift + 9);
if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
cidaw += bv.bv_len >> (block->s2b_shift + 9);
}
/* Paranoia. */
if (count != last_rec - first_rec + 1)
return ERR_PTR(-EINVAL);
/* use the prefix command if available */
use_prefix = private->features.feature[8] & 0x01;
if (use_prefix) {
/* 1x prefix + number of blocks */
cplength = 2 + count;
/* 1x prefix + cidaws*sizeof(long) */
datasize = sizeof(struct PFX_eckd_data) +
sizeof(struct LO_eckd_data) +
cidaw * sizeof(unsigned long);
} else {
/* 1x define extent + 1x locate record + number of blocks */
cplength = 2 + count;
/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
datasize = sizeof(struct DE_eckd_data) +
sizeof(struct LO_eckd_data) +
cidaw * sizeof(unsigned long);
}
/* Find out the number of additional locate record ccws for cdl. */
if (private->uses_cdl && first_rec < 2*blk_per_trk) {
if (last_rec >= 2*blk_per_trk)
count = 2*blk_per_trk - first_rec;
cplength += count;
datasize += count*sizeof(struct LO_eckd_data);
}
/* Allocate the ccw request. */
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
startdev, blk_mq_rq_to_pdu(req));
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
/* First ccw is define extent or prefix. */
if (use_prefix) {
if (prefix(ccw++, cqr->data, first_trk,
last_trk, cmd, basedev, startdev) == -EAGAIN) {
/* Clock not in sync and XRC is enabled.
* Try again later.
*/
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-EAGAIN);
}
idaws = (unsigned long *) (cqr->data +
sizeof(struct PFX_eckd_data));
} else {
if (define_extent(ccw++, cqr->data, first_trk,
last_trk, cmd, basedev, 0) == -EAGAIN) {
/* Clock not in sync and XRC is enabled.
* Try again later.
*/
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-EAGAIN);
}
idaws = (unsigned long *) (cqr->data +
sizeof(struct DE_eckd_data));
}
	/* Build locate_record + read/write ccws. */
LO_data = (struct LO_eckd_data *) (idaws + cidaw);
recid = first_rec;
if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
/* Only standard blocks so there is just one locate record. */
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
last_rec - recid + 1, cmd, basedev, blksize);
}
rq_for_each_segment(bv, req, iter) {
dst = bvec_virt(&bv);
if (dasd_page_cache) {
char *copy = kmem_cache_alloc(dasd_page_cache,
GFP_DMA | __GFP_NOWARN);
if (copy && rq_data_dir(req) == WRITE)
memcpy(copy + bv.bv_offset, dst, bv.bv_len);
if (copy)
dst = copy + bv.bv_offset;
}
for (off = 0; off < bv.bv_len; off += blksize) {
sector_t trkid = recid;
unsigned int recoffs = sector_div(trkid, blk_per_trk);
rcmd = cmd;
count = blksize;
			/* Locate record for cdl special block? */
if (private->uses_cdl && recid < 2*blk_per_trk) {
if (dasd_eckd_cdl_special(blk_per_trk, recid)){
rcmd |= 0x8;
count = dasd_eckd_cdl_reclen(recid);
if (count < blksize &&
rq_data_dir(req) == READ)
memset(dst + count, 0xe5,
blksize - count);
}
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, LO_data++,
trkid, recoffs + 1,
1, rcmd, basedev, count);
}
			/* Locate record for standard blocks? */
if (private->uses_cdl && recid == 2*blk_per_trk) {
ccw[-1].flags |= CCW_FLAG_CC;
locate_record(ccw++, LO_data++,
trkid, recoffs + 1,
last_rec - recid + 1,
cmd, basedev, count);
}
/* Read/write ccw. */
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = rcmd;
ccw->count = count;
if (idal_is_needed(dst, blksize)) {
ccw->cda = (__u32)virt_to_phys(idaws);
ccw->flags = CCW_FLAG_IDA;
idaws = idal_create_words(idaws, dst, blksize);
} else {
ccw->cda = (__u32)virt_to_phys(dst);
ccw->flags = 0;
}
ccw++;
dst += blksize;
recid++;
}
}
if (blk_noretry_request(req) ||
block->base->features & DASD_FEATURE_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->startdev = startdev;
cqr->memdev = startdev;
cqr->block = block;
cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
cqr->lpm = dasd_path_get_ppm(startdev);
cqr->retries = startdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* Set flags to suppress output for expected errors */
if (dasd_eckd_is_ese(basedev)) {
set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
}
return cqr;
}
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
struct dasd_device *startdev,
struct dasd_block *block,
struct request *req,
sector_t first_rec,
sector_t last_rec,
sector_t first_trk,
sector_t last_trk,
unsigned int first_offs,
unsigned int last_offs,
unsigned int blk_per_trk,
unsigned int blksize)
{
unsigned long *idaws;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
struct req_iterator iter;
struct bio_vec bv;
char *dst, *idaw_dst;
unsigned int cidaw, cplength, datasize;
unsigned int tlf;
sector_t recid;
unsigned char cmd;
struct dasd_device *basedev;
unsigned int trkcount, count, count_to_trk_end;
unsigned int idaw_len, seg_len, part_len, len_to_track_end;
unsigned char new_track, end_idaw;
sector_t trkid;
unsigned int recoffs;
basedev = block->base;
if (rq_data_dir(req) == READ)
cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
else if (rq_data_dir(req) == WRITE)
cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
else
return ERR_PTR(-EINVAL);
	/* Track-based I/O needs IDAWs for each page, not just for
	 * 64 bit addresses. We need additional idals for pages
	 * that get filled from two tracks, so we use the number
	 * of records as an upper limit.
	 */
cidaw = last_rec - first_rec + 1;
trkcount = last_trk - first_trk + 1;
/* 1x prefix + one read/write ccw per track */
cplength = 1 + trkcount;
datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);
/* Allocate the ccw request. */
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
startdev, blk_mq_rq_to_pdu(req));
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
/* transfer length factor: how many bytes to read from the last track */
if (first_trk == last_trk)
tlf = last_offs - first_offs + 1;
else
tlf = last_offs + 1;
tlf *= blksize;
if (prefix_LRE(ccw++, cqr->data, first_trk,
last_trk, cmd, basedev, startdev,
1 /* format */, first_offs + 1,
trkcount, blksize,
tlf) == -EAGAIN) {
/* Clock not in sync and XRC is enabled.
* Try again later.
*/
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-EAGAIN);
}
	/*
	 * The translation of the request into ccw programs must meet the
	 * following conditions:
	 * - all idaws but the first and the last must address full pages
	 *   (or 2K blocks on 31-bit)
	 * - the scope of a ccw and its idal ends with the track boundaries
	 */
idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
recid = first_rec;
new_track = 1;
end_idaw = 0;
len_to_track_end = 0;
idaw_dst = NULL;
idaw_len = 0;
rq_for_each_segment(bv, req, iter) {
dst = bvec_virt(&bv);
seg_len = bv.bv_len;
while (seg_len) {
if (new_track) {
trkid = recid;
recoffs = sector_div(trkid, blk_per_trk);
count_to_trk_end = blk_per_trk - recoffs;
count = min((last_rec - recid + 1),
(sector_t)count_to_trk_end);
len_to_track_end = count * blksize;
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = cmd;
ccw->count = len_to_track_end;
ccw->cda = (__u32)virt_to_phys(idaws);
ccw->flags = CCW_FLAG_IDA;
ccw++;
recid += count;
new_track = 0;
/* first idaw for a ccw may start anywhere */
if (!idaw_dst)
idaw_dst = dst;
}
			/* If we start a new idaw, we must make sure that it
			 * starts on an IDA_BLOCK_SIZE boundary.
			 * If we continue an idaw, we must make sure that the
			 * current segment begins where the so far accumulated
			 * idaw ends.
			 */
if (!idaw_dst) {
if ((__u32)virt_to_phys(dst) & (IDA_BLOCK_SIZE - 1)) {
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-ERANGE);
} else
idaw_dst = dst;
}
if ((idaw_dst + idaw_len) != dst) {
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-ERANGE);
}
part_len = min(seg_len, len_to_track_end);
seg_len -= part_len;
dst += part_len;
idaw_len += part_len;
len_to_track_end -= part_len;
			/* The collected memory area ends on an IDA_BLOCK
			 * border -> create an idaw.
			 * idal_create_words will handle cases where idaw_len
			 * is larger than IDA_BLOCK_SIZE.
			 */
if (!((__u32)virt_to_phys(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE - 1)))
end_idaw = 1;
/* We also need to end the idaw at track end */
if (!len_to_track_end) {
new_track = 1;
end_idaw = 1;
}
if (end_idaw) {
idaws = idal_create_words(idaws, idaw_dst,
idaw_len);
idaw_dst = NULL;
idaw_len = 0;
end_idaw = 0;
}
}
}
if (blk_noretry_request(req) ||
block->base->features & DASD_FEATURE_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->startdev = startdev;
cqr->memdev = startdev;
cqr->block = block;
cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
cqr->lpm = dasd_path_get_ppm(startdev);
cqr->retries = startdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* Set flags to suppress output for expected errors */
if (dasd_eckd_is_ese(basedev))
set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
return cqr;
}
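/*
 * Worked example for the transfer length factor (tlf) used above, with
 * hypothetical numbers and blksize = 4096: a single-track request with
 * first_offs = 2 and last_offs = 9 transfers (9 - 2 + 1) * 4096 = 32768
 * bytes from that track; a multi-track request ending at last_offs = 9
 * transfers (9 + 1) * 4096 = 40960 bytes from its last track.
 */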
static int prepare_itcw(struct itcw *itcw,
unsigned int trk, unsigned int totrk, int cmd,
struct dasd_device *basedev,
struct dasd_device *startdev,
unsigned int rec_on_trk, int count,
unsigned int blksize,
unsigned int total_data_size,
unsigned int tlf,
unsigned int blk_per_trk)
{
struct PFX_eckd_data pfxdata;
struct dasd_eckd_private *basepriv, *startpriv;
struct DE_eckd_data *dedata;
struct LRE_eckd_data *lredata;
struct dcw *dcw;
u32 begcyl, endcyl;
u16 heads, beghead, endhead;
u8 pfx_cmd;
int rc = 0;
int sector = 0;
int dn, d;
/* setup prefix data */
basepriv = basedev->private;
startpriv = startdev->private;
dedata = &pfxdata.define_extent;
lredata = &pfxdata.locate_record;
memset(&pfxdata, 0, sizeof(pfxdata));
pfxdata.format = 1; /* PFX with LRE */
pfxdata.base_address = basepriv->conf.ned->unit_addr;
pfxdata.base_lss = basepriv->conf.ned->ID;
pfxdata.validity.define_extent = 1;
/* private uid is kept up to date, conf_data may be outdated */
if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
pfxdata.validity.verify_base = 1;
if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
pfxdata.validity.verify_base = 1;
pfxdata.validity.hyper_pav = 1;
}
switch (cmd) {
case DASD_ECKD_CCW_READ_TRACK_DATA:
dedata->mask.perm = 0x1;
dedata->attributes.operation = basepriv->attrib.operation;
dedata->blk_size = blksize;
dedata->ga_extended |= 0x42;
lredata->operation.orientation = 0x0;
lredata->operation.operation = 0x0C;
lredata->auxiliary.check_bytes = 0x01;
pfx_cmd = DASD_ECKD_CCW_PFX_READ;
break;
case DASD_ECKD_CCW_WRITE_TRACK_DATA:
dedata->mask.perm = 0x02;
dedata->attributes.operation = basepriv->attrib.operation;
dedata->blk_size = blksize;
rc = set_timestamp(NULL, dedata, basedev);
dedata->ga_extended |= 0x42;
lredata->operation.orientation = 0x0;
lredata->operation.operation = 0x3F;
lredata->extended_operation = 0x23;
lredata->auxiliary.check_bytes = 0x2;
/*
* If XRC is supported the System Time Stamp is set. The
* validity of the time stamp must be reflected in the prefix
* data as well.
*/
if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
pfx_cmd = DASD_ECKD_CCW_PFX;
break;
case DASD_ECKD_CCW_READ_COUNT_MT:
dedata->mask.perm = 0x1;
dedata->attributes.operation = DASD_BYPASS_CACHE;
dedata->ga_extended |= 0x42;
dedata->blk_size = blksize;
lredata->operation.orientation = 0x2;
lredata->operation.operation = 0x16;
lredata->auxiliary.check_bytes = 0x01;
pfx_cmd = DASD_ECKD_CCW_PFX_READ;
break;
default:
DBF_DEV_EVENT(DBF_ERR, basedev,
"prepare itcw, unknown opcode 0x%x", cmd);
BUG();
break;
}
if (rc)
return rc;
dedata->attributes.mode = 0x3; /* ECKD */
heads = basepriv->rdc_data.trk_per_cyl;
begcyl = trk / heads;
beghead = trk % heads;
endcyl = totrk / heads;
endhead = totrk % heads;
/* check for sequential prestage - enhance cylinder range */
if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
dedata->attributes.operation == DASD_SEQ_ACCESS) {
if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
endcyl += basepriv->attrib.nr_cyl;
else
endcyl = (basepriv->real_cyl - 1);
}
set_ch_t(&dedata->beg_ext, begcyl, beghead);
set_ch_t(&dedata->end_ext, endcyl, endhead);
dedata->ep_format = 0x20; /* records per track is valid */
dedata->ep_rec_per_track = blk_per_trk;
if (rec_on_trk) {
switch (basepriv->rdc_data.dev_type) {
case 0x3390:
dn = ceil_quot(blksize + 6, 232);
d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
break;
case 0x3380:
d = 7 + ceil_quot(blksize + 12, 32);
sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
break;
}
}
if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
lredata->auxiliary.length_valid = 0;
lredata->auxiliary.length_scope = 0;
lredata->sector = 0xff;
} else {
lredata->auxiliary.length_valid = 1;
lredata->auxiliary.length_scope = 1;
lredata->sector = sector;
}
lredata->auxiliary.imbedded_ccw_valid = 1;
lredata->length = tlf;
lredata->imbedded_ccw = cmd;
lredata->count = count;
set_ch_t(&lredata->seek_addr, begcyl, beghead);
lredata->search_arg.cyl = lredata->seek_addr.cyl;
lredata->search_arg.head = lredata->seek_addr.head;
lredata->search_arg.record = rec_on_trk;
dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
&pfxdata, sizeof(pfxdata), total_data_size);
return PTR_ERR_OR_ZERO(dcw);
}
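/*
 * Worked example for the 3390 sector calculation above, assuming
 * ceil_quot(a, b) == (a + b - 1) / b and blksize = 4096:
 * dn = ceil_quot(4102, 232) = 18 and d = 9 + ceil_quot(4210, 34) = 133,
 * so record 1 starts at sector (49 + 0 * 143) / 8 = 6 and record 2 at
 * sector (49 + 1 * 143) / 8 = 24.
 */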
static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
struct dasd_device *startdev,
struct dasd_block *block,
struct request *req,
sector_t first_rec,
sector_t last_rec,
sector_t first_trk,
sector_t last_trk,
unsigned int first_offs,
unsigned int last_offs,
unsigned int blk_per_trk,
unsigned int blksize)
{
struct dasd_ccw_req *cqr;
struct req_iterator iter;
struct bio_vec bv;
char *dst;
unsigned int trkcount, ctidaw;
unsigned char cmd;
struct dasd_device *basedev;
unsigned int tlf;
struct itcw *itcw;
struct tidaw *last_tidaw = NULL;
int itcw_op;
size_t itcw_size;
u8 tidaw_flags;
unsigned int seg_len, part_len, len_to_track_end;
unsigned char new_track;
sector_t recid, trkid;
unsigned int offs;
unsigned int count, count_to_trk_end;
int ret;
basedev = block->base;
if (rq_data_dir(req) == READ) {
cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
itcw_op = ITCW_OP_READ;
} else if (rq_data_dir(req) == WRITE) {
cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
itcw_op = ITCW_OP_WRITE;
} else
return ERR_PTR(-EINVAL);
	/* Track-based I/O needs to address all memory via TIDAWs,
	 * not just for 64 bit addresses. This allows us to map
	 * each segment directly to one tidaw.
	 * In the case of write requests, additional tidaws may
	 * be needed when a segment crosses a track boundary.
	 */
trkcount = last_trk - first_trk + 1;
ctidaw = 0;
rq_for_each_segment(bv, req, iter) {
++ctidaw;
}
if (rq_data_dir(req) == WRITE)
ctidaw += (last_trk - first_trk);
/* Allocate the ccw request. */
itcw_size = itcw_calc_size(0, ctidaw, 0);
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
blk_mq_rq_to_pdu(req));
if (IS_ERR(cqr))
return cqr;
/* transfer length factor: how many bytes to read from the last track */
if (first_trk == last_trk)
tlf = last_offs - first_offs + 1;
else
tlf = last_offs + 1;
tlf *= blksize;
itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
if (IS_ERR(itcw)) {
ret = -EINVAL;
goto out_error;
}
cqr->cpaddr = itcw_get_tcw(itcw);
if (prepare_itcw(itcw, first_trk, last_trk,
cmd, basedev, startdev,
first_offs + 1,
trkcount, blksize,
(last_rec - first_rec + 1) * blksize,
tlf, blk_per_trk) == -EAGAIN) {
/* Clock not in sync and XRC is enabled.
* Try again later.
*/
ret = -EAGAIN;
goto out_error;
}
len_to_track_end = 0;
	/*
	 * A tidaw can address 4k of memory, but must not cross page
	 * boundaries. We can let the block layer handle this by setting
	 * blk_queue_segment_boundary to page boundaries and
	 * blk_max_segment_size to page size when setting up the request queue.
	 * For write requests, a TIDAW must not cross track boundaries, because
	 * we have to set the CBC flag on the last tidaw for each track.
	 */
if (rq_data_dir(req) == WRITE) {
new_track = 1;
recid = first_rec;
rq_for_each_segment(bv, req, iter) {
dst = bvec_virt(&bv);
seg_len = bv.bv_len;
while (seg_len) {
if (new_track) {
trkid = recid;
offs = sector_div(trkid, blk_per_trk);
count_to_trk_end = blk_per_trk - offs;
count = min((last_rec - recid + 1),
(sector_t)count_to_trk_end);
len_to_track_end = count * blksize;
recid += count;
new_track = 0;
}
part_len = min(seg_len, len_to_track_end);
seg_len -= part_len;
len_to_track_end -= part_len;
/* We need to end the tidaw at track end */
if (!len_to_track_end) {
new_track = 1;
tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
} else
tidaw_flags = 0;
last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
dst, part_len);
if (IS_ERR(last_tidaw)) {
ret = -EINVAL;
goto out_error;
}
dst += part_len;
}
}
} else {
rq_for_each_segment(bv, req, iter) {
dst = bvec_virt(&bv);
last_tidaw = itcw_add_tidaw(itcw, 0x00,
dst, bv.bv_len);
if (IS_ERR(last_tidaw)) {
ret = -EINVAL;
goto out_error;
}
}
}
last_tidaw->flags |= TIDAW_FLAGS_LAST;
last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
itcw_finalize(itcw);
if (blk_noretry_request(req) ||
block->base->features & DASD_FEATURE_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->cpmode = 1;
cqr->startdev = startdev;
cqr->memdev = startdev;
cqr->block = block;
cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
cqr->lpm = dasd_path_get_ppm(startdev);
cqr->retries = startdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* Set flags to suppress output for expected errors */
if (dasd_eckd_is_ese(basedev)) {
set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
}
return cqr;
out_error:
dasd_sfree_request(cqr, startdev);
return ERR_PTR(ret);
}
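/*
 * Worked example for the write TIDAW split above, with hypothetical
 * numbers: given blk_per_trk = 12, blksize = 4096 and a request covering
 * records 10..14, the first track holds records 10 and 11
 * (len_to_track_end = 8192), so a 12k segment is split into an 8k tidaw
 * that ends the track (CBC inserted) and a 4k tidaw on the next track.
 */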
static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
struct dasd_block *block,
struct request *req)
{
int cmdrtd, cmdwtd;
int use_prefix;
int fcx_multitrack;
struct dasd_eckd_private *private;
struct dasd_device *basedev;
sector_t first_rec, last_rec;
sector_t first_trk, last_trk;
unsigned int first_offs, last_offs;
unsigned int blk_per_trk, blksize;
int cdlspecial;
unsigned int data_size;
struct dasd_ccw_req *cqr;
basedev = block->base;
private = basedev->private;
/* Calculate number of blocks/records per track. */
blksize = block->bp_block;
blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
if (blk_per_trk == 0)
return ERR_PTR(-EINVAL);
/* Calculate record id of first and last block. */
first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
first_offs = sector_div(first_trk, blk_per_trk);
last_rec = last_trk =
(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
last_offs = sector_div(last_trk, blk_per_trk);
cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
fcx_multitrack = private->features.feature[40] & 0x20;
data_size = blk_rq_bytes(req);
if (data_size % blksize)
return ERR_PTR(-EINVAL);
	/* tpm write requests add CBC data on each track boundary */
if (rq_data_dir(req) == WRITE)
data_size += (last_trk - first_trk) * 4;
/* is read track data and write track data in command mode supported? */
cmdrtd = private->features.feature[9] & 0x20;
cmdwtd = private->features.feature[12] & 0x40;
use_prefix = private->features.feature[8] & 0x01;
cqr = NULL;
if (cdlspecial || dasd_page_cache) {
/* do nothing, just fall through to the cmd mode single case */
} else if ((data_size <= private->fcx_max_data)
&& (fcx_multitrack || (first_trk == last_trk))) {
cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
first_rec, last_rec,
first_trk, last_trk,
first_offs, last_offs,
blk_per_trk, blksize);
if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
(PTR_ERR(cqr) != -ENOMEM))
cqr = NULL;
} else if (use_prefix &&
(((rq_data_dir(req) == READ) && cmdrtd) ||
((rq_data_dir(req) == WRITE) && cmdwtd))) {
cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
first_rec, last_rec,
first_trk, last_trk,
first_offs, last_offs,
blk_per_trk, blksize);
if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
(PTR_ERR(cqr) != -ENOMEM))
cqr = NULL;
}
if (!cqr)
cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
first_rec, last_rec,
first_trk, last_trk,
first_offs, last_offs,
blk_per_trk, blksize);
return cqr;
}
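/*
 * Summary of the path selection above: cdl special blocks and the
 * dasd_page_cache copy path always fall through to the command mode
 * single-block case; requests that fit into fcx_max_data and are either
 * single-track or on hardware with multitrack fcx support use the
 * transport mode track path; otherwise, if prefix and the command mode
 * read/write track data features are available, the command mode track
 * path is used.
 */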
static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
struct dasd_block *block,
struct request *req)
{
sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
unsigned int seg_len, len_to_track_end;
unsigned int cidaw, cplength, datasize;
sector_t first_trk, last_trk, sectors;
struct dasd_eckd_private *base_priv;
struct dasd_device *basedev;
struct req_iterator iter;
struct dasd_ccw_req *cqr;
unsigned int trkcount;
unsigned long *idaws;
unsigned int size;
unsigned char cmd;
struct bio_vec bv;
struct ccw1 *ccw;
int use_prefix;
void *data;
char *dst;
	/*
	 * Raw track access needs to be a multiple of 64k and on a 64k
	 * boundary. For read requests we can fix an incorrect alignment by
	 * padding the request with dummy pages.
	 */
start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
DASD_RAW_SECTORS_PER_TRACK;
end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
DASD_RAW_SECTORS_PER_TRACK;
basedev = block->base;
if ((start_padding_sectors || end_padding_sectors) &&
(rq_data_dir(req) == WRITE)) {
DBF_DEV_EVENT(DBF_ERR, basedev,
"raw write not track aligned (%llu,%llu) req %p",
start_padding_sectors, end_padding_sectors, req);
return ERR_PTR(-EINVAL);
}
first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
DASD_RAW_SECTORS_PER_TRACK;
trkcount = last_trk - first_trk + 1;
if (rq_data_dir(req) == READ)
cmd = DASD_ECKD_CCW_READ_TRACK;
else if (rq_data_dir(req) == WRITE)
cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
else
return ERR_PTR(-EINVAL);
	/*
	 * Raw track-based I/O needs IDAWs for each page,
	 * not just for 64 bit addresses.
	 */
cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;
/*
* struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
* of extended parameter. This is needed for write full track.
*/
base_priv = basedev->private;
use_prefix = base_priv->features.feature[8] & 0x01;
if (use_prefix) {
cplength = 1 + trkcount;
size = sizeof(struct PFX_eckd_data) + 2;
} else {
cplength = 2 + trkcount;
size = sizeof(struct DE_eckd_data) +
sizeof(struct LRE_eckd_data) + 2;
}
size = ALIGN(size, 8);
datasize = size + cidaw * sizeof(unsigned long);
/* Allocate the ccw request. */
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
datasize, startdev, blk_mq_rq_to_pdu(req));
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
data = cqr->data;
if (use_prefix) {
prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
startdev, 1, 0, trkcount, 0, 0);
} else {
define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
ccw[-1].flags |= CCW_FLAG_CC;
data += sizeof(struct DE_eckd_data);
locate_record_ext(ccw++, data, first_trk, 0,
trkcount, cmd, basedev, 0, 0);
}
idaws = (unsigned long *)(cqr->data + size);
len_to_track_end = 0;
if (start_padding_sectors) {
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = cmd;
/* maximum 3390 track size */
ccw->count = 57326;
		/* 64k maps to one track */
len_to_track_end = 65536 - start_padding_sectors * 512;
ccw->cda = (__u32)virt_to_phys(idaws);
ccw->flags |= CCW_FLAG_IDA;
ccw->flags |= CCW_FLAG_SLI;
ccw++;
for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
}
rq_for_each_segment(bv, req, iter) {
dst = bvec_virt(&bv);
seg_len = bv.bv_len;
if (cmd == DASD_ECKD_CCW_READ_TRACK)
memset(dst, 0, seg_len);
if (!len_to_track_end) {
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = cmd;
/* maximum 3390 track size */
ccw->count = 57326;
			/* 64k maps to one track */
len_to_track_end = 65536;
ccw->cda = (__u32)virt_to_phys(idaws);
ccw->flags |= CCW_FLAG_IDA;
ccw->flags |= CCW_FLAG_SLI;
ccw++;
}
len_to_track_end -= seg_len;
idaws = idal_create_words(idaws, dst, seg_len);
}
for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
if (blk_noretry_request(req) ||
block->base->features & DASD_FEATURE_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->startdev = startdev;
cqr->memdev = startdev;
cqr->block = block;
cqr->expires = startdev->default_expires * HZ;
cqr->lpm = dasd_path_get_ppm(startdev);
cqr->retries = startdev->default_retries;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
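/*
 * Worked example for the padding logic above, assuming
 * DASD_RAW_SECTORS_PER_TRACK == 128 (a 64k track of 512-byte sectors): a
 * read at blk_rq_pos = 200 for 300 sectors has
 * start_padding_sectors = 200 % 128 = 72, end_sector_offset =
 * 500 % 128 = 116 and end_padding_sectors = (128 - 116) % 128 = 12; the
 * 72 + 12 padding sectors are backed by rawpadpage.
 */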
static int
dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
struct dasd_eckd_private *private;
struct ccw1 *ccw;
struct req_iterator iter;
struct bio_vec bv;
char *dst, *cda;
unsigned int blksize, blk_per_trk, off;
sector_t recid;
int status;
if (!dasd_page_cache)
goto out;
private = cqr->block->base->private;
blksize = cqr->block->bp_block;
blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
ccw = cqr->cpaddr;
/* Skip over define extent & locate record. */
ccw++;
if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
ccw++;
rq_for_each_segment(bv, req, iter) {
dst = bvec_virt(&bv);
for (off = 0; off < bv.bv_len; off += blksize) {
/* Skip locate record. */
if (private->uses_cdl && recid <= 2*blk_per_trk)
ccw++;
if (dst) {
if (ccw->flags & CCW_FLAG_IDA)
cda = *((char **)phys_to_virt(ccw->cda));
else
cda = phys_to_virt(ccw->cda);
if (dst != cda) {
if (rq_data_dir(req) == READ)
memcpy(dst, cda, bv.bv_len);
kmem_cache_free(dasd_page_cache,
(void *)((addr_t)cda & PAGE_MASK));
}
dst = NULL;
}
ccw++;
recid++;
}
}
out:
status = cqr->status == DASD_CQR_DONE;
dasd_sfree_request(cqr, cqr->memdev);
return status;
}
/*
* Modify ccw/tcw in cqr so it can be started on a base device.
*
* Note that this is not enough to restart the cqr!
* Either reset cqr->startdev as well (summary unit check handling)
* or restart via separate cqr (as in ERP handling).
*/
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
{
struct ccw1 *ccw;
struct PFX_eckd_data *pfxdata;
struct tcw *tcw;
struct tccb *tccb;
struct dcw *dcw;
if (cqr->cpmode == 1) {
tcw = cqr->cpaddr;
tccb = tcw_get_tccb(tcw);
dcw = (struct dcw *)&tccb->tca[0];
pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
pfxdata->validity.verify_base = 0;
pfxdata->validity.hyper_pav = 0;
} else {
ccw = cqr->cpaddr;
pfxdata = cqr->data;
if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
pfxdata->validity.verify_base = 0;
pfxdata->validity.hyper_pav = 0;
}
}
}
#define DASD_ECKD_CHANQ_MAX_SIZE 4
static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
struct dasd_block *block,
struct request *req)
{
struct dasd_eckd_private *private;
struct dasd_device *startdev;
unsigned long flags;
struct dasd_ccw_req *cqr;
startdev = dasd_alias_get_start_dev(base);
if (!startdev)
startdev = base;
private = startdev->private;
if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
return ERR_PTR(-EBUSY);
spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
private->count++;
if ((base->features & DASD_FEATURE_USERAW))
cqr = dasd_eckd_build_cp_raw(startdev, block, req);
else
cqr = dasd_eckd_build_cp(startdev, block, req);
if (IS_ERR(cqr))
private->count--;
spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
return cqr;
}
static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
struct request *req)
{
struct dasd_eckd_private *private;
unsigned long flags;
spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
private = cqr->memdev->private;
private->count--;
spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
return dasd_eckd_free_cp(cqr, req);
}
static int
dasd_eckd_fill_info(struct dasd_device * device,
struct dasd_information2_t * info)
{
struct dasd_eckd_private *private = device->private;
info->label_block = 2;
info->FBA_layout = private->uses_cdl ? 0 : 1;
info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
info->characteristics_size = sizeof(private->rdc_data);
memcpy(info->characteristics, &private->rdc_data,
sizeof(private->rdc_data));
info->confdata_size = min_t(unsigned long, private->conf.len,
sizeof(info->configuration_data));
memcpy(info->configuration_data, private->conf.data,
info->confdata_size);
return 0;
}
/*
* SECTION: ioctl functions for eckd devices.
*/
/*
 * Release device ioctl.
 * Builds a channel program to release a previously reserved
 * (see dasd_eckd_reserve) device.
 */
static int
dasd_eckd_release(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
int rc;
struct ccw1 *ccw;
int useglobal;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
useglobal = 0;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
if (IS_ERR(cqr)) {
mutex_lock(&dasd_reserve_mutex);
useglobal = 1;
cqr = &dasd_reserve_req->cqr;
memset(cqr, 0, sizeof(*cqr));
memset(&dasd_reserve_req->ccw, 0,
sizeof(dasd_reserve_req->ccw));
cqr->cpaddr = &dasd_reserve_req->ccw;
cqr->data = &dasd_reserve_req->data;
cqr->magic = DASD_ECKD_MAGIC;
}
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 32;
ccw->cda = (__u32)virt_to_phys(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->retries = 2; /* set retry counter to enable basic ERP */
cqr->expires = 2 * HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_immediatly(cqr);
if (!rc)
clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
if (useglobal)
mutex_unlock(&dasd_reserve_mutex);
else
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
/*
* Reserve device ioctl.
* Options are set to 'synchronous wait for interrupt' and
* 'timeout the request'. This leads to a terminate IO if
* the interrupt is outstanding for a certain time.
*/
static int
dasd_eckd_reserve(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
int rc;
struct ccw1 *ccw;
int useglobal;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
useglobal = 0;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
if (IS_ERR(cqr)) {
mutex_lock(&dasd_reserve_mutex);
useglobal = 1;
cqr = &dasd_reserve_req->cqr;
memset(cqr, 0, sizeof(*cqr));
memset(&dasd_reserve_req->ccw, 0,
sizeof(dasd_reserve_req->ccw));
cqr->cpaddr = &dasd_reserve_req->ccw;
cqr->data = &dasd_reserve_req->data;
cqr->magic = DASD_ECKD_MAGIC;
}
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 32;
ccw->cda = (__u32)virt_to_phys(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->retries = 2; /* set retry counter to enable basic ERP */
cqr->expires = 2 * HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_immediatly(cqr);
if (!rc)
set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
if (useglobal)
mutex_unlock(&dasd_reserve_mutex);
else
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
/*
 * Steal lock ioctl - unconditional reserve device.
 * Builds a channel program to break a device's reservation
 * (unconditional reserve).
 */
static int
dasd_eckd_steal_lock(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
int rc;
struct ccw1 *ccw;
int useglobal;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
useglobal = 0;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
if (IS_ERR(cqr)) {
mutex_lock(&dasd_reserve_mutex);
useglobal = 1;
cqr = &dasd_reserve_req->cqr;
memset(cqr, 0, sizeof(*cqr));
memset(&dasd_reserve_req->ccw, 0,
sizeof(dasd_reserve_req->ccw));
cqr->cpaddr = &dasd_reserve_req->ccw;
cqr->data = &dasd_reserve_req->data;
cqr->magic = DASD_ECKD_MAGIC;
}
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_SLCK;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 32;
ccw->cda = (__u32)virt_to_phys(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
cqr->retries = 2; /* set retry counter to enable basic ERP */
cqr->expires = 2 * HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_immediatly(cqr);
if (!rc)
set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
if (useglobal)
mutex_unlock(&dasd_reserve_mutex);
else
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
/*
* SNID - Sense Path Group ID
* This ioctl may be used in situations where I/O is stalled due to
* a reserve, so if the normal dasd_smalloc_request fails, we use the
* preallocated dasd_reserve_req.
*/
static int dasd_eckd_snid(struct dasd_device *device,
void __user *argp)
{
struct dasd_ccw_req *cqr;
int rc;
struct ccw1 *ccw;
int useglobal;
struct dasd_snid_ioctl_data usrparm;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
return -EFAULT;
useglobal = 0;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
sizeof(struct dasd_snid_data), device,
NULL);
if (IS_ERR(cqr)) {
mutex_lock(&dasd_reserve_mutex);
useglobal = 1;
cqr = &dasd_reserve_req->cqr;
memset(cqr, 0, sizeof(*cqr));
memset(&dasd_reserve_req->ccw, 0,
sizeof(dasd_reserve_req->ccw));
cqr->cpaddr = &dasd_reserve_req->ccw;
cqr->data = &dasd_reserve_req->data;
cqr->magic = DASD_ECKD_MAGIC;
}
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_SNID;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 12;
ccw->cda = (__u32)virt_to_phys(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
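/*
 * DASD_CQR_ALLOW_SLOCK presumably lets this request run even while the
 * device is reserved by another host; SNID is explicitly meant for that
 * situation (see the comment above this function).
 */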
cqr->retries = 5;
cqr->expires = 10 * HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
cqr->lpm = usrparm.path_mask;
rc = dasd_sleep_on_immediatly(cqr);
/* verify that I/O processing didn't modify the path mask */
if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
rc = -EIO;
if (!rc) {
usrparm.data = *((struct dasd_snid_data *)cqr->data);
if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
rc = -EFAULT;
}
if (useglobal)
mutex_unlock(&dasd_reserve_mutex);
else
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
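/*
 * Illustrative userspace invocation (a sketch; the struct fields are
 * inferred from their use above, see asm/dasd.h for the definitive
 * layout):
 *
 *	struct dasd_snid_ioctl_data parm = { .path_mask = 0x80 };
 *	ioctl(fd, BIODASDSNID, &parm);
 *	// on success, parm.data holds the sensed path group ID data
 */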
/*
* Read performance statistics
*/
static int
dasd_eckd_performance(struct dasd_device *device, void __user *argp)
{
struct dasd_psf_prssd_data *prssdp;
struct dasd_rssd_perf_stats_t *stats;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data) +
sizeof(struct dasd_rssd_perf_stats_t)),
device, NULL);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
return PTR_ERR(cqr);
}
cqr->startdev = device;
cqr->memdev = device;
cqr->retries = 0;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
cqr->expires = 10 * HZ;
/* Prepare for Read Subsystem Data */
prssdp = (struct dasd_psf_prssd_data *) cqr->data;
memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = 0x01; /* Performance Statistics */
prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->cda = (__u32)virt_to_phys(prssdp);
/* Read Subsystem Data - Performance Statistics */
stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
ccw->cda = (__u32)virt_to_phys(stats);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on(cqr);
if (rc == 0) {
prssdp = (struct dasd_psf_prssd_data *) cqr->data;
stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
if (copy_to_user(argp, stats,
sizeof(struct dasd_rssd_perf_stats_t)))
rc = -EFAULT;
}
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
/*
* Get attributes (cache operations)
* Returns the cache attributes used in Define Extent (DE).
*/
static int
dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
{
struct dasd_eckd_private *private = device->private;
struct attrib_data_t attrib = private->attrib;
int rc;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!argp)
return -EINVAL;
rc = 0;
if (copy_to_user(argp, (long *) &attrib,
sizeof(struct attrib_data_t)))
rc = -EFAULT;
return rc;
}
/*
* Set attributes (cache operations)
* Stores the attributes for cache operation to be used in Define Extent (DE).
*/
static int
dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
{
struct dasd_eckd_private *private = device->private;
struct attrib_data_t attrib;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!argp)
return -EINVAL;
if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
return -EFAULT;
private->attrib = attrib;
dev_info(&device->cdev->dev,
"The DASD cache mode was set to %x (%i cylinder prestage)\n",
private->attrib.operation, private->attrib.nr_cyl);
return 0;
}
/*
* Issue syscall I/O to EMC Symmetrix array.
* CCWs are PSF and RSSD
*/
static int dasd_symm_io(struct dasd_device *device, void __user *argp)
{
struct dasd_symmio_parms usrparm;
char *psf_data, *rssd_result;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
char psf0, psf1;
int rc;
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
return -EACCES;
psf0 = psf1 = 0;
/* Copy parms from caller */
rc = -EFAULT;
if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
goto out;
if (is_compat_task()) {
/* Make sure pointers are sane even on 31 bit. */
rc = -EINVAL;
if ((usrparm.psf_data >> 32) != 0)
goto out;
if ((usrparm.rssd_result >> 32) != 0)
goto out;
usrparm.psf_data &= 0x7fffffffULL;
usrparm.rssd_result &= 0x7fffffffULL;
}
/* at least 2 bytes are accessed and should be allocated */
if (usrparm.psf_data_len < 2) {
DBF_DEV_EVENT(DBF_WARNING, device,
"Symmetrix ioctl invalid data length %d",
usrparm.psf_data_len);
rc = -EINVAL;
goto out;
}
/* alloc I/O data area */
psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
if (!psf_data || !rssd_result) {
rc = -ENOMEM;
goto out_free;
}
/* get syscall header from user space */
rc = -EFAULT;
if (copy_from_user(psf_data,
(void __user *)(unsigned long) usrparm.psf_data,
usrparm.psf_data_len))
goto out_free;
psf0 = psf_data[0];
psf1 = psf_data[1];
/* setup CCWs for PSF + RSSD */
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
rc = PTR_ERR(cqr);
goto out_free;
}
cqr->startdev = device;
cqr->memdev = device;
cqr->retries = 3;
cqr->expires = 10 * HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* Build the ccws */
ccw = cqr->cpaddr;
/* PSF ccw */
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = usrparm.psf_data_len;
ccw->flags |= CCW_FLAG_CC;
ccw->cda = (__u32)virt_to_phys(psf_data);
ccw++;
/* RSSD ccw */
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = usrparm.rssd_result_len;
ccw->flags = CCW_FLAG_SLI;
ccw->cda = (__u32)virt_to_phys(rssd_result);
rc = dasd_sleep_on(cqr);
if (rc)
goto out_sfree;
rc = -EFAULT;
if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
rssd_result, usrparm.rssd_result_len))
goto out_sfree;
rc = 0;
out_sfree:
dasd_sfree_request(cqr, cqr->memdev);
out_free:
kfree(rssd_result);
kfree(psf_data);
out:
DBF_DEV_EVENT(DBF_WARNING, device,
"Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
(int) psf0, (int) psf1, rc);
return rc;
}
static int
dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
{
struct dasd_device *device = block->base;
switch (cmd) {
case BIODASDGATTR:
return dasd_eckd_get_attrib(device, argp);
case BIODASDSATTR:
return dasd_eckd_set_attrib(device, argp);
case BIODASDPSRD:
return dasd_eckd_performance(device, argp);
case BIODASDRLSE:
return dasd_eckd_release(device);
case BIODASDRSRV:
return dasd_eckd_reserve(device);
case BIODASDSLCK:
return dasd_eckd_steal_lock(device);
case BIODASDSNID:
return dasd_eckd_snid(device, argp);
case BIODASDSYMMIO:
return dasd_symm_io(device, argp);
default:
return -ENOTTY;
}
}
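/*
 * Userspace reaches these handlers through ioctls on the DASD block
 * device node, e.g. (illustrative sketch, error handling omitted):
 *
 *	int fd = open("/dev/dasda", O_RDONLY);
 *	ioctl(fd, BIODASDRSRV);		// reserve the device
 *	ioctl(fd, BIODASDRLSE);		// release it again
 */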
/*
* Dump the range of CCWs into the 'page' buffer
* and print it.
*/
static void
dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
{
int len, count;
char *datap;
len = 0;
while (from <= to) {
len += sprintf(page + len, PRINTK_HEADER
" CCW %p: %08X %08X DAT:",
from, ((int *) from)[0], ((int *) from)[1]);
/* get pointer to data (consider IDALs) */
if (from->flags & CCW_FLAG_IDA)
datap = (char *)*((addr_t *)phys_to_virt(from->cda));
else
datap = phys_to_virt(from->cda);
/* dump data (max 128 bytes) */
for (count = 0; count < from->count && count < 128; count++) {
if (count % 32 == 0)
len += sprintf(page + len, "\n");
if (count % 8 == 0)
len += sprintf(page + len, " ");
if (count % 4 == 0)
len += sprintf(page + len, " ");
len += sprintf(page + len, "%02x", datap[count]);
}
len += sprintf(page + len, "\n");
from++;
}
if (len > 0)
printk(KERN_ERR "%s", page);
}
static void
dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
char *reason)
{
u64 *sense;
u64 *stat;
sense = (u64 *) dasd_get_sense(irb);
stat = (u64 *) &irb->scsw;
if (sense) {
DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
"%016llx %016llx %016llx %016llx",
reason, *stat, *((u32 *) (stat + 1)),
sense[0], sense[1], sense[2], sense[3]);
} else {
DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
reason, *stat, *((u32 *) (stat + 1)),
"NO VALID SENSE");
}
}
/*
* Print sense data and related channel program.
* It is printed in parts because the printk buffer is only 1024 bytes.
*/
static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
struct dasd_ccw_req *req, struct irb *irb)
{
char *page;
struct ccw1 *first, *last, *fail, *from, *to;
int len, sl, sct;
page = (char *) get_zeroed_page(GFP_ATOMIC);
if (page == NULL) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"No memory to dump sense data\n");
return;
}
/* dump the sense data */
len = sprintf(page, PRINTK_HEADER
" I/O status report for device %s:\n",
dev_name(&device->cdev->dev));
len += sprintf(page + len, PRINTK_HEADER
" in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
"CS:%02X RC:%d\n",
req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
req ? req->intrc : 0);
len += sprintf(page + len, PRINTK_HEADER
" device %s: Failing CCW: %p\n",
dev_name(&device->cdev->dev),
phys_to_virt(irb->scsw.cmd.cpa));
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
len += sprintf(page + len, PRINTK_HEADER
" Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
for (sct = 0; sct < 8; sct++) {
len += sprintf(page + len, " %02x",
irb->ecw[8 * sl + sct]);
}
len += sprintf(page + len, "\n");
}
if (irb->ecw[27] & DASD_SENSE_BIT_0) {
/* 24 Byte Sense Data */
sprintf(page + len, PRINTK_HEADER
" 24 Byte: %x MSG %x, "
"%s MSGb to SYSOP\n",
irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
irb->ecw[1] & 0x10 ? "" : "no");
} else {
/* 32 Byte Sense Data */
sprintf(page + len, PRINTK_HEADER
" 32 Byte: Format: %x "
"Exception class %x\n",
irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
}
} else {
sprintf(page + len, PRINTK_HEADER
" SORRY - NO VALID SENSE AVAILABLE\n");
}
printk(KERN_ERR "%s", page);
if (req) {
/* req == NULL for unsolicited interrupts */
/* dump the Channel Program (max 140 Bytes per line) */
/* Count CCW and print first CCWs (maximum 7) */
first = req->cpaddr;
for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
to = min(first + 6, last);
printk(KERN_ERR PRINTK_HEADER " Related CP in req: %p\n", req);
dasd_eckd_dump_ccw_range(first, to, page);
/* print failing CCW area (maximum 4) */
/* scsw->cda is either valid or zero */
from = ++to;
fail = phys_to_virt(irb->scsw.cmd.cpa); /* failing CCW */
if (from < fail - 2) {
from = fail - 2; /* there is a gap - print header */
printk(KERN_ERR PRINTK_HEADER "......\n");
}
to = min(fail + 1, last);
dasd_eckd_dump_ccw_range(from, to, page + len);
/* print last CCWs (maximum 2) */
len = 0;
from = max(from, ++to);
if (from < last - 1) {
from = last - 1; /* there is a gap - print header */
printk(KERN_ERR PRINTK_HEADER "......\n");
}
dasd_eckd_dump_ccw_range(from, last, page + len);
}
free_page((unsigned long) page);
}
/*
* Print sense data from a tcw.
*/
static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
struct dasd_ccw_req *req, struct irb *irb)
{
char *page;
int len, sl, sct, residual;
struct tsb *tsb;
u8 *sense, *rcq;
page = (char *) get_zeroed_page(GFP_ATOMIC);
if (page == NULL) {
DBF_DEV_EVENT(DBF_WARNING, device, " %s",
"No memory to dump sense data");
return;
}
/* dump the sense data */
len = sprintf(page, PRINTK_HEADER
" I/O status report for device %s:\n",
dev_name(&device->cdev->dev));
len += sprintf(page + len, PRINTK_HEADER
" in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
"CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
irb->scsw.tm.fcxs,
(irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
req ? req->intrc : 0);
len += sprintf(page + len, PRINTK_HEADER
" device %s: Failing TCW: %p\n",
dev_name(&device->cdev->dev),
phys_to_virt(irb->scsw.tm.tcw));
tsb = NULL;
sense = NULL;
if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw));
if (tsb) {
len += sprintf(page + len, PRINTK_HEADER
" tsb->length %d\n", tsb->length);
len += sprintf(page + len, PRINTK_HEADER
" tsb->flags %x\n", tsb->flags);
len += sprintf(page + len, PRINTK_HEADER
" tsb->dcw_offset %d\n", tsb->dcw_offset);
len += sprintf(page + len, PRINTK_HEADER
" tsb->count %d\n", tsb->count);
residual = tsb->count - 28;
len += sprintf(page + len, PRINTK_HEADER
" residual %d\n", residual);
switch (tsb->flags & 0x07) {
case 1: /* tsa_iostat */
len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.iostat.dev_time %d\n",
tsb->tsa.iostat.dev_time);
len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.iostat.def_time %d\n",
tsb->tsa.iostat.def_time);
len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.iostat.queue_time %d\n",
tsb->tsa.iostat.queue_time);
len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.iostat.dev_busy_time %d\n",
tsb->tsa.iostat.dev_busy_time);
len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.iostat.dev_act_time %d\n",
tsb->tsa.iostat.dev_act_time);
sense = tsb->tsa.iostat.sense;
break;
case 2: /* ts_ddpc */
len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
for (sl = 0; sl < 2; sl++) {
len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.ddpc.rcq %2d-%2d: ",
(8 * sl), ((8 * sl) + 7));
rcq = tsb->tsa.ddpc.rcq;
for (sct = 0; sct < 8; sct++) {
len += sprintf(page + len, " %02x",
rcq[8 * sl + sct]);
}
len += sprintf(page + len, "\n");
}
sense = tsb->tsa.ddpc.sense;
break;
case 3: /* tsa_intrg */
len += sprintf(page + len, PRINTK_HEADER
" tsb->tsa.intrg.: not supported yet\n");
break;
}
if (sense) {
for (sl = 0; sl < 4; sl++) {
len += sprintf(page + len, PRINTK_HEADER
" Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
for (sct = 0; sct < 8; sct++) {
len += sprintf(page + len, " %02x",
sense[8 * sl + sct]);
}
len += sprintf(page + len, "\n");
}
if (sense[27] & DASD_SENSE_BIT_0) {
/* 24 Byte Sense Data */
sprintf(page + len, PRINTK_HEADER
" 24 Byte: %x MSG %x, "
"%s MSGb to SYSOP\n",
sense[7] >> 4, sense[7] & 0x0f,
sense[1] & 0x10 ? "" : "no");
} else {
/* 32 Byte Sense Data */
sprintf(page + len, PRINTK_HEADER
" 32 Byte: Format: %x "
"Exception class %x\n",
sense[6] & 0x0f, sense[22] >> 4);
}
} else {
sprintf(page + len, PRINTK_HEADER
" SORRY - NO VALID SENSE AVAILABLE\n");
}
} else {
sprintf(page + len, PRINTK_HEADER
" SORRY - NO TSB DATA AVAILABLE\n");
}
printk(KERN_ERR "%s", page);
free_page((unsigned long) page);
}
static void dasd_eckd_dump_sense(struct dasd_device *device,
struct dasd_ccw_req *req, struct irb *irb)
{
u8 *sense = dasd_get_sense(irb);
if (scsw_is_tm(&irb->scsw)) {
/*
* In some cases the 'File Protected' or 'Incorrect Length'
* error might be expected and log messages shouldn't be written
* then. Check if the according suppress bit is set.
*/
if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
return;
if (scsw_cstat(&irb->scsw) == 0x40 &&
test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
return;
dasd_eckd_dump_sense_tcw(device, req, irb);
} else {
/*
* In some cases the 'Command Reject' or 'No Record Found'
* error might be expected and log messages shouldn't be
* written then. Check if the according suppress bit is set.
*/
if (sense && sense[0] & SNS0_CMD_REJECT &&
test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
return;
if (sense && sense[1] & SNS1_NO_REC_FOUND &&
test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
return;
dasd_eckd_dump_sense_ccw(device, req, irb);
}
}
static int dasd_eckd_reload_device(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
char print_uid[DASD_UID_STRLEN];
int rc, old_base;
struct dasd_uid uid;
unsigned long flags;
/*
* remove device from alias handling to prevent new requests
* from being scheduled on the wrong alias device
*/
dasd_alias_remove_device(device);
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
old_base = private->uid.base_unit_addr;
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
/* Read Configuration Data */
rc = dasd_eckd_read_conf(device);
if (rc)
goto out_err;
dasd_eckd_read_fc_security(device);
rc = dasd_eckd_generate_uid(device);
if (rc)
goto out_err;
/*
* update unit address configuration and
* add device to alias management
*/
dasd_alias_update_add_device(device);
dasd_eckd_get_uid(device, &uid);
if (old_base != uid.base_unit_addr) {
dasd_eckd_get_uid_string(&private->conf, print_uid);
dev_info(&device->cdev->dev,
"An Alias device was reassigned to a new base device "
"with UID: %s\n", print_uid);
}
return 0;
out_err:
return -1;
}
static int dasd_eckd_read_message_buffer(struct dasd_device *device,
struct dasd_rssd_messages *messages,
__u8 lpum)
{
struct dasd_rssd_messages *message_buf;
struct dasd_psf_prssd_data *prssdp;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data) +
sizeof(struct dasd_rssd_messages)),
device, NULL);
if (IS_ERR(cqr)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate read message buffer request");
return PTR_ERR(cqr);
}
cqr->lpm = lpum;
retry:
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->expires = 10 * HZ;
set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
/* dasd_sleep_on_immediatly does not do complex error
 * recovery, so clear the ERP flag and set the retry counter
 * to do basic ERP */
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
cqr->retries = 256;
/* Prepare for Read Subsystem Data */
prssdp = (struct dasd_psf_prssd_data *) cqr->data;
memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = 0x03; /* Message Buffer */
/* all other bytes of prssdp must be zero */
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->flags |= CCW_FLAG_SLI;
ccw->cda = (__u32)virt_to_phys(prssdp);
/* Read Subsystem Data - message buffer */
message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
memset(message_buf, 0, sizeof(struct dasd_rssd_messages));
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_rssd_messages);
ccw->flags |= CCW_FLAG_SLI;
ccw->cda = (__u32)virt_to_phys(message_buf);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_immediatly(cqr);
if (rc == 0) {
prssdp = (struct dasd_psf_prssd_data *) cqr->data;
message_buf = (struct dasd_rssd_messages *)
(prssdp + 1);
memcpy(messages, message_buf,
sizeof(struct dasd_rssd_messages));
} else if (cqr->lpm) {
/*
* On z/VM we might not be able to do I/O on the requested path,
* but we get the required information on any path, so retry
* with an open path mask.
*/
cqr->lpm = 0;
goto retry;
} else
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Reading messages failed with rc=%d\n"
, rc);
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
static int dasd_eckd_query_host_access(struct dasd_device *device,
struct dasd_psf_query_host_access *data)
{
struct dasd_eckd_private *private = device->private;
struct dasd_psf_query_host_access *host_access;
struct dasd_psf_prssd_data *prssdp;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
/* not available for HYPER PAV alias devices */
if (!device->block && private->lcu->pav == HYPER_PAV)
return -EOPNOTSUPP;
/* may not be supported by the storage server */
if (!(private->features.feature[14] & 0x80))
return -EOPNOTSUPP;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
sizeof(struct dasd_psf_prssd_data) + 1,
device, NULL);
if (IS_ERR(cqr)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate read message buffer request");
return PTR_ERR(cqr);
}
host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
if (!host_access) {
dasd_sfree_request(cqr, device);
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate host_access buffer");
return -ENOMEM;
}
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->retries = 256;
cqr->expires = 10 * HZ;
/* Prepare for Read Subsystem Data */
prssdp = (struct dasd_psf_prssd_data *) cqr->data;
memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = PSF_SUBORDER_QHA; /* query host access */
/* LSS and Volume that will be queried */
prssdp->lss = private->conf.ned->ID;
prssdp->volume = private->conf.ned->unit_addr;
/* all other bytes of prssdp must be zero */
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->flags |= CCW_FLAG_SLI;
ccw->cda = (__u32)virt_to_phys(prssdp);
/* Read Subsystem Data - query host access */
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_psf_query_host_access);
ccw->flags |= CCW_FLAG_SLI;
ccw->cda = (__u32)virt_to_phys(host_access);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
/* the command might not be supported, suppress error message */
__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
rc = dasd_sleep_on_interruptible(cqr);
if (rc == 0) {
*data = *host_access;
} else {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"Reading host access data failed with rc=%d\n",
rc);
rc = -EOPNOTSUPP;
}
dasd_sfree_request(cqr, cqr->memdev);
kfree(host_access);
return rc;
}
/*
* return number of grouped devices
*/
static int dasd_eckd_host_access_count(struct dasd_device *device)
{
struct dasd_psf_query_host_access *access;
struct dasd_ckd_path_group_entry *entry;
struct dasd_ckd_host_information *info;
int count = 0;
int rc, i;
access = kzalloc(sizeof(*access), GFP_NOIO);
if (!access) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate access buffer");
return -ENOMEM;
}
rc = dasd_eckd_query_host_access(device, access);
if (rc) {
kfree(access);
return rc;
}
info = (struct dasd_ckd_host_information *)
access->host_access_information;
for (i = 0; i < info->entry_count; i++) {
entry = (struct dasd_ckd_path_group_entry *)
(info->entry + i * info->entry_size);
if (entry->status_flags & DASD_ECKD_PG_GROUPED)
count++;
}
kfree(access);
return count;
}
/*
* write host access information to a sequential file
*/
static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
{
struct dasd_psf_query_host_access *access;
struct dasd_ckd_path_group_entry *entry;
struct dasd_ckd_host_information *info;
char sysplex[9] = "";
int rc, i;
access = kzalloc(sizeof(*access), GFP_NOIO);
if (!access) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate access buffer");
return -ENOMEM;
}
rc = dasd_eckd_query_host_access(device, access);
if (rc) {
kfree(access);
return rc;
}
info = (struct dasd_ckd_host_information *)
access->host_access_information;
for (i = 0; i < info->entry_count; i++) {
entry = (struct dasd_ckd_path_group_entry *)
(info->entry + i * info->entry_size);
/* PGID */
seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
/* FLAGS */
seq_printf(m, "status_flags %02x\n", entry->status_flags);
/* SYSPLEX NAME */
memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
EBCASC(sysplex, sizeof(sysplex));
seq_printf(m, "sysplex_name %8s\n", sysplex);
/* SUPPORTED CYLINDER */
seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
/* TIMESTAMP */
seq_printf(m, "timestamp %lu\n", (unsigned long)
entry->timestamp);
}
kfree(access);
return 0;
}
static struct dasd_device
*copy_relation_find_device(struct dasd_copy_relation *copy,
char *busid)
{
int i;
for (i = 0; i < DASD_CP_ENTRIES; i++) {
if (copy->entry[i].configured &&
strncmp(copy->entry[i].busid, busid, DASD_BUS_ID_SIZE) == 0)
return copy->entry[i].device;
}
return NULL;
}
/*
* set the new active/primary device
*/
static void copy_pair_set_active(struct dasd_copy_relation *copy, char *new_busid,
char *old_busid)
{
int i;
for (i = 0; i < DASD_CP_ENTRIES; i++) {
if (copy->entry[i].configured &&
strncmp(copy->entry[i].busid, new_busid,
DASD_BUS_ID_SIZE) == 0) {
copy->active = &copy->entry[i];
copy->entry[i].primary = true;
} else if (copy->entry[i].configured &&
strncmp(copy->entry[i].busid, old_busid,
DASD_BUS_ID_SIZE) == 0) {
copy->entry[i].primary = false;
}
}
}
/*
* Swap the roles of a given copy pair.
* During the swap operation the block device relation is disconnected
* from the old primary and connected to the new one.
*
* I/O is paused on the block queue before the swap and may be resumed afterwards.
*/
static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid,
char *sec_busid)
{
struct dasd_device *primary, *secondary;
struct dasd_copy_relation *copy;
struct dasd_block *block;
struct gendisk *gdp;
copy = device->copy;
if (!copy)
return DASD_COPYPAIRSWAP_INVALID;
primary = copy->active->device;
if (!primary)
return DASD_COPYPAIRSWAP_INVALID;
/* double check if swap has correct primary */
if (strncmp(dev_name(&primary->cdev->dev), prim_busid, DASD_BUS_ID_SIZE) != 0)
return DASD_COPYPAIRSWAP_PRIMARY;
secondary = copy_relation_find_device(copy, sec_busid);
if (!secondary)
return DASD_COPYPAIRSWAP_SECONDARY;
/*
* usually the device should already be quiesced for the swap;
* out of paranoia, stop the device and requeue requests again
*/
dasd_device_set_stop_bits(primary, DASD_STOPPED_PPRC);
dasd_device_set_stop_bits(secondary, DASD_STOPPED_PPRC);
dasd_generic_requeue_all_requests(primary);
/* swap DASD internal device <> block assignment */
block = primary->block;
primary->block = NULL;
secondary->block = block;
block->base = secondary;
/* set new primary device in COPY relation */
copy_pair_set_active(copy, sec_busid, prim_busid);
/* swap blocklayer device link */
gdp = block->gdp;
dasd_add_link_to_gendisk(gdp, secondary);
/* re-enable device */
dasd_device_remove_stop_bits(primary, DASD_STOPPED_PPRC);
dasd_device_remove_stop_bits(secondary, DASD_STOPPED_PPRC);
dasd_schedule_device_bh(secondary);
return DASD_COPYPAIRSWAP_SUCCESS;
}
/*
* Perform Subsystem Function - Peer-to-Peer Remote Copy Extended Query
*/
static int dasd_eckd_query_pprc_status(struct dasd_device *device,
struct dasd_pprc_data_sc4 *data)
{
struct dasd_pprc_data_sc4 *pprc_data;
struct dasd_psf_prssd_data *prssdp;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
sizeof(*prssdp) + sizeof(*pprc_data) + 1,
device, NULL);
if (IS_ERR(cqr)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate query PPRC status request");
return PTR_ERR(cqr);
}
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->retries = 256;
cqr->expires = 10 * HZ;
/* Prepare for Read Subsystem Data */
prssdp = (struct dasd_psf_prssd_data *)cqr->data;
memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = PSF_SUBORDER_PPRCEQ;
prssdp->varies[0] = PPRCEQ_SCOPE_4;
pprc_data = (struct dasd_pprc_data_sc4 *)(prssdp + 1);
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->flags |= CCW_FLAG_SLI;
ccw->cda = (__u32)virt_to_phys(prssdp);
/* Read Subsystem Data - PPRC Extended Query */
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*pprc_data);
ccw->flags |= CCW_FLAG_SLI;
ccw->cda = (__u32)virt_to_phys(pprc_data);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_interruptible(cqr);
if (rc == 0) {
*data = *pprc_data;
} else {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"PPRC Extended Query failed with rc=%d\n",
rc);
rc = -EOPNOTSUPP;
}
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
/*
* ECKD NOP - no operation
*/
static int dasd_eckd_nop(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 1, device, NULL);
if (IS_ERR(cqr)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate NOP request");
return PTR_ERR(cqr);
}
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->retries = 1;
cqr->expires = 10 * HZ;
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_NOP;
ccw->flags |= CCW_FLAG_SLI;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
rc = dasd_sleep_on_interruptible(cqr);
if (rc != 0) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
"NOP failed with rc=%d\n", rc);
rc = -EOPNOTSUPP;
}
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
static int dasd_eckd_device_ping(struct dasd_device *device)
{
return dasd_eckd_nop(device);
}
/*
* Perform Subsystem Function - CUIR response
*/
static int
dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
__u32 message_id, __u8 lpum)
{
struct dasd_psf_cuir_response *psf_cuir;
int pos = pathmask_to_pos(lpum);
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
int rc;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
sizeof(struct dasd_psf_cuir_response),
device, NULL);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate PSF-CUIR request");
return PTR_ERR(cqr);
}
psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
psf_cuir->cc = response;
psf_cuir->chpid = device->path[pos].chpid;
psf_cuir->message_id = message_id;
psf_cuir->cssid = device->path[pos].cssid;
psf_cuir->ssid = device->path[pos].ssid;
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->cda = (__u32)virt_to_phys(psf_cuir);
ccw->flags = CCW_FLAG_SLI;
ccw->count = sizeof(struct dasd_psf_cuir_response);
cqr->startdev = device;
cqr->memdev = device;
cqr->block = NULL;
cqr->retries = 256;
cqr->expires = 10*HZ;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
rc = dasd_sleep_on(cqr);
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
/*
* Return the configuration data referenced by the record selector
* if one is specified; otherwise return the conf_data pointer for
* the path specified by lpum.
*/
static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
__u8 lpum,
struct dasd_cuir_message *cuir)
{
struct dasd_conf_data *conf_data;
int path, pos;
if (cuir->record_selector == 0)
goto out;
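/* scan all eight possible paths, most significant mask bit (0x80) first */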
for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
conf_data = device->path[pos].conf_data;
if (conf_data->gneq.record_selector ==
cuir->record_selector)
return conf_data;
}
out:
return device->path[pathmask_to_pos(lpum)].conf_data;
}
/*
* This function determines the scope of a reconfiguration request by
* analysing the path and device selection data provided in the CUIR request.
* Returns a path mask containing the CUIR affected paths for the given device.
*
* If the CUIR request does not contain the required information, return the
* path mask of the path on which the attention message for the CUIR request
* was received.
*/
static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
struct dasd_cuir_message *cuir)
{
struct dasd_conf_data *ref_conf_data;
unsigned long bitmask = 0, mask = 0;
struct dasd_conf_data *conf_data;
unsigned int pos, path;
char *ref_gneq, *gneq;
char *ref_ned, *ned;
int tbcpm = 0;
/* if the CUIR request does not specify the scope, use the path
the attention message was presented on */
if (!cuir->ned_map ||
!(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
return lpum;
/* get reference conf data */
ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
/* reference ned is determined by ned_map field */
pos = 8 - ffs(cuir->ned_map);
ref_ned = (char *)&ref_conf_data->neds[pos];
ref_gneq = (char *)&ref_conf_data->gneq;
/* transfer 24 bit neq_map to mask */
mask = cuir->neq_map[2];
mask |= cuir->neq_map[1] << 8;
mask |= cuir->neq_map[0] << 16;
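/* e.g. neq_map = {0x12, 0x34, 0x56} yields mask = 0x123456 */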
for (path = 0; path < 8; path++) {
/* initialise data per path */
bitmask = mask;
conf_data = device->path[path].conf_data;
pos = 8 - ffs(cuir->ned_map);
ned = (char *) &conf_data->neds[pos];
/* compare reference ned and per path ned */
if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
continue;
gneq = (char *)&conf_data->gneq;
/* compare reference gneq and per path gneq under a
24 bit mask, where mask bit 0 corresponds to byte 31
of the gneq and mask bit 23 to byte 8 */
while (bitmask) {
pos = ffs(bitmask) - 1;
if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
!= 0)
break;
clear_bit(pos, &bitmask);
}
if (bitmask)
continue;
/* device and path match the reference values
add path to CUIR scope */
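/* path 0 corresponds to mask bit 0x80, path 7 to bit 0x01 */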
tbcpm |= 0x80 >> path;
}
return tbcpm;
}
static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
unsigned long paths, int action)
{
int pos;
while (paths) {
/* get position of bit in mask */
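/* ffs() is 1-based, so e.g. paths = 0x80 (only bit 7 set) yields pos 0 */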
pos = 8 - ffs(paths);
/* get channel path descriptor from this position */
if (action == CUIR_QUIESCE)
pr_warn("Service on the storage server caused path %x.%02x to go offline",
device->path[pos].cssid,
device->path[pos].chpid);
else if (action == CUIR_RESUME)
pr_info("Path %x.%02x is back online after service on the storage server",
device->path[pos].cssid,
device->path[pos].chpid);
clear_bit(7 - pos, &paths);
}
}
static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
struct dasd_cuir_message *cuir)
{
unsigned long tbcpm;
tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
/* nothing to do if path is not in use */
if (!(dasd_path_get_opm(device) & tbcpm))
return 0;
if (!(dasd_path_get_opm(device) & ~tbcpm)) {
/* no path would be left if the CUIR action is taken
return error */
return -EINVAL;
}
/* remove device from operational path mask */
dasd_path_remove_opm(device, tbcpm);
dasd_path_add_cuirpm(device, tbcpm);
return tbcpm;
}
/*
* walk through all devices and build a path mask to quiesce them;
* return an error if the last path to a device would be removed
*
* if only some of the devices are quiesced when an error
* occurs, no onlining is necessary; the storage server will
* notify the devices that were already set offline again
*/
static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
struct dasd_cuir_message *cuir)
{
struct dasd_eckd_private *private = device->private;
struct alias_pav_group *pavgroup, *tempgroup;
struct dasd_device *dev, *n;
unsigned long paths = 0;
unsigned long flags;
int tbcpm;
/* active devices */
list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
alias_list) {
spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
if (tbcpm < 0)
goto out_err;
paths |= tbcpm;
}
/* inactive devices */
list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
alias_list) {
spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
if (tbcpm < 0)
goto out_err;
paths |= tbcpm;
}
/* devices in PAV groups */
list_for_each_entry_safe(pavgroup, tempgroup,
&private->lcu->grouplist, group) {
list_for_each_entry_safe(dev, n, &pavgroup->baselist,
alias_list) {
spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
spin_unlock_irqrestore(
get_ccwdev_lock(dev->cdev), flags);
if (tbcpm < 0)
goto out_err;
paths |= tbcpm;
}
list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
alias_list) {
spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
spin_unlock_irqrestore(
get_ccwdev_lock(dev->cdev), flags);
if (tbcpm < 0)
goto out_err;
paths |= tbcpm;
}
}
/* notify user about all paths affected by CUIR action */
dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
return 0;
out_err:
return tbcpm;
}
static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
struct dasd_cuir_message *cuir)
{
struct dasd_eckd_private *private = device->private;
struct alias_pav_group *pavgroup, *tempgroup;
struct dasd_device *dev, *n;
unsigned long paths = 0;
int tbcpm;
/*
* the path may have been added through a generic path event before;
* only trigger path verification if the path is not already in use
*/
list_for_each_entry_safe(dev, n,
&private->lcu->active_devices,
alias_list) {
tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
paths |= tbcpm;
if (!(dasd_path_get_opm(dev) & tbcpm)) {
dasd_path_add_tbvpm(dev, tbcpm);
dasd_schedule_device_bh(dev);
}
}
list_for_each_entry_safe(dev, n,
&private->lcu->inactive_devices,
alias_list) {
tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
paths |= tbcpm;
if (!(dasd_path_get_opm(dev) & tbcpm)) {
dasd_path_add_tbvpm(dev, tbcpm);
dasd_schedule_device_bh(dev);
}
}
/* devices in PAV groups */
list_for_each_entry_safe(pavgroup, tempgroup,
&private->lcu->grouplist,
group) {
list_for_each_entry_safe(dev, n,
&pavgroup->baselist,
alias_list) {
tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
paths |= tbcpm;
if (!(dasd_path_get_opm(dev) & tbcpm)) {
dasd_path_add_tbvpm(dev, tbcpm);
dasd_schedule_device_bh(dev);
}
}
list_for_each_entry_safe(dev, n,
&pavgroup->aliaslist,
alias_list) {
tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
paths |= tbcpm;
if (!(dasd_path_get_opm(dev) & tbcpm)) {
dasd_path_add_tbvpm(dev, tbcpm);
dasd_schedule_device_bh(dev);
}
}
}
/* notify user about all paths affected by CUIR action */
dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
return 0;
}
static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
__u8 lpum)
{
struct dasd_cuir_message *cuir = messages;
int response;
DBF_DEV_EVENT(DBF_WARNING, device,
"CUIR request: %016llx %016llx %016llx %08x",
((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
((u32 *)cuir)[3]);
if (cuir->code == CUIR_QUIESCE) {
/* quiesce */
if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
response = PSF_CUIR_LAST_PATH;
else
response = PSF_CUIR_COMPLETED;
} else if (cuir->code == CUIR_RESUME) {
/* resume */
dasd_eckd_cuir_resume(device, lpum, cuir);
response = PSF_CUIR_COMPLETED;
} else
response = PSF_CUIR_NOT_SUPPORTED;
dasd_eckd_psf_cuir_response(device, response,
cuir->message_id, lpum);
DBF_DEV_EVENT(DBF_WARNING, device,
"CUIR response: %d on message ID %08x", response,
cuir->message_id);
/* to make sure there is no attention left schedule work again */
device->discipline->check_attention(device, lpum);
}
static void dasd_eckd_oos_resume(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
struct alias_pav_group *pavgroup, *tempgroup;
struct dasd_device *dev, *n;
unsigned long flags;
spin_lock_irqsave(&private->lcu->lock, flags);
list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
alias_list) {
if (dev->stopped & DASD_STOPPED_NOSPC)
dasd_generic_space_avail(dev);
}
list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
alias_list) {
if (dev->stopped & DASD_STOPPED_NOSPC)
dasd_generic_space_avail(dev);
}
/* devices in PAV groups */
list_for_each_entry_safe(pavgroup, tempgroup,
&private->lcu->grouplist,
group) {
list_for_each_entry_safe(dev, n, &pavgroup->baselist,
alias_list) {
if (dev->stopped & DASD_STOPPED_NOSPC)
dasd_generic_space_avail(dev);
}
list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
alias_list) {
if (dev->stopped & DASD_STOPPED_NOSPC)
dasd_generic_space_avail(dev);
}
}
spin_unlock_irqrestore(&private->lcu->lock, flags);
}
static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages,
__u8 lpum)
{
struct dasd_oos_message *oos = messages;
switch (oos->code) {
case REPO_WARN:
case POOL_WARN:
dev_warn(&device->cdev->dev,
"Extent pool usage has reached a critical value\n");
dasd_eckd_oos_resume(device);
break;
case REPO_EXHAUST:
case POOL_EXHAUST:
dev_warn(&device->cdev->dev,
"Extent pool is exhausted\n");
break;
case REPO_RELIEVE:
case POOL_RELIEVE:
dev_info(&device->cdev->dev,
"Extent pool physical space constraint has been relieved\n");
break;
}
/* In any case, update related data */
dasd_eckd_read_ext_pool_info(device);
/* to make sure there is no attention left schedule work again */
device->discipline->check_attention(device, lpum);
}
static void dasd_eckd_check_attention_work(struct work_struct *work)
{
struct check_attention_work_data *data;
struct dasd_rssd_messages *messages;
struct dasd_device *device;
int rc;
data = container_of(work, struct check_attention_work_data, worker);
device = data->device;
messages = kzalloc(sizeof(*messages), GFP_KERNEL);
if (!messages) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate attention message buffer");
goto out;
}
rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
if (rc)
goto out;
if (messages->length == ATTENTION_LENGTH_CUIR &&
messages->format == ATTENTION_FORMAT_CUIR)
dasd_eckd_handle_cuir(device, messages, data->lpum);
if (messages->length == ATTENTION_LENGTH_OOS &&
messages->format == ATTENTION_FORMAT_OOS)
dasd_eckd_handle_oos(device, messages, data->lpum);
out:
dasd_put_device(device);
kfree(messages);
kfree(data);
}
static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
{
struct check_attention_work_data *data;
data = kzalloc(sizeof(*data), GFP_ATOMIC);
if (!data)
return -ENOMEM;
INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
dasd_get_device(device);
data->device = device;
data->lpum = lpum;
schedule_work(&data->worker);
return 0;
}
static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
{
if (~lpum & dasd_path_get_opm(device)) {
dasd_path_add_nohpfpm(device, lpum);
dasd_path_remove_opm(device, lpum);
dev_err(&device->cdev->dev,
"Channel path %02X lost HPF functionality and is disabled\n",
lpum);
return 1;
}
return 0;
}
static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
dev_err(&device->cdev->dev,
"High Performance FICON disabled\n");
private->fcx_max_data = 0;
}
static int dasd_eckd_hpf_enabled(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
return private->fcx_max_data ? 1 : 0;
}
static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
struct irb *irb)
{
struct dasd_eckd_private *private = device->private;
if (!private->fcx_max_data) {
/* sanity check for no HPF, the error makes no sense */
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Trying to disable HPF for a non HPF device");
return;
}
if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
dasd_eckd_disable_hpf_device(device);
} else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
return;
dasd_eckd_disable_hpf_device(device);
dasd_path_set_tbvpm(device,
dasd_path_get_hpfpm(device));
}
/*
* prevent any new I/O from being started on the device and schedule a
* requeue of existing requests
*/
dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
dasd_schedule_requeue(device);
}
/*
* Initialize block layer request queue.
*/
static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
{
unsigned int logical_block_size = block->bp_block;
struct request_queue *q = block->gdp->queue;
struct dasd_device *device = block->base;
int max;
if (device->features & DASD_FEATURE_USERAW) {
/*
* the max_blocks value for raw_track access is 256;
* it is higher than the native ECKD value because we
* only need one CCW per track, so the max_hw_sectors are
* 2048 x 512B = 1024kB = 16 tracks
*/
max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
} else {
max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
}
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
q->limits.max_dev_sectors = max;
blk_queue_logical_block_size(q, logical_block_size);
blk_queue_max_hw_sectors(q, max);
blk_queue_max_segments(q, USHRT_MAX);
/* With page sized segments each segment can be translated into one idaw/tidaw */
blk_queue_max_segment_size(q, PAGE_SIZE);
blk_queue_segment_boundary(q, PAGE_SIZE - 1);
blk_queue_dma_alignment(q, PAGE_SIZE - 1);
}
static struct ccw_driver dasd_eckd_driver = {
.driver = {
.name = "dasd-eckd",
.owner = THIS_MODULE,
.dev_groups = dasd_dev_groups,
},
.ids = dasd_eckd_ids,
.probe = dasd_eckd_probe,
.remove = dasd_generic_remove,
.set_offline = dasd_generic_set_offline,
.set_online = dasd_eckd_set_online,
.notify = dasd_generic_notify,
.path_event = dasd_generic_path_event,
.shutdown = dasd_generic_shutdown,
.uc_handler = dasd_generic_uc_handler,
.int_class = IRQIO_DAS,
};
static struct dasd_discipline dasd_eckd_discipline = {
.owner = THIS_MODULE,
.name = "ECKD",
.ebcname = "ECKD",
.check_device = dasd_eckd_check_characteristics,
.uncheck_device = dasd_eckd_uncheck_device,
.do_analysis = dasd_eckd_do_analysis,
.pe_handler = dasd_eckd_pe_handler,
.basic_to_ready = dasd_eckd_basic_to_ready,
.online_to_ready = dasd_eckd_online_to_ready,
.basic_to_known = dasd_eckd_basic_to_known,
.setup_blk_queue = dasd_eckd_setup_blk_queue,
.fill_geometry = dasd_eckd_fill_geometry,
.start_IO = dasd_start_IO,
.term_IO = dasd_term_IO,
.handle_terminated_request = dasd_eckd_handle_terminated_request,
.format_device = dasd_eckd_format_device,
.check_device_format = dasd_eckd_check_device_format,
.erp_action = dasd_eckd_erp_action,
.erp_postaction = dasd_eckd_erp_postaction,
.check_for_device_change = dasd_eckd_check_for_device_change,
.build_cp = dasd_eckd_build_alias_cp,
.free_cp = dasd_eckd_free_alias_cp,
.dump_sense = dasd_eckd_dump_sense,
.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
.fill_info = dasd_eckd_fill_info,
.ioctl = dasd_eckd_ioctl,
.reload = dasd_eckd_reload_device,
.get_uid = dasd_eckd_get_uid,
.kick_validate = dasd_eckd_kick_validate_server,
.check_attention = dasd_eckd_check_attention,
.host_access_count = dasd_eckd_host_access_count,
.hosts_print = dasd_hosts_print,
.handle_hpf_error = dasd_eckd_handle_hpf_error,
.disable_hpf = dasd_eckd_disable_hpf_device,
.hpf_enabled = dasd_eckd_hpf_enabled,
.reset_path = dasd_eckd_reset_path,
.is_ese = dasd_eckd_is_ese,
.space_allocated = dasd_eckd_space_allocated,
.space_configured = dasd_eckd_space_configured,
.logical_capacity = dasd_eckd_logical_capacity,
.release_space = dasd_eckd_release_space,
.ext_pool_id = dasd_eckd_ext_pool_id,
.ext_size = dasd_eckd_ext_size,
.ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
.ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
.ext_pool_oos = dasd_eckd_ext_pool_oos,
.ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
.ese_format = dasd_eckd_ese_format,
.ese_read = dasd_eckd_ese_read,
.pprc_status = dasd_eckd_query_pprc_status,
.pprc_enabled = dasd_eckd_pprc_enabled,
.copy_pair_swap = dasd_eckd_copy_pair_swap,
.device_ping = dasd_eckd_device_ping,
};
static int __init
dasd_eckd_init(void)
{
int ret;
ASCEBC(dasd_eckd_discipline.ebcname, 4);
dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
GFP_KERNEL | GFP_DMA);
if (!dasd_reserve_req)
return -ENOMEM;
dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
GFP_KERNEL | GFP_DMA);
if (!dasd_vol_info_req) {
kfree(dasd_reserve_req);
return -ENOMEM;
}
pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
GFP_KERNEL | GFP_DMA);
if (!pe_handler_worker) {
kfree(dasd_reserve_req);
kfree(dasd_vol_info_req);
return -ENOMEM;
}
rawpadpage = (void *)__get_free_page(GFP_KERNEL);
if (!rawpadpage) {
kfree(pe_handler_worker);
kfree(dasd_reserve_req);
kfree(dasd_vol_info_req);
return -ENOMEM;
}
ret = ccw_driver_register(&dasd_eckd_driver);
if (!ret)
wait_for_device_probe();
else {
kfree(pe_handler_worker);
kfree(dasd_reserve_req);
kfree(dasd_vol_info_req);
free_page((unsigned long)rawpadpage);
}
return ret;
}
static void __exit
dasd_eckd_cleanup(void)
{
ccw_driver_unregister(&dasd_eckd_driver);
kfree(pe_handler_worker);
kfree(dasd_reserve_req);
kfree(dasd_vol_info_req);	/* was leaked on module exit */
free_page((unsigned long)rawpadpage);
}
module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup);
| linux-master | drivers/s390/block/dasd_eckd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <[email protected]>
* Horst Hummel <[email protected]>
* Carsten Otte <[email protected]>
* Martin Schwidefsky <[email protected]>
* Bugreports.to..: <[email protected]>
* Copyright IBM Corp. 1999, 2001
*
* gendisk related functions for the dasd driver.
*
*/
#define KMSG_COMPONENT "dasd"
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/blkpg.h>
#include <linux/uaccess.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd_gendisk:"
#include "dasd_int.h"
static unsigned int queue_depth = 32;
static unsigned int nr_hw_queues = 4;
module_param(queue_depth, uint, 0444);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");
module_param(nr_hw_queues, uint, 0444);
MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");
/*
* Allocate and register gendisk structure for device.
*/
int dasd_gendisk_alloc(struct dasd_block *block)
{
struct gendisk *gdp;
struct dasd_device *base;
int len, rc;
/* Make sure the minor for this device exists. */
base = block->base;
if (base->devindex >= DASD_PER_MAJOR)
return -EBUSY;
block->tag_set.ops = &dasd_mq_ops;
block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
block->tag_set.nr_hw_queues = nr_hw_queues;
block->tag_set.queue_depth = queue_depth;
block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
block->tag_set.numa_node = NUMA_NO_NODE;
rc = blk_mq_alloc_tag_set(&block->tag_set);
if (rc)
return rc;
gdp = blk_mq_alloc_disk(&block->tag_set, block);
if (IS_ERR(gdp)) {
blk_mq_free_tag_set(&block->tag_set);
return PTR_ERR(gdp);
}
/* Initialize gendisk structure. */
gdp->major = DASD_MAJOR;
gdp->first_minor = base->devindex << DASD_PARTN_BITS;
gdp->minors = 1 << DASD_PARTN_BITS;
gdp->fops = &dasd_device_operations;
/*
* Set device name.
* dasda - dasdz : 26 devices
* dasdaa - dasdzz : 676 devices, added up = 702
* dasdaaa - dasdzzz : 17576 devices, added up = 18278
* dasdaaaa - dasdzzzz : 456976 devices, added up = 475252
*/
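/*
 * For example: devindex 0 -> "dasda", 25 -> "dasdz",
 * 26 -> "dasdaa", 701 -> "dasdzz", 702 -> "dasdaaa".
 */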
len = sprintf(gdp->disk_name, "dasd");
if (base->devindex > 25) {
if (base->devindex > 701) {
if (base->devindex > 18277)
len += sprintf(gdp->disk_name + len, "%c",
'a'+(((base->devindex-18278)
/17576)%26));
len += sprintf(gdp->disk_name + len, "%c",
'a'+(((base->devindex-702)/676)%26));
}
len += sprintf(gdp->disk_name + len, "%c",
'a'+(((base->devindex-26)/26)%26));
}
len += sprintf(gdp->disk_name + len, "%c", 'a'+(base->devindex%26));
if (base->features & DASD_FEATURE_READONLY ||
test_bit(DASD_FLAG_DEVICE_RO, &base->flags))
set_disk_ro(gdp, 1);
dasd_add_link_to_gendisk(gdp, base);
block->gdp = gdp;
set_capacity(block->gdp, 0);
rc = device_add_disk(&base->cdev->dev, block->gdp, NULL);
if (rc) {
dasd_gendisk_free(block);
return rc;
}
return 0;
}
/*
* Unregister and free gendisk structure for device.
*/
void dasd_gendisk_free(struct dasd_block *block)
{
if (block->gdp) {
del_gendisk(block->gdp);
block->gdp->private_data = NULL;
put_disk(block->gdp);
block->gdp = NULL;
blk_mq_free_tag_set(&block->tag_set);
}
}
/*
* Trigger a partition detection.
*/
int dasd_scan_partitions(struct dasd_block *block)
{
struct block_device *bdev;
int rc;
bdev = blkdev_get_by_dev(disk_devt(block->gdp), BLK_OPEN_READ, NULL,
NULL);
if (IS_ERR(bdev)) {
DBF_DEV_EVENT(DBF_ERR, block->base,
"scan partitions error, blkdev_get returned %ld",
PTR_ERR(bdev));
return -ENODEV;
}
mutex_lock(&block->gdp->open_mutex);
rc = bdev_disk_changed(block->gdp, false);
mutex_unlock(&block->gdp->open_mutex);
if (rc)
DBF_DEV_EVENT(DBF_ERR, block->base,
"scan partitions error, rc %d", rc);
/*
* Since the matching blkdev_put call to the blkdev_get in
* this function is not called before dasd_destroy_partitions
* the offline open_count limit needs to be increased from
* 0 to 1. This is done by setting device->bdev (see
* dasd_generic_set_offline). As long as the partition
* detection is running no offline should be allowed. That
* is why the assignment to device->bdev is done AFTER
* the BLKRRPART ioctl.
*/
block->bdev = bdev;
return 0;
}
/*
* Remove all inodes in the system for a device, delete the
* partitions and make the device unusable by setting its size to zero.
*/
void dasd_destroy_partitions(struct dasd_block *block)
{
struct block_device *bdev;
/*
* Get the bdev pointer from the device structure and clear
* device->bdev to lower the offline open_count limit again.
*/
bdev = block->bdev;
block->bdev = NULL;
mutex_lock(&bdev->bd_disk->open_mutex);
bdev_disk_changed(bdev->bd_disk, true);
mutex_unlock(&bdev->bd_disk->open_mutex);
/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
blkdev_put(bdev, NULL);
}
int dasd_gendisk_init(void)
{
int rc;
/* Register to static dasd major 94 */
rc = register_blkdev(DASD_MAJOR, "dasd");
if (rc != 0) {
pr_warn("Registering the device driver with major number %d failed\n",
DASD_MAJOR);
return rc;
}
return 0;
}
void dasd_gendisk_exit(void)
{
unregister_blkdev(DASD_MAJOR, "dasd");
}
| linux-master | drivers/s390/block/dasd_genhd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* dcssblk.c -- the S/390 block driver for dcss memory
*
* Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer
*/
#define KMSG_COMPONENT "dcssblk"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/pfn_t.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/io.h>
#include <asm/extmem.h>
#define DCSSBLK_NAME "dcssblk"
#define DCSSBLK_MINORS_PER_DISK 1
#define DCSSBLK_PARM_LEN 400
#define DCSS_BUS_ID_SIZE 20
static int dcssblk_open(struct gendisk *disk, blk_mode_t mode);
static void dcssblk_release(struct gendisk *disk);
static void dcssblk_submit_bio(struct bio *bio);
static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
long nr_pages, enum dax_access_mode mode, void **kaddr,
pfn_t *pfn);
static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
static int dcssblk_major;
static const struct block_device_operations dcssblk_devops = {
.owner = THIS_MODULE,
.submit_bio = dcssblk_submit_bio,
.open = dcssblk_open,
.release = dcssblk_release,
};
static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev,
pgoff_t pgoff, size_t nr_pages)
{
long rc;
void *kaddr;
rc = dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS,
&kaddr, NULL);
if (rc < 0)
return dax_mem2blk_err(rc);
memset(kaddr, 0, nr_pages << PAGE_SHIFT);
dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
return 0;
}
static const struct dax_operations dcssblk_dax_ops = {
.direct_access = dcssblk_dax_direct_access,
.zero_page_range = dcssblk_dax_zero_page_range,
};
struct dcssblk_dev_info {
struct list_head lh;
struct device dev;
char segment_name[DCSS_BUS_ID_SIZE];
atomic_t use_count;
struct gendisk *gd;
unsigned long start;
unsigned long end;
int segment_type;
unsigned char save_pending;
unsigned char is_shared;
int num_of_segments;
struct list_head seg_list;
struct dax_device *dax_dev;
};
struct segment_info {
struct list_head lh;
char segment_name[DCSS_BUS_ID_SIZE];
unsigned long start;
unsigned long end;
int segment_type;
};
static ssize_t dcssblk_add_store(struct device * dev, struct device_attribute *attr, const char * buf,
size_t count);
static ssize_t dcssblk_remove_store(struct device * dev, struct device_attribute *attr, const char * buf,
size_t count);
static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store);
static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store);
static struct device *dcssblk_root_dev;
static LIST_HEAD(dcssblk_devices);
static struct rw_semaphore dcssblk_devices_sem;
/*
* release function for segment device.
*/
static void
dcssblk_release_segment(struct device *dev)
{
struct dcssblk_dev_info *dev_info;
struct segment_info *entry, *temp;
dev_info = container_of(dev, struct dcssblk_dev_info, dev);
list_for_each_entry_safe(entry, temp, &dev_info->seg_list, lh) {
list_del(&entry->lh);
kfree(entry);
}
kfree(dev_info);
module_put(THIS_MODULE);
}
/*
 * Get a minor number. Must be called with
 * down_write(&dcssblk_devices_sem) held, and the
 * device needs to be enqueued before the semaphore is
 * released.
*/
static int
dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
{
int minor, found;
struct dcssblk_dev_info *entry;
if (dev_info == NULL)
return -EINVAL;
for (minor = 0; minor < (1<<MINORBITS); minor++) {
found = 0;
// test if minor available
list_for_each_entry(entry, &dcssblk_devices, lh)
if (minor == entry->gd->first_minor)
found++;
		if (!found)
			break; // got unused minor
}
if (found)
return -EBUSY;
dev_info->gd->first_minor = minor;
return 0;
}
/*
* get the struct dcssblk_dev_info from dcssblk_devices
* for the given name.
* down_read(&dcssblk_devices_sem) must be held.
*/
static struct dcssblk_dev_info *
dcssblk_get_device_by_name(char *name)
{
struct dcssblk_dev_info *entry;
list_for_each_entry(entry, &dcssblk_devices, lh) {
if (!strcmp(name, entry->segment_name)) {
return entry;
}
}
return NULL;
}
/*
* get the struct segment_info from seg_list
* for the given name.
* down_read(&dcssblk_devices_sem) must be held.
*/
static struct segment_info *
dcssblk_get_segment_by_name(char *name)
{
struct dcssblk_dev_info *dev_info;
struct segment_info *entry;
list_for_each_entry(dev_info, &dcssblk_devices, lh) {
list_for_each_entry(entry, &dev_info->seg_list, lh) {
if (!strcmp(name, entry->segment_name))
return entry;
}
}
return NULL;
}
/*
* get the highest address of the multi-segment block.
*/
static unsigned long
dcssblk_find_highest_addr(struct dcssblk_dev_info *dev_info)
{
unsigned long highest_addr;
struct segment_info *entry;
highest_addr = 0;
list_for_each_entry(entry, &dev_info->seg_list, lh) {
if (highest_addr < entry->end)
highest_addr = entry->end;
}
return highest_addr;
}
/*
* get the lowest address of the multi-segment block.
*/
static unsigned long
dcssblk_find_lowest_addr(struct dcssblk_dev_info *dev_info)
{
int set_first;
unsigned long lowest_addr;
struct segment_info *entry;
set_first = 0;
lowest_addr = 0;
list_for_each_entry(entry, &dev_info->seg_list, lh) {
if (set_first == 0) {
lowest_addr = entry->start;
set_first = 1;
} else {
if (lowest_addr > entry->start)
lowest_addr = entry->start;
}
}
return lowest_addr;
}
/*
* Check continuity of segments.
*/
static int
dcssblk_is_continuous(struct dcssblk_dev_info *dev_info)
{
int i, j, rc;
struct segment_info *sort_list, *entry, temp;
if (dev_info->num_of_segments <= 1)
return 0;
sort_list = kcalloc(dev_info->num_of_segments,
sizeof(struct segment_info),
GFP_KERNEL);
if (sort_list == NULL)
return -ENOMEM;
i = 0;
list_for_each_entry(entry, &dev_info->seg_list, lh) {
memcpy(&sort_list[i], entry, sizeof(struct segment_info));
i++;
}
	/* sort segments by ascending start address (simple exchange sort) */
for (i = 0; i < dev_info->num_of_segments; i++)
for (j = 0; j < dev_info->num_of_segments; j++)
if (sort_list[j].start > sort_list[i].start) {
memcpy(&temp, &sort_list[i],
sizeof(struct segment_info));
memcpy(&sort_list[i], &sort_list[j],
sizeof(struct segment_info));
memcpy(&sort_list[j], &temp,
sizeof(struct segment_info));
}
/* check continuity */
for (i = 0; i < dev_info->num_of_segments - 1; i++) {
if ((sort_list[i].end + 1) != sort_list[i+1].start) {
pr_err("Adjacent DCSSs %s and %s are not "
"contiguous\n", sort_list[i].segment_name,
sort_list[i+1].segment_name);
rc = -EINVAL;
goto out;
}
/* EN and EW are allowed in a block device */
if (sort_list[i].segment_type != sort_list[i+1].segment_type) {
if (!(sort_list[i].segment_type & SEGMENT_EXCLUSIVE) ||
(sort_list[i].segment_type == SEG_TYPE_ER) ||
!(sort_list[i+1].segment_type &
SEGMENT_EXCLUSIVE) ||
(sort_list[i+1].segment_type == SEG_TYPE_ER)) {
pr_err("DCSS %s and DCSS %s have "
"incompatible types\n",
sort_list[i].segment_name,
sort_list[i+1].segment_name);
rc = -EINVAL;
goto out;
}
}
}
rc = 0;
out:
kfree(sort_list);
return rc;
}
/*
* Load a segment
*/
static int
dcssblk_load_segment(char *name, struct segment_info **seg_info)
{
int rc;
/* already loaded? */
down_read(&dcssblk_devices_sem);
*seg_info = dcssblk_get_segment_by_name(name);
up_read(&dcssblk_devices_sem);
if (*seg_info != NULL)
return -EEXIST;
/* get a struct segment_info */
*seg_info = kzalloc(sizeof(struct segment_info), GFP_KERNEL);
if (*seg_info == NULL)
return -ENOMEM;
strcpy((*seg_info)->segment_name, name);
/* load the segment */
rc = segment_load(name, SEGMENT_SHARED,
&(*seg_info)->start, &(*seg_info)->end);
if (rc < 0) {
segment_warning(rc, (*seg_info)->segment_name);
kfree(*seg_info);
} else {
INIT_LIST_HEAD(&(*seg_info)->lh);
(*seg_info)->segment_type = rc;
}
return rc;
}
/*
* device attribute for switching shared/nonshared (exclusive)
* operation (show + store)
*/
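/* e.g. echo 0 > /sys/devices/dcssblk/<device name>/shared */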
static ssize_t
dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dcssblk_dev_info *dev_info;
dev_info = container_of(dev, struct dcssblk_dev_info, dev);
return sprintf(buf, dev_info->is_shared ? "1\n" : "0\n");
}
static ssize_t
dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
{
struct dcssblk_dev_info *dev_info;
struct segment_info *entry, *temp;
int rc;
if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
return -EINVAL;
down_write(&dcssblk_devices_sem);
dev_info = container_of(dev, struct dcssblk_dev_info, dev);
if (atomic_read(&dev_info->use_count)) {
rc = -EBUSY;
goto out;
}
if (inbuf[0] == '1') {
/* reload segments in shared mode */
list_for_each_entry(entry, &dev_info->seg_list, lh) {
rc = segment_modify_shared(entry->segment_name,
SEGMENT_SHARED);
if (rc < 0) {
BUG_ON(rc == -EINVAL);
if (rc != -EAGAIN)
goto removeseg;
}
}
dev_info->is_shared = 1;
switch (dev_info->segment_type) {
case SEG_TYPE_SR:
case SEG_TYPE_ER:
case SEG_TYPE_SC:
set_disk_ro(dev_info->gd, 1);
}
} else if (inbuf[0] == '0') {
/* reload segments in exclusive mode */
if (dev_info->segment_type == SEG_TYPE_SC) {
pr_err("DCSS %s is of type SC and cannot be "
"loaded as exclusive-writable\n",
dev_info->segment_name);
rc = -EINVAL;
goto out;
}
list_for_each_entry(entry, &dev_info->seg_list, lh) {
rc = segment_modify_shared(entry->segment_name,
SEGMENT_EXCLUSIVE);
if (rc < 0) {
BUG_ON(rc == -EINVAL);
if (rc != -EAGAIN)
goto removeseg;
}
}
dev_info->is_shared = 0;
set_disk_ro(dev_info->gd, 0);
} else {
rc = -EINVAL;
goto out;
}
rc = count;
goto out;
removeseg:
pr_err("DCSS device %s is removed after a failed access mode "
"change\n", dev_info->segment_name);
temp = entry;
list_for_each_entry(entry, &dev_info->seg_list, lh) {
if (entry != temp)
segment_unload(entry->segment_name);
}
list_del(&dev_info->lh);
up_write(&dcssblk_devices_sem);
dax_remove_host(dev_info->gd);
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
del_gendisk(dev_info->gd);
put_disk(dev_info->gd);
if (device_remove_file_self(dev, attr)) {
device_unregister(dev);
put_device(dev);
}
return rc;
out:
up_write(&dcssblk_devices_sem);
return rc;
}
static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show,
dcssblk_shared_store);
/*
* device attribute for save operation on current copy
 * of the segment. If the segment is busy, saving will
 * become pending until the device is released, which can be
 * undone by writing '0' to this entry.
* (show + store)
*/
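/* e.g. echo 1 > /sys/devices/dcssblk/<device name>/save */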
static ssize_t
dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct dcssblk_dev_info *dev_info;
dev_info = container_of(dev, struct dcssblk_dev_info, dev);
return sprintf(buf, dev_info->save_pending ? "1\n" : "0\n");
}
static ssize_t
dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
{
struct dcssblk_dev_info *dev_info;
struct segment_info *entry;
if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
return -EINVAL;
dev_info = container_of(dev, struct dcssblk_dev_info, dev);
down_write(&dcssblk_devices_sem);
if (inbuf[0] == '1') {
if (atomic_read(&dev_info->use_count) == 0) {
// device is idle => we save immediately
pr_info("All DCSSs that map to device %s are "
"saved\n", dev_info->segment_name);
list_for_each_entry(entry, &dev_info->seg_list, lh) {
if (entry->segment_type == SEG_TYPE_EN ||
entry->segment_type == SEG_TYPE_SN)
pr_warn("DCSS %s is of type SN or EN"
" and cannot be saved\n",
entry->segment_name);
else
segment_save(entry->segment_name);
}
} else {
// device is busy => we save it when it becomes
// idle in dcssblk_release
pr_info("Device %s is in use, its DCSSs will be "
"saved when it becomes idle\n",
dev_info->segment_name);
dev_info->save_pending = 1;
}
} else if (inbuf[0] == '0') {
if (dev_info->save_pending) {
			// device is busy & the user wants to undo the
			// save request
dev_info->save_pending = 0;
pr_info("A pending save request for device %s "
"has been canceled\n",
dev_info->segment_name);
}
} else {
up_write(&dcssblk_devices_sem);
return -EINVAL;
}
up_write(&dcssblk_devices_sem);
return count;
}
static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show,
dcssblk_save_store);
/*
* device attribute for showing all segments in a device
*/
static ssize_t
dcssblk_seglist_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int i;
struct dcssblk_dev_info *dev_info;
struct segment_info *entry;
down_read(&dcssblk_devices_sem);
dev_info = container_of(dev, struct dcssblk_dev_info, dev);
i = 0;
buf[0] = '\0';
list_for_each_entry(entry, &dev_info->seg_list, lh) {
strcpy(&buf[i], entry->segment_name);
i += strlen(entry->segment_name);
buf[i] = '\n';
i++;
}
up_read(&dcssblk_devices_sem);
return i;
}
static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL);
static struct attribute *dcssblk_dev_attrs[] = {
&dev_attr_shared.attr,
&dev_attr_save.attr,
&dev_attr_seglist.attr,
NULL,
};
static struct attribute_group dcssblk_dev_attr_group = {
.attrs = dcssblk_dev_attrs,
};
static const struct attribute_group *dcssblk_dev_attr_groups[] = {
&dcssblk_dev_attr_group,
NULL,
};
/*
* device attribute for adding devices
*/
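/* e.g. echo "SEG1:SEG2" > /sys/devices/dcssblk/add */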
static ssize_t
dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
int rc, i, j, num_of_segments;
struct dcssblk_dev_info *dev_info;
struct segment_info *seg_info, *temp;
char *local_buf;
unsigned long seg_byte_size;
dev_info = NULL;
seg_info = NULL;
if (dev != dcssblk_root_dev) {
rc = -EINVAL;
goto out_nobuf;
}
if ((count < 1) || (buf[0] == '\0') || (buf[0] == '\n')) {
rc = -ENAMETOOLONG;
goto out_nobuf;
}
local_buf = kmalloc(count + 1, GFP_KERNEL);
if (local_buf == NULL) {
rc = -ENOMEM;
goto out_nobuf;
}
/*
	 * parse input: colon-separated segment names, converted to
	 * upper case; each name may be 1 to 8 characters long
*/
num_of_segments = 0;
for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) {
for (j = i; j < count &&
(buf[j] != ':') &&
(buf[j] != '\0') &&
(buf[j] != '\n'); j++) {
local_buf[j-i] = toupper(buf[j]);
}
local_buf[j-i] = '\0';
if (((j - i) == 0) || ((j - i) > 8)) {
rc = -ENAMETOOLONG;
goto seg_list_del;
}
rc = dcssblk_load_segment(local_buf, &seg_info);
if (rc < 0)
goto seg_list_del;
/*
* get a struct dcssblk_dev_info
*/
if (num_of_segments == 0) {
dev_info = kzalloc(sizeof(struct dcssblk_dev_info),
GFP_KERNEL);
if (dev_info == NULL) {
rc = -ENOMEM;
goto out;
}
strcpy(dev_info->segment_name, local_buf);
dev_info->segment_type = seg_info->segment_type;
INIT_LIST_HEAD(&dev_info->seg_list);
}
list_add_tail(&seg_info->lh, &dev_info->seg_list);
num_of_segments++;
i = j;
if ((buf[j] == '\0') || (buf[j] == '\n'))
break;
}
/* no trailing colon at the end of the input */
if ((i > 0) && (buf[i-1] == ':')) {
rc = -ENAMETOOLONG;
goto seg_list_del;
}
strscpy(local_buf, buf, i + 1);
dev_info->num_of_segments = num_of_segments;
rc = dcssblk_is_continuous(dev_info);
if (rc < 0)
goto seg_list_del;
dev_info->start = dcssblk_find_lowest_addr(dev_info);
dev_info->end = dcssblk_find_highest_addr(dev_info);
dev_set_name(&dev_info->dev, "%s", dev_info->segment_name);
dev_info->dev.release = dcssblk_release_segment;
dev_info->dev.groups = dcssblk_dev_attr_groups;
INIT_LIST_HEAD(&dev_info->lh);
dev_info->gd = blk_alloc_disk(NUMA_NO_NODE);
if (dev_info->gd == NULL) {
rc = -ENOMEM;
goto seg_list_del;
}
dev_info->gd->major = dcssblk_major;
dev_info->gd->minors = DCSSBLK_MINORS_PER_DISK;
dev_info->gd->fops = &dcssblk_devops;
dev_info->gd->private_data = dev_info;
dev_info->gd->flags |= GENHD_FL_NO_PART;
blk_queue_logical_block_size(dev_info->gd->queue, 4096);
blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->gd->queue);
seg_byte_size = (dev_info->end - dev_info->start + 1);
set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
pr_info("Loaded %s with total size %lu bytes and capacity %lu "
"sectors\n", local_buf, seg_byte_size, seg_byte_size >> 9);
dev_info->save_pending = 0;
dev_info->is_shared = 1;
dev_info->dev.parent = dcssblk_root_dev;
/*
	 * get minor, add to list
*/
down_write(&dcssblk_devices_sem);
if (dcssblk_get_segment_by_name(local_buf)) {
rc = -EEXIST;
goto release_gd;
}
rc = dcssblk_assign_free_minor(dev_info);
if (rc)
goto release_gd;
sprintf(dev_info->gd->disk_name, "dcssblk%d",
dev_info->gd->first_minor);
list_add_tail(&dev_info->lh, &dcssblk_devices);
if (!try_module_get(THIS_MODULE)) {
rc = -ENODEV;
goto dev_list_del;
}
/*
* register the device
*/
rc = device_register(&dev_info->dev);
if (rc)
goto put_dev;
dev_info->dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops);
if (IS_ERR(dev_info->dax_dev)) {
rc = PTR_ERR(dev_info->dax_dev);
dev_info->dax_dev = NULL;
goto put_dev;
}
set_dax_synchronous(dev_info->dax_dev);
rc = dax_add_host(dev_info->dax_dev, dev_info->gd);
if (rc)
goto out_dax;
get_device(&dev_info->dev);
rc = device_add_disk(&dev_info->dev, dev_info->gd, NULL);
if (rc)
goto out_dax_host;
switch (dev_info->segment_type) {
case SEG_TYPE_SR:
case SEG_TYPE_ER:
case SEG_TYPE_SC:
set_disk_ro(dev_info->gd,1);
break;
default:
set_disk_ro(dev_info->gd,0);
break;
}
up_write(&dcssblk_devices_sem);
rc = count;
goto out;
out_dax_host:
put_device(&dev_info->dev);
dax_remove_host(dev_info->gd);
out_dax:
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
put_dev:
list_del(&dev_info->lh);
put_disk(dev_info->gd);
list_for_each_entry(seg_info, &dev_info->seg_list, lh) {
segment_unload(seg_info->segment_name);
}
put_device(&dev_info->dev);
up_write(&dcssblk_devices_sem);
goto out;
dev_list_del:
list_del(&dev_info->lh);
release_gd:
put_disk(dev_info->gd);
up_write(&dcssblk_devices_sem);
seg_list_del:
if (dev_info == NULL)
goto out;
list_for_each_entry_safe(seg_info, temp, &dev_info->seg_list, lh) {
list_del(&seg_info->lh);
segment_unload(seg_info->segment_name);
kfree(seg_info);
}
kfree(dev_info);
out:
kfree(local_buf);
out_nobuf:
return rc;
}
/*
* device attribute for removing devices
*/
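/* e.g. echo "SEG1" > /sys/devices/dcssblk/remove */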
static ssize_t
dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct dcssblk_dev_info *dev_info;
struct segment_info *entry;
int rc, i;
char *local_buf;
if (dev != dcssblk_root_dev) {
return -EINVAL;
}
local_buf = kmalloc(count + 1, GFP_KERNEL);
if (local_buf == NULL) {
return -ENOMEM;
}
/*
* parse input
*/
for (i = 0; (i < count && (*(buf+i)!='\0') && (*(buf+i)!='\n')); i++) {
local_buf[i] = toupper(buf[i]);
}
local_buf[i] = '\0';
if ((i == 0) || (i > 8)) {
rc = -ENAMETOOLONG;
goto out_buf;
}
down_write(&dcssblk_devices_sem);
dev_info = dcssblk_get_device_by_name(local_buf);
if (dev_info == NULL) {
up_write(&dcssblk_devices_sem);
pr_warn("Device %s cannot be removed because it is not a known device\n",
local_buf);
rc = -ENODEV;
goto out_buf;
}
if (atomic_read(&dev_info->use_count) != 0) {
up_write(&dcssblk_devices_sem);
pr_warn("Device %s cannot be removed while it is in use\n",
local_buf);
rc = -EBUSY;
goto out_buf;
}
list_del(&dev_info->lh);
/* unload all related segments */
list_for_each_entry(entry, &dev_info->seg_list, lh)
segment_unload(entry->segment_name);
up_write(&dcssblk_devices_sem);
dax_remove_host(dev_info->gd);
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
del_gendisk(dev_info->gd);
put_disk(dev_info->gd);
device_unregister(&dev_info->dev);
put_device(&dev_info->dev);
rc = count;
out_buf:
kfree(local_buf);
return rc;
}
static int
dcssblk_open(struct gendisk *disk, blk_mode_t mode)
{
struct dcssblk_dev_info *dev_info = disk->private_data;
int rc;
	if (!dev_info) {
rc = -ENODEV;
goto out;
}
atomic_inc(&dev_info->use_count);
rc = 0;
out:
return rc;
}
static void
dcssblk_release(struct gendisk *disk)
{
struct dcssblk_dev_info *dev_info = disk->private_data;
struct segment_info *entry;
if (!dev_info) {
WARN_ON(1);
return;
}
down_write(&dcssblk_devices_sem);
if (atomic_dec_and_test(&dev_info->use_count)
&& (dev_info->save_pending)) {
pr_info("Device %s has become idle and is being saved "
"now\n", dev_info->segment_name);
list_for_each_entry(entry, &dev_info->seg_list, lh) {
if (entry->segment_type == SEG_TYPE_EN ||
entry->segment_type == SEG_TYPE_SN)
pr_warn("DCSS %s is of type SN or EN and cannot"
" be saved\n", entry->segment_name);
else
segment_save(entry->segment_name);
}
dev_info->save_pending = 0;
}
up_write(&dcssblk_devices_sem);
}
static void
dcssblk_submit_bio(struct bio *bio)
{
struct dcssblk_dev_info *dev_info;
struct bio_vec bvec;
struct bvec_iter iter;
unsigned long index;
void *page_addr;
unsigned long source_addr;
unsigned long bytes_done;
bytes_done = 0;
dev_info = bio->bi_bdev->bd_disk->private_data;
if (dev_info == NULL)
goto fail;
if (!IS_ALIGNED(bio->bi_iter.bi_sector, 8) ||
!IS_ALIGNED(bio->bi_iter.bi_size, PAGE_SIZE))
/* Request is not page-aligned. */
goto fail;
/* verify data transfer direction */
if (dev_info->is_shared) {
switch (dev_info->segment_type) {
case SEG_TYPE_SR:
case SEG_TYPE_ER:
case SEG_TYPE_SC:
/* cannot write to these segments */
if (bio_data_dir(bio) == WRITE) {
pr_warn("Writing to %s failed because it is a read-only device\n",
dev_name(&dev_info->dev));
goto fail;
}
}
}
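	/* convert the 512-byte start sector into a 4 KiB page index */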
index = (bio->bi_iter.bi_sector >> 3);
bio_for_each_segment(bvec, bio, iter) {
page_addr = bvec_virt(&bvec);
source_addr = dev_info->start + (index<<12) + bytes_done;
if (unlikely(!IS_ALIGNED((unsigned long)page_addr, PAGE_SIZE) ||
!IS_ALIGNED(bvec.bv_len, PAGE_SIZE)))
// More paranoia.
goto fail;
if (bio_data_dir(bio) == READ)
memcpy(page_addr, __va(source_addr), bvec.bv_len);
else
memcpy(__va(source_addr), page_addr, bvec.bv_len);
bytes_done += bvec.bv_len;
}
bio_endio(bio);
return;
fail:
bio_io_error(bio);
}
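/*
 * Translate a page offset into a kernel address (and pfn) within the
 * segment; the return value is the number of pages available from
 * pgoff up to the end of the device.
 */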
static long
__dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
long nr_pages, void **kaddr, pfn_t *pfn)
{
resource_size_t offset = pgoff * PAGE_SIZE;
unsigned long dev_sz;
dev_sz = dev_info->end - dev_info->start + 1;
if (kaddr)
*kaddr = (void *) dev_info->start + offset;
if (pfn)
*pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset),
PFN_DEV|PFN_SPECIAL);
return (dev_sz - offset) / PAGE_SIZE;
}
static long
dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
long nr_pages, enum dax_access_mode mode, void **kaddr,
pfn_t *pfn)
{
struct dcssblk_dev_info *dev_info = dax_get_private(dax_dev);
return __dcssblk_direct_access(dev_info, pgoff, nr_pages, kaddr, pfn);
}
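/*
 * Parse the "segments=" module parameter: each comma-separated set is
 * handed to dcssblk_add_store(); a trailing "(local)" switches the
 * resulting device to exclusive (non-shared) mode via the "shared"
 * attribute.
 */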
static void
dcssblk_check_params(void)
{
int rc, i, j, k;
char buf[DCSSBLK_PARM_LEN + 1];
struct dcssblk_dev_info *dev_info;
for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0');
i++) {
for (j = i; (j < DCSSBLK_PARM_LEN) &&
(dcssblk_segments[j] != ',') &&
(dcssblk_segments[j] != '\0') &&
(dcssblk_segments[j] != '('); j++)
{
buf[j-i] = dcssblk_segments[j];
}
buf[j-i] = '\0';
rc = dcssblk_add_store(dcssblk_root_dev, NULL, buf, j-i);
if ((rc >= 0) && (dcssblk_segments[j] == '(')) {
for (k = 0; (buf[k] != ':') && (buf[k] != '\0'); k++)
buf[k] = toupper(buf[k]);
buf[k] = '\0';
if (!strncmp(&dcssblk_segments[j], "(local)", 7)) {
down_read(&dcssblk_devices_sem);
dev_info = dcssblk_get_device_by_name(buf);
up_read(&dcssblk_devices_sem);
if (dev_info)
dcssblk_shared_store(&dev_info->dev,
NULL, "0\n", 2);
}
}
while ((dcssblk_segments[j] != ',') &&
(dcssblk_segments[j] != '\0'))
{
j++;
}
if (dcssblk_segments[j] == '\0')
break;
i = j;
}
}
/*
* The init/exit functions.
*/
static void __exit
dcssblk_exit(void)
{
root_device_unregister(dcssblk_root_dev);
unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
}
static int __init
dcssblk_init(void)
{
int rc;
dcssblk_root_dev = root_device_register("dcssblk");
if (IS_ERR(dcssblk_root_dev))
return PTR_ERR(dcssblk_root_dev);
rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
if (rc)
goto out_root;
rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
if (rc)
goto out_root;
rc = register_blkdev(0, DCSSBLK_NAME);
if (rc < 0)
goto out_root;
dcssblk_major = rc;
init_rwsem(&dcssblk_devices_sem);
dcssblk_check_params();
return 0;
out_root:
root_device_unregister(dcssblk_root_dev);
return rc;
}
module_init(dcssblk_init);
module_exit(dcssblk_exit);
module_param_string(segments, dcssblk_segments, DCSSBLK_PARM_LEN, 0444);
MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, "
		"comma-separated list of sets; names within each set "
		"are separated by colons, each set contains "
		"names of contiguous segments and each name max. 8 chars.\n"
		"Adding \"(local)\" to the end of each set equals echoing 0 "
		"to /sys/devices/dcssblk/<device name>/shared after loading "
		"the contiguous segments - \n"
		"e.g. segments=\"mydcss1,mydcss2:mydcss3,mydcss4(local)\"");
MODULE_LICENSE("GPL");
| linux-master | drivers/s390/block/dcssblk.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <[email protected]>
* Horst Hummel <[email protected]>
* Carsten Otte <[email protected]>
* Martin Schwidefsky <[email protected]>
* Bugreports.to..: <[email protected]>
* Copyright IBM Corp. 1999, 2009
*/
#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>
/* This is ugly... */
#define PRINTK_HEADER "dasd:"
#include "dasd_int.h"
/*
* SECTION: Constant definitions to be used within this file
*/
#define DASD_CHANQ_MAX_SIZE 4
#define DASD_DIAG_MOD "dasd_diag_mod"
/*
* SECTION: exported variables of dasd.c
*/
debug_info_t *dasd_debug_area;
EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
MODULE_AUTHOR("Holger Smolinski <[email protected]>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
" Copyright IBM Corp. 2000");
MODULE_LICENSE("GPL");
/*
* SECTION: prototypes for static functions of dasd.c
*/
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(unsigned long);
static void dasd_block_tasklet(unsigned long);
static void do_kick_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(struct timer_list *);
static void dasd_block_timeout(struct timer_list *);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
static void dasd_hosts_exit(struct dasd_device *);
static int dasd_handle_autoquiesce(struct dasd_device *, struct dasd_ccw_req *,
unsigned int);
/*
* SECTION: Operations on the device structure.
*/
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;
/*
* Allocate memory for a new device structure.
*/
struct dasd_device *dasd_alloc_device(void)
{
struct dasd_device *device;
device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
if (!device)
return ERR_PTR(-ENOMEM);
/* Get two pages for normal block device operations. */
device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
if (!device->ccw_mem) {
kfree(device);
return ERR_PTR(-ENOMEM);
}
/* Get one page for error recovery. */
device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
if (!device->erp_mem) {
free_pages((unsigned long) device->ccw_mem, 1);
kfree(device);
return ERR_PTR(-ENOMEM);
}
/* Get two pages for ese format. */
device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
if (!device->ese_mem) {
free_page((unsigned long) device->erp_mem);
free_pages((unsigned long) device->ccw_mem, 1);
kfree(device);
return ERR_PTR(-ENOMEM);
}
dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
spin_lock_init(&device->mem_lock);
atomic_set(&device->tasklet_scheduled, 0);
tasklet_init(&device->tasklet, dasd_device_tasklet,
(unsigned long) device);
INIT_LIST_HEAD(&device->ccw_queue);
timer_setup(&device->timer, dasd_device_timeout, 0);
INIT_WORK(&device->kick_work, do_kick_device);
INIT_WORK(&device->reload_device, do_reload_device);
INIT_WORK(&device->requeue_requests, do_requeue_requests);
device->state = DASD_STATE_NEW;
device->target = DASD_STATE_NEW;
mutex_init(&device->state_mutex);
spin_lock_init(&device->profile.lock);
return device;
}
/*
* Free memory of a device structure.
*/
void dasd_free_device(struct dasd_device *device)
{
kfree(device->private);
free_pages((unsigned long) device->ese_mem, 1);
free_page((unsigned long) device->erp_mem);
free_pages((unsigned long) device->ccw_mem, 1);
kfree(device);
}
/*
* Allocate memory for a new device structure.
*/
struct dasd_block *dasd_alloc_block(void)
{
struct dasd_block *block;
block = kzalloc(sizeof(*block), GFP_ATOMIC);
if (!block)
return ERR_PTR(-ENOMEM);
/* open_count = 0 means device online but not in use */
atomic_set(&block->open_count, -1);
atomic_set(&block->tasklet_scheduled, 0);
tasklet_init(&block->tasklet, dasd_block_tasklet,
(unsigned long) block);
INIT_LIST_HEAD(&block->ccw_queue);
spin_lock_init(&block->queue_lock);
INIT_LIST_HEAD(&block->format_list);
spin_lock_init(&block->format_lock);
timer_setup(&block->timer, dasd_block_timeout, 0);
spin_lock_init(&block->profile.lock);
return block;
}
EXPORT_SYMBOL_GPL(dasd_alloc_block);
/*
* Free memory of a device structure.
*/
void dasd_free_block(struct dasd_block *block)
{
kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);
/*
* Make a new device known to the system.
*/
static int dasd_state_new_to_known(struct dasd_device *device)
{
/*
* As long as the device is not in state DASD_STATE_NEW we want to
* keep the reference count > 0.
*/
dasd_get_device(device);
device->state = DASD_STATE_KNOWN;
return 0;
}
/*
* Let the system forget about a device.
*/
static int dasd_state_known_to_new(struct dasd_device *device)
{
/* Disable extended error reporting for this device. */
dasd_eer_disable(device);
device->state = DASD_STATE_NEW;
/* Give up reference we took in dasd_state_new_to_known. */
dasd_put_device(device);
return 0;
}
static struct dentry *dasd_debugfs_setup(const char *name,
struct dentry *base_dentry)
{
struct dentry *pde;
if (!base_dentry)
return NULL;
pde = debugfs_create_dir(name, base_dentry);
if (!pde || IS_ERR(pde))
return NULL;
return pde;
}
/*
* Request the irq line for the device.
*/
static int dasd_state_known_to_basic(struct dasd_device *device)
{
struct dasd_block *block = device->block;
int rc = 0;
/* Allocate and register gendisk structure. */
if (block) {
rc = dasd_gendisk_alloc(block);
if (rc)
return rc;
block->debugfs_dentry =
dasd_debugfs_setup(block->gdp->disk_name,
dasd_debugfs_root_entry);
dasd_profile_init(&block->profile, block->debugfs_dentry);
if (dasd_global_profile_level == DASD_PROFILE_ON)
dasd_profile_on(&device->block->profile);
}
device->debugfs_dentry =
dasd_debugfs_setup(dev_name(&device->cdev->dev),
dasd_debugfs_root_entry);
dasd_profile_init(&device->profile, device->debugfs_dentry);
dasd_hosts_init(device->debugfs_dentry, device);
/* register 'device' debug area, used for all DBF_DEV_XXX calls */
device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
8 * sizeof(long));
debug_register_view(device->debug_area, &debug_sprintf_view);
debug_set_level(device->debug_area, DBF_WARNING);
DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
device->state = DASD_STATE_BASIC;
return rc;
}
/*
* Release the irq line for the device. Terminate any running i/o.
*/
static int dasd_state_basic_to_known(struct dasd_device *device)
{
int rc;
if (device->discipline->basic_to_known) {
rc = device->discipline->basic_to_known(device);
if (rc)
return rc;
}
if (device->block) {
dasd_profile_exit(&device->block->profile);
debugfs_remove(device->block->debugfs_dentry);
dasd_gendisk_free(device->block);
dasd_block_clear_timer(device->block);
}
rc = dasd_flush_device_queue(device);
if (rc)
return rc;
dasd_device_clear_timer(device);
dasd_profile_exit(&device->profile);
dasd_hosts_exit(device);
debugfs_remove(device->debugfs_dentry);
DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
if (device->debug_area != NULL) {
debug_unregister(device->debug_area);
device->debug_area = NULL;
}
device->state = DASD_STATE_KNOWN;
return 0;
}
/*
* Do the initial analysis. The do_analysis function may return
* -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
* until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
* interrupt for this detection ccw uses the kernel event daemon to
* trigger the call to dasd_change_state. All this is done in the
* discipline code, see dasd_eckd.c.
* After the analysis ccw is done (do_analysis returned 0) the block
* device is setup.
* In case the analysis returns an error, the device setup is stopped
* (a fake disk was already added to allow formatting).
*/
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
int rc;
struct dasd_block *block;
struct gendisk *disk;
rc = 0;
block = device->block;
/* make disk known with correct capacity */
if (block) {
if (block->base->discipline->do_analysis != NULL)
rc = block->base->discipline->do_analysis(block);
if (rc) {
if (rc != -EAGAIN) {
device->state = DASD_STATE_UNFMT;
disk = device->block->gdp;
kobject_uevent(&disk_to_dev(disk)->kobj,
KOBJ_CHANGE);
goto out;
}
return rc;
}
if (device->discipline->setup_blk_queue)
device->discipline->setup_blk_queue(block);
set_capacity(block->gdp,
block->blocks << block->s2b_shift);
device->state = DASD_STATE_READY;
rc = dasd_scan_partitions(block);
if (rc) {
device->state = DASD_STATE_BASIC;
return rc;
}
} else {
device->state = DASD_STATE_READY;
}
out:
if (device->discipline->basic_to_ready)
rc = device->discipline->basic_to_ready(device);
return rc;
}
static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
if (device->block)
return list_empty(&device->ccw_queue) &&
list_empty(&device->block->ccw_queue);
else
return list_empty(&device->ccw_queue);
}
/*
* Remove device from block device layer. Destroy dirty buffers.
* Forget format information. Check if the target level is basic
* and if it is create fake disk for formatting.
*/
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
int rc;
device->state = DASD_STATE_BASIC;
if (device->block) {
struct dasd_block *block = device->block;
rc = dasd_flush_block_queue(block);
if (rc) {
device->state = DASD_STATE_READY;
return rc;
}
dasd_destroy_partitions(block);
block->blocks = 0;
block->bp_block = 0;
block->s2b_shift = 0;
}
return 0;
}
/*
* Back to basic.
*/
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
device->state = DASD_STATE_BASIC;
return 0;
}
/*
* Make the device online and schedule the bottom half to start
* the requeueing of requests from the linux request queue to the
* ccw queue.
*/
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
device->state = DASD_STATE_ONLINE;
if (device->block) {
dasd_schedule_block_bh(device->block);
if ((device->features & DASD_FEATURE_USERAW)) {
kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
KOBJ_CHANGE);
return 0;
}
disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
}
return 0;
}
/*
* Stop the requeueing of requests again.
*/
static int dasd_state_online_to_ready(struct dasd_device *device)
{
int rc;
if (device->discipline->online_to_ready) {
rc = device->discipline->online_to_ready(device);
if (rc)
return rc;
}
device->state = DASD_STATE_READY;
if (device->block && !(device->features & DASD_FEATURE_USERAW))
disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
return 0;
}
/*
* Device startup state changes.
*/
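/* Progression: NEW -> KNOWN -> BASIC -> (UNFMT | READY) -> ONLINE. */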
static int dasd_increase_state(struct dasd_device *device)
{
int rc;
rc = 0;
if (device->state == DASD_STATE_NEW &&
device->target >= DASD_STATE_KNOWN)
rc = dasd_state_new_to_known(device);
if (!rc &&
device->state == DASD_STATE_KNOWN &&
device->target >= DASD_STATE_BASIC)
rc = dasd_state_known_to_basic(device);
if (!rc &&
device->state == DASD_STATE_BASIC &&
device->target >= DASD_STATE_READY)
rc = dasd_state_basic_to_ready(device);
if (!rc &&
device->state == DASD_STATE_UNFMT &&
device->target > DASD_STATE_UNFMT)
rc = -EPERM;
if (!rc &&
device->state == DASD_STATE_READY &&
device->target >= DASD_STATE_ONLINE)
rc = dasd_state_ready_to_online(device);
return rc;
}
/*
* Device shutdown state changes.
*/
static int dasd_decrease_state(struct dasd_device *device)
{
int rc;
rc = 0;
if (device->state == DASD_STATE_ONLINE &&
device->target <= DASD_STATE_READY)
rc = dasd_state_online_to_ready(device);
if (!rc &&
device->state == DASD_STATE_READY &&
device->target <= DASD_STATE_BASIC)
rc = dasd_state_ready_to_basic(device);
if (!rc &&
device->state == DASD_STATE_UNFMT &&
device->target <= DASD_STATE_BASIC)
rc = dasd_state_unfmt_to_basic(device);
if (!rc &&
device->state == DASD_STATE_BASIC &&
device->target <= DASD_STATE_KNOWN)
rc = dasd_state_basic_to_known(device);
if (!rc &&
device->state == DASD_STATE_KNOWN &&
device->target <= DASD_STATE_NEW)
rc = dasd_state_known_to_new(device);
return rc;
}
/*
* This is the main startup/shutdown routine.
*/
static void dasd_change_state(struct dasd_device *device)
{
int rc;
if (device->state == device->target)
/* Already where we want to go today... */
return;
if (device->state < device->target)
rc = dasd_increase_state(device);
else
rc = dasd_decrease_state(device);
if (rc == -EAGAIN)
return;
if (rc)
device->target = device->state;
/* let user-space know that the device status changed */
kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
if (device->state == device->target)
wake_up(&dasd_init_waitq);
}
/*
* Kick starter for devices that did not complete the startup/shutdown
* procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device on the kernel
* event daemon.
*/
static void do_kick_device(struct work_struct *work)
{
struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
mutex_lock(&device->state_mutex);
dasd_change_state(device);
mutex_unlock(&device->state_mutex);
dasd_schedule_device_bh(device);
dasd_put_device(device);
}
void dasd_kick_device(struct dasd_device *device)
{
dasd_get_device(device);
	/* queue call to do_kick_device on the kernel event daemon. */
if (!schedule_work(&device->kick_work))
dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);
/*
 * dasd_reload_device will schedule a call to do_reload_device on the kernel
* event daemon.
*/
static void do_reload_device(struct work_struct *work)
{
struct dasd_device *device = container_of(work, struct dasd_device,
reload_device);
device->discipline->reload(device);
dasd_put_device(device);
}
void dasd_reload_device(struct dasd_device *device)
{
dasd_get_device(device);
	/* queue call to do_reload_device on the kernel event daemon. */
if (!schedule_work(&device->reload_device))
dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);
/*
 * Set the target state for a device and start the state change.
*/
void dasd_set_target_state(struct dasd_device *device, int target)
{
dasd_get_device(device);
mutex_lock(&device->state_mutex);
/* If we are in probeonly mode stop at DASD_STATE_READY. */
if (dasd_probeonly && target > DASD_STATE_READY)
target = DASD_STATE_READY;
if (device->target != target) {
if (device->state == target)
wake_up(&dasd_init_waitq);
device->target = target;
}
if (device->state != device->target)
dasd_change_state(device);
mutex_unlock(&device->state_mutex);
dasd_put_device(device);
}
/*
 * Enable a device and wait until it has reached its target state.
*/
static inline int _wait_for_device(struct dasd_device *device)
{
return (device->state == device->target);
}
void dasd_enable_device(struct dasd_device *device)
{
dasd_set_target_state(device, DASD_STATE_ONLINE);
if (device->state <= DASD_STATE_KNOWN)
/* No discipline for device found. */
dasd_set_target_state(device, DASD_STATE_NEW);
/* Now wait for the devices to come up. */
wait_event(dasd_init_waitq, _wait_for_device(device));
dasd_reload_device(device);
if (device->discipline->kick_validate)
device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);
/*
* SECTION: device operation (interrupt handler, start i/o, term i/o ...)
*/
unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;
#ifdef CONFIG_DASD_PROFILE
struct dasd_profile dasd_global_profile = {
.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
};
static struct dentry *dasd_debugfs_global_entry;
/*
* Add profiling information for cqr before execution.
*/
static void dasd_profile_start(struct dasd_block *block,
struct dasd_ccw_req *cqr,
struct request *req)
{
struct list_head *l;
unsigned int counter;
struct dasd_device *device;
/* count the length of the chanq for statistics */
counter = 0;
if (dasd_global_profile_level || block->profile.data)
list_for_each(l, &block->ccw_queue)
if (++counter >= 31)
break;
spin_lock(&dasd_global_profile.lock);
if (dasd_global_profile.data) {
dasd_global_profile.data->dasd_io_nr_req[counter]++;
if (rq_data_dir(req) == READ)
dasd_global_profile.data->dasd_read_nr_req[counter]++;
}
spin_unlock(&dasd_global_profile.lock);
spin_lock(&block->profile.lock);
if (block->profile.data) {
block->profile.data->dasd_io_nr_req[counter]++;
if (rq_data_dir(req) == READ)
block->profile.data->dasd_read_nr_req[counter]++;
}
spin_unlock(&block->profile.lock);
/*
* We count the request for the start device, even though it may run on
* some other device due to error recovery. This way we make sure that
* we count each request only once.
*/
device = cqr->startdev;
if (device->profile.data) {
counter = 1; /* request is not yet queued on the start device */
list_for_each(l, &device->ccw_queue)
if (++counter >= 31)
break;
}
spin_lock(&device->profile.lock);
if (device->profile.data) {
device->profile.data->dasd_io_nr_req[counter]++;
if (rq_data_dir(req) == READ)
device->profile.data->dasd_read_nr_req[counter]++;
}
spin_unlock(&device->profile.lock);
}
/*
* Add profiling information for cqr after execution.
*/
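/*
 * Map a value to a logarithmic histogram bucket: the smallest index
 * with value < 2^(index + 2), capped at 31.
 */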
#define dasd_profile_counter(value, index) \
{ \
for (index = 0; index < 31 && value >> (2+index); index++) \
; \
}
static void dasd_profile_end_add_data(struct dasd_profile_info *data,
int is_alias,
int is_tpm,
int is_read,
long sectors,
int sectors_ind,
int tottime_ind,
int tottimeps_ind,
int strtime_ind,
int irqtime_ind,
int irqtimeps_ind,
int endtime_ind)
{
/* in case of an overflow, reset the whole profile */
if (data->dasd_io_reqs == UINT_MAX) {
memset(data, 0, sizeof(*data));
ktime_get_real_ts64(&data->starttod);
}
data->dasd_io_reqs++;
data->dasd_io_sects += sectors;
if (is_alias)
data->dasd_io_alias++;
if (is_tpm)
data->dasd_io_tpm++;
data->dasd_io_secs[sectors_ind]++;
data->dasd_io_times[tottime_ind]++;
data->dasd_io_timps[tottimeps_ind]++;
data->dasd_io_time1[strtime_ind]++;
data->dasd_io_time2[irqtime_ind]++;
data->dasd_io_time2ps[irqtimeps_ind]++;
data->dasd_io_time3[endtime_ind]++;
if (is_read) {
data->dasd_read_reqs++;
data->dasd_read_sects += sectors;
if (is_alias)
data->dasd_read_alias++;
if (is_tpm)
data->dasd_read_tpm++;
data->dasd_read_secs[sectors_ind]++;
data->dasd_read_times[tottime_ind]++;
data->dasd_read_time1[strtime_ind]++;
data->dasd_read_time2[irqtime_ind]++;
data->dasd_read_time3[endtime_ind]++;
}
}
static void dasd_profile_end(struct dasd_block *block,
struct dasd_ccw_req *cqr,
struct request *req)
{
unsigned long strtime, irqtime, endtime, tottime;
unsigned long tottimeps, sectors;
struct dasd_device *device;
int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
int irqtime_ind, irqtimeps_ind, endtime_ind;
struct dasd_profile_info *data;
device = cqr->startdev;
if (!(dasd_global_profile_level ||
block->profile.data ||
device->profile.data))
return;
sectors = blk_rq_sectors(req);
if (!cqr->buildclk || !cqr->startclk ||
!cqr->stopclk || !cqr->endclk ||
!sectors)
return;
strtime = ((cqr->startclk - cqr->buildclk) >> 12);
irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
endtime = ((cqr->endclk - cqr->stopclk) >> 12);
tottime = ((cqr->endclk - cqr->buildclk) >> 12);
tottimeps = tottime / sectors;
dasd_profile_counter(sectors, sectors_ind);
dasd_profile_counter(tottime, tottime_ind);
dasd_profile_counter(tottimeps, tottimeps_ind);
dasd_profile_counter(strtime, strtime_ind);
dasd_profile_counter(irqtime, irqtime_ind);
dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
dasd_profile_counter(endtime, endtime_ind);
spin_lock(&dasd_global_profile.lock);
if (dasd_global_profile.data) {
data = dasd_global_profile.data;
data->dasd_sum_times += tottime;
data->dasd_sum_time_str += strtime;
data->dasd_sum_time_irq += irqtime;
data->dasd_sum_time_end += endtime;
dasd_profile_end_add_data(dasd_global_profile.data,
cqr->startdev != block->base,
cqr->cpmode == 1,
rq_data_dir(req) == READ,
sectors, sectors_ind, tottime_ind,
tottimeps_ind, strtime_ind,
irqtime_ind, irqtimeps_ind,
endtime_ind);
}
spin_unlock(&dasd_global_profile.lock);
spin_lock(&block->profile.lock);
if (block->profile.data) {
data = block->profile.data;
data->dasd_sum_times += tottime;
data->dasd_sum_time_str += strtime;
data->dasd_sum_time_irq += irqtime;
data->dasd_sum_time_end += endtime;
dasd_profile_end_add_data(block->profile.data,
cqr->startdev != block->base,
cqr->cpmode == 1,
rq_data_dir(req) == READ,
sectors, sectors_ind, tottime_ind,
tottimeps_ind, strtime_ind,
irqtime_ind, irqtimeps_ind,
endtime_ind);
}
spin_unlock(&block->profile.lock);
spin_lock(&device->profile.lock);
if (device->profile.data) {
data = device->profile.data;
data->dasd_sum_times += tottime;
data->dasd_sum_time_str += strtime;
data->dasd_sum_time_irq += irqtime;
data->dasd_sum_time_end += endtime;
dasd_profile_end_add_data(device->profile.data,
cqr->startdev != block->base,
cqr->cpmode == 1,
rq_data_dir(req) == READ,
sectors, sectors_ind, tottime_ind,
tottimeps_ind, strtime_ind,
irqtime_ind, irqtimeps_ind,
endtime_ind);
}
spin_unlock(&device->profile.lock);
}
void dasd_profile_reset(struct dasd_profile *profile)
{
struct dasd_profile_info *data;
spin_lock_bh(&profile->lock);
data = profile->data;
if (!data) {
spin_unlock_bh(&profile->lock);
return;
}
memset(data, 0, sizeof(*data));
ktime_get_real_ts64(&data->starttod);
spin_unlock_bh(&profile->lock);
}
int dasd_profile_on(struct dasd_profile *profile)
{
struct dasd_profile_info *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
spin_lock_bh(&profile->lock);
if (profile->data) {
spin_unlock_bh(&profile->lock);
kfree(data);
return 0;
}
ktime_get_real_ts64(&data->starttod);
profile->data = data;
spin_unlock_bh(&profile->lock);
return 0;
}
void dasd_profile_off(struct dasd_profile *profile)
{
spin_lock_bh(&profile->lock);
kfree(profile->data);
profile->data = NULL;
spin_unlock_bh(&profile->lock);
}
char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
char *buffer;
buffer = vmalloc(user_len + 1);
if (buffer == NULL)
return ERR_PTR(-ENOMEM);
if (copy_from_user(buffer, user_buf, user_len) != 0) {
vfree(buffer);
return ERR_PTR(-EFAULT);
}
/* got the string, now strip linefeed. */
if (buffer[user_len - 1] == '\n')
buffer[user_len - 1] = 0;
else
buffer[user_len] = 0;
return buffer;
}
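/*
 * Write handler for the debugfs "statistics" file; accepted commands
 * are "reset", "on" and "off" (e.g. echo on > .../statistics).
 */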
static ssize_t dasd_stats_write(struct file *file,
const char __user *user_buf,
size_t user_len, loff_t *pos)
{
char *buffer, *str;
int rc;
struct seq_file *m = (struct seq_file *)file->private_data;
struct dasd_profile *prof = m->private;
if (user_len > 65536)
user_len = 65536;
buffer = dasd_get_user_string(user_buf, user_len);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
str = skip_spaces(buffer);
rc = user_len;
if (strncmp(str, "reset", 5) == 0) {
dasd_profile_reset(prof);
} else if (strncmp(str, "on", 2) == 0) {
rc = dasd_profile_on(prof);
if (rc)
goto out;
rc = user_len;
if (prof == &dasd_global_profile) {
dasd_profile_reset(prof);
dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
}
} else if (strncmp(str, "off", 3) == 0) {
if (prof == &dasd_global_profile)
dasd_global_profile_level = DASD_PROFILE_OFF;
dasd_profile_off(prof);
} else
rc = -EINVAL;
out:
vfree(buffer);
return rc;
}
static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
int i;
for (i = 0; i < 32; i++)
seq_printf(m, "%u ", array[i]);
seq_putc(m, '\n');
}
static void dasd_stats_seq_print(struct seq_file *m,
struct dasd_profile_info *data)
{
seq_printf(m, "start_time %lld.%09ld\n",
(s64)data->starttod.tv_sec, data->starttod.tv_nsec);
seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
data->dasd_sum_times / data->dasd_io_reqs : 0UL);
seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
seq_puts(m, "histogram_sectors ");
dasd_stats_array(m, data->dasd_io_secs);
seq_puts(m, "histogram_io_times ");
dasd_stats_array(m, data->dasd_io_times);
seq_puts(m, "histogram_io_times_weighted ");
dasd_stats_array(m, data->dasd_io_timps);
seq_puts(m, "histogram_time_build_to_ssch ");
dasd_stats_array(m, data->dasd_io_time1);
seq_puts(m, "histogram_time_ssch_to_irq ");
dasd_stats_array(m, data->dasd_io_time2);
seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
dasd_stats_array(m, data->dasd_io_time2ps);
seq_puts(m, "histogram_time_irq_to_end ");
dasd_stats_array(m, data->dasd_io_time3);
seq_puts(m, "histogram_ccw_queue_length ");
dasd_stats_array(m, data->dasd_io_nr_req);
seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
seq_puts(m, "histogram_read_sectors ");
dasd_stats_array(m, data->dasd_read_secs);
seq_puts(m, "histogram_read_times ");
dasd_stats_array(m, data->dasd_read_times);
seq_puts(m, "histogram_read_time_build_to_ssch ");
dasd_stats_array(m, data->dasd_read_time1);
seq_puts(m, "histogram_read_time_ssch_to_irq ");
dasd_stats_array(m, data->dasd_read_time2);
seq_puts(m, "histogram_read_time_irq_to_end ");
dasd_stats_array(m, data->dasd_read_time3);
seq_puts(m, "histogram_read_ccw_queue_length ");
dasd_stats_array(m, data->dasd_read_nr_req);
}
static int dasd_stats_show(struct seq_file *m, void *v)
{
struct dasd_profile *profile;
struct dasd_profile_info *data;
profile = m->private;
spin_lock_bh(&profile->lock);
data = profile->data;
if (!data) {
spin_unlock_bh(&profile->lock);
seq_puts(m, "disabled\n");
return 0;
}
dasd_stats_seq_print(m, data);
spin_unlock_bh(&profile->lock);
return 0;
}
static int dasd_stats_open(struct inode *inode, struct file *file)
{
struct dasd_profile *profile = inode->i_private;
return single_open(file, dasd_stats_show, profile);
}
static const struct file_operations dasd_stats_raw_fops = {
.owner = THIS_MODULE,
.open = dasd_stats_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = dasd_stats_write,
};
static void dasd_profile_init(struct dasd_profile *profile,
struct dentry *base_dentry)
{
umode_t mode;
struct dentry *pde;
if (!base_dentry)
return;
profile->dentry = NULL;
profile->data = NULL;
mode = (S_IRUSR | S_IWUSR | S_IFREG);
pde = debugfs_create_file("statistics", mode, base_dentry,
profile, &dasd_stats_raw_fops);
if (pde && !IS_ERR(pde))
profile->dentry = pde;
return;
}
static void dasd_profile_exit(struct dasd_profile *profile)
{
dasd_profile_off(profile);
debugfs_remove(profile->dentry);
profile->dentry = NULL;
}
static void dasd_statistics_removeroot(void)
{
dasd_global_profile_level = DASD_PROFILE_OFF;
dasd_profile_exit(&dasd_global_profile);
debugfs_remove(dasd_debugfs_global_entry);
debugfs_remove(dasd_debugfs_root_entry);
}
static void dasd_statistics_createroot(void)
{
struct dentry *pde;
dasd_debugfs_root_entry = NULL;
pde = debugfs_create_dir("dasd", NULL);
if (!pde || IS_ERR(pde))
goto error;
dasd_debugfs_root_entry = pde;
pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
if (!pde || IS_ERR(pde))
goto error;
dasd_debugfs_global_entry = pde;
dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
return;
error:
DBF_EVENT(DBF_ERR, "%s",
"Creation of the dasd debugfs interface failed");
dasd_statistics_removeroot();
return;
}
#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)
static void dasd_statistics_createroot(void)
{
return;
}
static void dasd_statistics_removeroot(void)
{
return;
}
int dasd_stats_generic_show(struct seq_file *m, void *v)
{
seq_puts(m, "Statistics are not activated in this kernel\n");
return 0;
}
static void dasd_profile_init(struct dasd_profile *profile,
struct dentry *base_dentry)
{
return;
}
static void dasd_profile_exit(struct dasd_profile *profile)
{
return;
}
int dasd_profile_on(struct dasd_profile *profile)
{
return 0;
}
#endif /* CONFIG_DASD_PROFILE */
static int dasd_hosts_show(struct seq_file *m, void *v)
{
struct dasd_device *device;
int rc = -EOPNOTSUPP;
device = m->private;
dasd_get_device(device);
if (device->discipline->hosts_print)
rc = device->discipline->hosts_print(device, m);
dasd_put_device(device);
return rc;
}
DEFINE_SHOW_ATTRIBUTE(dasd_hosts);
static void dasd_hosts_exit(struct dasd_device *device)
{
debugfs_remove(device->hosts_dentry);
device->hosts_dentry = NULL;
}
static void dasd_hosts_init(struct dentry *base_dentry,
struct dasd_device *device)
{
struct dentry *pde;
umode_t mode;
if (!base_dentry)
return;
mode = S_IRUSR | S_IFREG;
pde = debugfs_create_file("host_access_list", mode, base_dentry,
device, &dasd_hosts_fops);
if (pde && !IS_ERR(pde))
device->hosts_dentry = pde;
}
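/*
 * Allocate a ccw request from the device's preallocated DMA chunk
 * pool. The chunk holds the channel program (cplength ccw1 entries)
 * and the data area; if no cqr is passed in, the 8-byte aligned
 * struct dasd_ccw_req is carved out of the chunk as well.
 * Free with dasd_sfree_request.
 */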
struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
struct dasd_device *device,
struct dasd_ccw_req *cqr)
{
unsigned long flags;
char *data, *chunk;
int size = 0;
if (cplength > 0)
size += cplength * sizeof(struct ccw1);
if (datasize > 0)
size += datasize;
if (!cqr)
size += (sizeof(*cqr) + 7L) & -8L;
spin_lock_irqsave(&device->mem_lock, flags);
data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
spin_unlock_irqrestore(&device->mem_lock, flags);
if (!chunk)
return ERR_PTR(-ENOMEM);
if (!cqr) {
cqr = (void *) data;
data += (sizeof(*cqr) + 7L) & -8L;
}
memset(cqr, 0, sizeof(*cqr));
cqr->mem_chunk = chunk;
if (cplength > 0) {
cqr->cpaddr = data;
data += cplength * sizeof(struct ccw1);
memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
}
if (datasize > 0) {
cqr->data = data;
memset(cqr->data, 0, datasize);
}
cqr->magic = magic;
set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
dasd_get_device(device);
return cqr;
}
EXPORT_SYMBOL(dasd_smalloc_request);
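/*
 * Same layout as dasd_smalloc_request, but allocated from the
 * separate ese chunk pool; free with dasd_ffree_request.
 */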
struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
int datasize,
struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
unsigned long flags;
int size, cqr_size;
char *data;
cqr_size = (sizeof(*cqr) + 7L) & -8L;
size = cqr_size;
if (cplength > 0)
size += cplength * sizeof(struct ccw1);
if (datasize > 0)
size += datasize;
spin_lock_irqsave(&device->mem_lock, flags);
cqr = dasd_alloc_chunk(&device->ese_chunks, size);
spin_unlock_irqrestore(&device->mem_lock, flags);
if (!cqr)
return ERR_PTR(-ENOMEM);
memset(cqr, 0, sizeof(*cqr));
data = (char *)cqr + cqr_size;
cqr->cpaddr = NULL;
if (cplength > 0) {
cqr->cpaddr = data;
data += cplength * sizeof(struct ccw1);
memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
}
cqr->data = NULL;
if (datasize > 0) {
cqr->data = data;
memset(cqr->data, 0, datasize);
}
cqr->magic = magic;
set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
dasd_get_device(device);
return cqr;
}
EXPORT_SYMBOL(dasd_fmalloc_request);
void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
unsigned long flags;
spin_lock_irqsave(&device->mem_lock, flags);
dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
spin_unlock_irqrestore(&device->mem_lock, flags);
dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_sfree_request);
void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
unsigned long flags;
spin_lock_irqsave(&device->mem_lock, flags);
dasd_free_chunk(&device->ese_chunks, cqr);
spin_unlock_irqrestore(&device->mem_lock, flags);
dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_ffree_request);
/*
* Check discipline magic in cqr.
*/
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
if (cqr == NULL)
return -EINVAL;
device = cqr->startdev;
if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
DBF_DEV_EVENT(DBF_WARNING, device,
" dasd_ccw_req 0x%08x magic doesn't match"
" discipline 0x%08x",
cqr->magic,
*(unsigned int *) device->discipline->name);
return -EINVAL;
}
return 0;
}
/*
* Terminate the current i/o and set the request to clear_pending.
 * Timer keeps the device running.
* ccw_device_clear can fail if the i/o subsystem
* is in a bad mood.
*/
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
int retries, rc;
char errorstring[ERRORLENGTH];
/* Check the cqr */
rc = dasd_check_cqr(cqr);
if (rc)
return rc;
retries = 0;
device = (struct dasd_device *) cqr->startdev;
while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
rc = ccw_device_clear(device->cdev, (long) cqr);
switch (rc) {
case 0: /* termination successful */
cqr->status = DASD_CQR_CLEAR_PENDING;
cqr->stopclk = get_tod_clock();
cqr->starttime = 0;
DBF_DEV_EVENT(DBF_DEBUG, device,
"terminate cqr %p successful",
cqr);
break;
case -ENODEV:
DBF_DEV_EVENT(DBF_ERR, device, "%s",
"device gone, retry");
break;
case -EINVAL:
/*
* device not valid so no I/O could be running
* handle CQR as termination successful
*/
cqr->status = DASD_CQR_CLEARED;
cqr->stopclk = get_tod_clock();
cqr->starttime = 0;
/* no retries for invalid devices */
cqr->retries = -1;
DBF_DEV_EVENT(DBF_ERR, device, "%s",
"EINVAL, handle as terminated");
/* fake rc to success */
rc = 0;
break;
default:
/* internal error 10 - unknown rc*/
snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
dev_err(&device->cdev->dev, "An error occurred in the "
"DASD device driver, reason=%s\n", errorstring);
BUG();
break;
}
retries++;
}
dasd_schedule_device_bh(device);
return rc;
}
EXPORT_SYMBOL(dasd_term_IO);
/*
* Start the i/o. This start_IO can fail if the channel is really busy.
* In that case set up a timer to start the request later.
*/
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
int rc;
char errorstring[ERRORLENGTH];
/* Check the cqr */
rc = dasd_check_cqr(cqr);
if (rc) {
cqr->intrc = rc;
return rc;
}
device = (struct dasd_device *) cqr->startdev;
if (((cqr->block &&
test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
!test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
"because of stolen lock", cqr);
cqr->status = DASD_CQR_ERROR;
cqr->intrc = -EPERM;
return -EPERM;
}
if (cqr->retries < 0) {
/* internal error 14 - start_IO run out of retries */
snprintf(errorstring, ERRORLENGTH, "14 %p", cqr);
dev_err(&device->cdev->dev, "An error occurred in the DASD "
"device driver, reason=%s\n", errorstring);
cqr->status = DASD_CQR_ERROR;
return -EIO;
}
cqr->startclk = get_tod_clock();
cqr->starttime = jiffies;
cqr->retries--;
if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
cqr->lpm &= dasd_path_get_opm(device);
if (!cqr->lpm)
cqr->lpm = dasd_path_get_opm(device);
}
/*
* remember the number of formatted tracks to prevent double format on
* ESE devices
*/
if (cqr->block)
cqr->trkcount = atomic_read(&cqr->block->trkcount);
if (cqr->cpmode == 1) {
rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
(long) cqr, cqr->lpm);
} else {
rc = ccw_device_start(device->cdev, cqr->cpaddr,
(long) cqr, cqr->lpm, 0);
}
switch (rc) {
case 0:
cqr->status = DASD_CQR_IN_IO;
break;
case -EBUSY:
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"start_IO: device busy, retry later");
break;
case -EACCES:
/* -EACCES indicates that the request used only a subset of the
* available paths and all these paths are gone. If the lpm of
* this request was only a subset of the opm (e.g. the ppm) then
* we just do a retry with all available paths.
* If we already use the full opm, something is amiss, and we
* need a full path verification.
*/
if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
DBF_DEV_EVENT(DBF_WARNING, device,
"start_IO: selected paths gone (%x)",
cqr->lpm);
} else if (cqr->lpm != dasd_path_get_opm(device)) {
cqr->lpm = dasd_path_get_opm(device);
DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
"start_IO: selected paths gone,"
" retry on all paths");
} else {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"start_IO: all paths in opm gone,"
" do path verification");
dasd_generic_last_path_gone(device);
dasd_path_no_path(device);
dasd_path_set_tbvpm(device,
ccw_device_get_path_mask(
device->cdev));
}
break;
case -ENODEV:
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"start_IO: -ENODEV device gone, retry");
/* this is equivalent to CC=3 for SSCH; report this to EER */
dasd_handle_autoquiesce(device, cqr, DASD_EER_STARTIO);
break;
case -EIO:
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"start_IO: -EIO device gone, retry");
break;
case -EINVAL:
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"start_IO: -EINVAL device currently "
"not accessible");
break;
default:
/* internal error 11 - unknown rc */
snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
dev_err(&device->cdev->dev,
"An error occurred in the DASD device driver, "
"reason=%s\n", errorstring);
BUG();
break;
}
cqr->intrc = rc;
return rc;
}
EXPORT_SYMBOL(dasd_start_IO);
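/*
 * Illustrative usage sketch (hedged example, not part of the driver):
 * callers hold the ccw device lock and translate a failed start into a
 * delayed retry, which is the pattern __dasd_device_start_head() uses
 * further down:
 *
 *	rc = device->discipline->start_IO(cqr);
 *	if (rc == 0)
 *		dasd_device_set_timer(device, cqr->expires);
 *	else
 *		dasd_device_set_timer(device, 50);
 */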
/*
* Timeout function for dasd devices. This is used for different purposes
* 1) missing interrupt handler for normal operation
* 2) delayed start of request where start_IO failed with -EBUSY
* 3) timeout for missing state change interrupts
* The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
* DASD_CQR_QUEUED for 2) and 3).
*/
static void dasd_device_timeout(struct timer_list *t)
{
unsigned long flags;
struct dasd_device *device;
device = from_timer(device, t, timer);
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
/* re-activate request queue */
dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
dasd_schedule_device_bh(device);
}
/*
* Setup timeout for a device in jiffies.
*/
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
if (expires == 0)
del_timer(&device->timer);
else
mod_timer(&device->timer, jiffies + expires);
}
EXPORT_SYMBOL(dasd_device_set_timer);
/*
* Clear timeout for a device.
*/
void dasd_device_clear_timer(struct dasd_device *device)
{
del_timer(&device->timer);
}
EXPORT_SYMBOL(dasd_device_clear_timer);
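/*
 * Usage note with a small example: dasd_device_set_timer(device, 0) is
 * equivalent to dasd_device_clear_timer(device); any non-zero value
 * (re)arms the timer relative to the current jiffies, e.g.:
 *
 *	dasd_device_set_timer(device, 5 * HZ);
 */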
static void dasd_handle_killed_request(struct ccw_device *cdev,
unsigned long intparm)
{
struct dasd_ccw_req *cqr;
struct dasd_device *device;
if (!intparm)
return;
cqr = (struct dasd_ccw_req *) intparm;
if (cqr->status != DASD_CQR_IN_IO) {
DBF_EVENT_DEVID(DBF_DEBUG, cdev,
"invalid status in handle_killed_request: "
"%02x", cqr->status);
return;
}
device = dasd_device_from_cdev_locked(cdev);
if (IS_ERR(device)) {
DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
"unable to get device from cdev");
return;
}
if (!cqr->startdev ||
device != cqr->startdev ||
strncmp(cqr->startdev->discipline->ebcname,
(char *) &cqr->magic, 4)) {
DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
"invalid device in request");
dasd_put_device(device);
return;
}
/* Schedule request to be retried. */
cqr->status = DASD_CQR_QUEUED;
dasd_device_clear_timer(device);
dasd_schedule_device_bh(device);
dasd_put_device(device);
}
void dasd_generic_handle_state_change(struct dasd_device *device)
{
/* First of all start sense subsystem status request. */
dasd_eer_snss(device);
dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
dasd_schedule_device_bh(device);
if (device->block) {
dasd_schedule_block_bh(device->block);
if (device->block->gdp)
blk_mq_run_hw_queues(device->block->gdp->queue, true);
}
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
static int dasd_check_hpf_error(struct irb *irb)
{
return (scsw_tm_is_valid_schxs(&irb->scsw) &&
(irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
}
static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
{
struct dasd_device *device = NULL;
u8 *sense = NULL;
if (!block)
return 0;
device = block->base;
if (!device || !device->discipline->is_ese)
return 0;
if (!device->discipline->is_ese(device))
return 0;
sense = dasd_get_sense(irb);
if (!sense)
return 0;
return !!(sense[1] & SNS1_NO_REC_FOUND) ||
!!(sense[1] & SNS1_FILE_PROTECTED) ||
scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
}
static int dasd_ese_oos_cond(u8 *sense)
{
return sense[0] & SNS0_EQUIPMENT_CHECK &&
sense[1] & SNS1_PERM_ERR &&
sense[1] & SNS1_WRITE_INHIBITED &&
sense[25] == 0x01;
}
/*
* Interrupt handler for "normal" ssch-io based dasd devices.
*/
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct irb *irb)
{
struct dasd_ccw_req *cqr, *next, *fcqr;
struct dasd_device *device;
unsigned long now;
int nrf_suppressed = 0;
int fp_suppressed = 0;
struct request *req;
u8 *sense = NULL;
int expires;
cqr = (struct dasd_ccw_req *) intparm;
if (IS_ERR(irb)) {
switch (PTR_ERR(irb)) {
case -EIO:
if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
device = cqr->startdev;
cqr->status = DASD_CQR_CLEARED;
dasd_device_clear_timer(device);
wake_up(&dasd_flush_wq);
dasd_schedule_device_bh(device);
return;
}
break;
case -ETIMEDOUT:
DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
"request timed out\n", __func__);
break;
default:
DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
"unknown error %ld\n", __func__,
PTR_ERR(irb));
}
dasd_handle_killed_request(cdev, intparm);
return;
}
now = get_tod_clock();
/* check for conditions that should be handled immediately */
if (!cqr ||
!(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
scsw_cstat(&irb->scsw) == 0)) {
if (cqr)
memcpy(&cqr->irb, irb, sizeof(*irb));
device = dasd_device_from_cdev_locked(cdev);
if (IS_ERR(device))
return;
/* ignore unsolicited interrupts for DIAG discipline */
if (device->discipline == dasd_diag_discipline_pointer) {
dasd_put_device(device);
return;
}
/*
* In some cases 'File Protected' or 'No Record Found' errors
* might be expected and debug log messages for the
* corresponding interrupts shouldn't be written then.
* Check if either of the according suppress bits is set.
*/
sense = dasd_get_sense(irb);
if (sense) {
fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
/*
* Extent pool probably out-of-space.
* Stop device and check exhaust level.
*/
if (dasd_ese_oos_cond(sense)) {
dasd_generic_space_exhaust(device, cqr);
device->discipline->ext_pool_exhaust(device, cqr);
dasd_put_device(device);
return;
}
}
if (!(fp_suppressed || nrf_suppressed))
device->discipline->dump_sense_dbf(device, irb, "int");
if (device->features & DASD_FEATURE_ERPLOG)
device->discipline->dump_sense(device, cqr, irb);
device->discipline->check_for_device_change(device, cqr, irb);
dasd_put_device(device);
}
/* check for attention message */
if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
device = dasd_device_from_cdev_locked(cdev);
if (!IS_ERR(device)) {
device->discipline->check_attention(device,
irb->esw.esw1.lpum);
dasd_put_device(device);
}
}
if (!cqr)
return;
device = (struct dasd_device *) cqr->startdev;
if (!device ||
strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
"invalid device in request");
return;
}
if (dasd_ese_needs_format(cqr->block, irb)) {
req = dasd_get_callback_data(cqr);
if (!req) {
cqr->status = DASD_CQR_ERROR;
return;
}
if (rq_data_dir(req) == READ) {
device->discipline->ese_read(cqr, irb);
cqr->status = DASD_CQR_SUCCESS;
cqr->stopclk = now;
dasd_device_clear_timer(device);
dasd_schedule_device_bh(device);
return;
}
fcqr = device->discipline->ese_format(device, cqr, irb);
if (IS_ERR(fcqr)) {
if (PTR_ERR(fcqr) == -EINVAL) {
cqr->status = DASD_CQR_ERROR;
return;
}
/*
* If we can't format now, let the request go
* one extra round. Maybe we can format later.
*/
cqr->status = DASD_CQR_QUEUED;
dasd_schedule_device_bh(device);
return;
} else {
fcqr->status = DASD_CQR_QUEUED;
cqr->status = DASD_CQR_QUEUED;
list_add(&fcqr->devlist, &device->ccw_queue);
dasd_schedule_device_bh(device);
return;
}
}
/* Check for clear pending */
if (cqr->status == DASD_CQR_CLEAR_PENDING &&
scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
cqr->status = DASD_CQR_CLEARED;
dasd_device_clear_timer(device);
wake_up(&dasd_flush_wq);
dasd_schedule_device_bh(device);
return;
}
/* check status - the request might have been killed by dyn detach */
if (cqr->status != DASD_CQR_IN_IO) {
DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
"status %02x", dev_name(&cdev->dev), cqr->status);
return;
}
next = NULL;
expires = 0;
if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
scsw_cstat(&irb->scsw) == 0) {
/* request was completed successfully */
cqr->status = DASD_CQR_SUCCESS;
cqr->stopclk = now;
/* Start first request on queue if possible -> fast_io. */
if (cqr->devlist.next != &device->ccw_queue) {
next = list_entry(cqr->devlist.next,
struct dasd_ccw_req, devlist);
}
} else { /* error */
/* check for HPF error
* call discipline function to requeue all requests
* and disable HPF accordingly
*/
if (cqr->cpmode && dasd_check_hpf_error(irb) &&
device->discipline->handle_hpf_error)
device->discipline->handle_hpf_error(device, irb);
/*
* If we don't want complex ERP for this request, then just
* reset this and retry it in the fastpath
*/
if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
cqr->retries > 0) {
if (cqr->lpm == dasd_path_get_opm(device))
DBF_DEV_EVENT(DBF_DEBUG, device,
"default ERP in fastpath "
"(%i retries left)",
cqr->retries);
if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
cqr->lpm = dasd_path_get_opm(device);
cqr->status = DASD_CQR_QUEUED;
next = cqr;
} else
cqr->status = DASD_CQR_ERROR;
}
if (next && (next->status == DASD_CQR_QUEUED) &&
(!device->stopped)) {
if (device->discipline->start_IO(next) == 0)
expires = next->expires;
}
if (expires != 0)
dasd_device_set_timer(device, expires);
else
dasd_device_clear_timer(device);
dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL(dasd_int_handler);
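/*
 * Illustrative note (hedged, not part of this function): the interrupt
 * handler is wired up per ccw device during probe, as dasd_generic_probe()
 * does further down:
 *
 *	cdev->handler = &dasd_int_handler;
 */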
enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
struct dasd_device *device;
device = dasd_device_from_cdev_locked(cdev);
if (IS_ERR(device))
goto out;
if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
device->state != device->target ||
!device->discipline->check_for_device_change){
dasd_put_device(device);
goto out;
}
if (device->discipline->dump_sense_dbf)
device->discipline->dump_sense_dbf(device, irb, "uc");
device->discipline->check_for_device_change(device, NULL, irb);
dasd_put_device(device);
out:
return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
/*
* If we have an error on a dasd_block layer request then we cancel
* and return all further requests from the same dasd_block as well.
*/
static void __dasd_device_recovery(struct dasd_device *device,
struct dasd_ccw_req *ref_cqr)
{
struct list_head *l, *n;
struct dasd_ccw_req *cqr;
/*
* only requeue requests that came from the dasd_block layer
*/
if (!ref_cqr->block)
return;
list_for_each_safe(l, n, &device->ccw_queue) {
cqr = list_entry(l, struct dasd_ccw_req, devlist);
if (cqr->status == DASD_CQR_QUEUED &&
ref_cqr->block == cqr->block) {
cqr->status = DASD_CQR_CLEARED;
}
}
}
/*
* Remove those ccw requests from the queue that need to be returned
* to the upper layer.
*/
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
struct list_head *final_queue)
{
struct list_head *l, *n;
struct dasd_ccw_req *cqr;
/* Process request with final status. */
list_for_each_safe(l, n, &device->ccw_queue) {
cqr = list_entry(l, struct dasd_ccw_req, devlist);
/* Skip any non-final request. */
if (cqr->status == DASD_CQR_QUEUED ||
cqr->status == DASD_CQR_IN_IO ||
cqr->status == DASD_CQR_CLEAR_PENDING)
continue;
if (cqr->status == DASD_CQR_ERROR) {
__dasd_device_recovery(device, cqr);
}
/* Rechain finished requests to final queue */
list_move_tail(&cqr->devlist, final_queue);
}
}
static void __dasd_process_cqr(struct dasd_device *device,
struct dasd_ccw_req *cqr)
{
char errorstring[ERRORLENGTH];
switch (cqr->status) {
case DASD_CQR_SUCCESS:
cqr->status = DASD_CQR_DONE;
break;
case DASD_CQR_ERROR:
cqr->status = DASD_CQR_NEED_ERP;
break;
case DASD_CQR_CLEARED:
cqr->status = DASD_CQR_TERMINATED;
break;
default:
/* internal error 12 - wrong cqr status */
snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status);
dev_err(&device->cdev->dev,
"An error occurred in the DASD device driver, "
"reason=%s\n", errorstring);
BUG();
}
if (cqr->callback)
cqr->callback(cqr, cqr->callback_data);
}
/*
* the cqrs from the final queue are returned to the upper layer
* by setting a dasd_block state and calling the callback function
*/
static void __dasd_device_process_final_queue(struct dasd_device *device,
struct list_head *final_queue)
{
struct list_head *l, *n;
struct dasd_ccw_req *cqr;
struct dasd_block *block;
list_for_each_safe(l, n, final_queue) {
cqr = list_entry(l, struct dasd_ccw_req, devlist);
list_del_init(&cqr->devlist);
block = cqr->block;
if (!block) {
__dasd_process_cqr(device, cqr);
} else {
spin_lock_bh(&block->queue_lock);
__dasd_process_cqr(device, cqr);
spin_unlock_bh(&block->queue_lock);
}
}
}
/*
* check if device should be autoquiesced due to too many timeouts
*/
static void __dasd_device_check_autoquiesce_timeout(struct dasd_device *device,
struct dasd_ccw_req *cqr)
{
if ((device->default_retries - cqr->retries) >= device->aq_timeouts)
dasd_handle_autoquiesce(device, cqr, DASD_EER_TIMEOUTS);
}
/*
* Take a look at the first request on the ccw queue and check
* if it reached its expire time. If so, terminate the IO.
*/
static void __dasd_device_check_expire(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
if (list_empty(&device->ccw_queue))
return;
cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
(time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
/*
* IO in safe offline processing should not
* run out of retries
*/
cqr->retries++;
}
if (device->discipline->term_IO(cqr) != 0) {
/* Hmpf, try again in 5 sec */
dev_err(&device->cdev->dev,
"cqr %p timed out (%lus) but cannot be "
"ended, retrying in 5 s\n",
cqr, (cqr->expires/HZ));
cqr->expires += 5*HZ;
dasd_device_set_timer(device, 5*HZ);
} else {
dev_err(&device->cdev->dev,
"cqr %p timed out (%lus), %i retries "
"remaining\n", cqr, (cqr->expires/HZ),
cqr->retries);
}
__dasd_device_check_autoquiesce_timeout(device, cqr);
}
}
/*
* return 1 when device is not eligible for IO
*/
static int __dasd_device_is_unusable(struct dasd_device *device,
struct dasd_ccw_req *cqr)
{
int mask = ~(DASD_STOPPED_DC_WAIT | DASD_STOPPED_NOSPC);
if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
/*
* dasd is being set offline
* but it is no safe offline where we have to allow I/O
*/
return 1;
}
if (device->stopped) {
if (device->stopped & mask) {
/* stopped and CQR will not change that. */
return 1;
}
if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
/* CQR is not able to change the device to operational. */
return 1;
}
/* CQR required to get device operational. */
}
return 0;
}
/*
* Take a look at the first request on the ccw queue and check
* if it needs to be started.
*/
static void __dasd_device_start_head(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
int rc;
if (list_empty(&device->ccw_queue))
return;
cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
if (cqr->status != DASD_CQR_QUEUED)
return;
/* if device is not usable return request to upper layer */
if (__dasd_device_is_unusable(device, cqr)) {
cqr->intrc = -EAGAIN;
cqr->status = DASD_CQR_CLEARED;
dasd_schedule_device_bh(device);
return;
}
rc = device->discipline->start_IO(cqr);
if (rc == 0)
dasd_device_set_timer(device, cqr->expires);
else if (rc == -EACCES) {
dasd_schedule_device_bh(device);
} else
/* Hmpf, try again in 1/2 sec */
dasd_device_set_timer(device, 50);
}
static void __dasd_device_check_path_events(struct dasd_device *device)
{
__u8 tbvpm, fcsecpm;
int rc;
tbvpm = dasd_path_get_tbvpm(device);
fcsecpm = dasd_path_get_fcsecpm(device);
if (!tbvpm && !fcsecpm)
return;
if (device->stopped & ~(DASD_STOPPED_DC_WAIT))
return;
dasd_path_clear_all_verify(device);
dasd_path_clear_all_fcsec(device);
rc = device->discipline->pe_handler(device, tbvpm, fcsecpm);
if (rc) {
dasd_path_add_tbvpm(device, tbvpm);
dasd_path_add_fcsecpm(device, fcsecpm);
dasd_device_set_timer(device, 50);
}
}
/*
* Go through all requests on the dasd_device request queue,
* terminate them on the cdev if necessary, and return them to the
* submitting layer via callback.
* Note:
* Make sure that all 'submitting layers' still exist when
* this function is called! In other words, when 'device' is a base
* device then all block layer requests must have been removed before
* via dasd_flush_block_queue.
*/
int dasd_flush_device_queue(struct dasd_device *device)
{
struct dasd_ccw_req *cqr, *n;
int rc;
struct list_head flush_queue;
INIT_LIST_HEAD(&flush_queue);
spin_lock_irq(get_ccwdev_lock(device->cdev));
rc = 0;
list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
/* Check status and move request to flush_queue */
switch (cqr->status) {
case DASD_CQR_IN_IO:
rc = device->discipline->term_IO(cqr);
if (rc) {
/* unable to terminate request */
dev_err(&device->cdev->dev,
"Flushing the DASD request queue "
"failed for request %p\n", cqr);
/* stop flush processing */
goto finished;
}
break;
case DASD_CQR_QUEUED:
cqr->stopclk = get_tod_clock();
cqr->status = DASD_CQR_CLEARED;
break;
default: /* no need to modify the others */
break;
}
list_move_tail(&cqr->devlist, &flush_queue);
}
finished:
spin_unlock_irq(get_ccwdev_lock(device->cdev));
/*
* After this point all requests must be in state CLEAR_PENDING,
* CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
* one of the others.
*/
list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
wait_event(dasd_flush_wq,
(cqr->status != DASD_CQR_CLEAR_PENDING));
/*
* Now set each request back to TERMINATED, DONE or NEED_ERP
* and call the callback function of flushed requests
*/
__dasd_device_process_final_queue(device, &flush_queue);
return rc;
}
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
/*
* Acquire the device lock and process queues for the device.
*/
static void dasd_device_tasklet(unsigned long data)
{
struct dasd_device *device = (struct dasd_device *) data;
struct list_head final_queue;
atomic_set(&device->tasklet_scheduled, 0);
INIT_LIST_HEAD(&final_queue);
spin_lock_irq(get_ccwdev_lock(device->cdev));
/* Check expire time of first request on the ccw queue. */
__dasd_device_check_expire(device);
/* find final requests on ccw queue */
__dasd_device_process_ccw_queue(device, &final_queue);
__dasd_device_check_path_events(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
/* Now call the callback function of requests with final status */
__dasd_device_process_final_queue(device, &final_queue);
spin_lock_irq(get_ccwdev_lock(device->cdev));
/* Now check if the head of the ccw queue needs to be started. */
__dasd_device_start_head(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
if (waitqueue_active(&shutdown_waitq))
wake_up(&shutdown_waitq);
dasd_put_device(device);
}
/*
* Schedules a call to dasd_device_tasklet over the device tasklet.
*/
void dasd_schedule_device_bh(struct dasd_device *device)
{
/* Protect against rescheduling. */
if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
return;
dasd_get_device(device);
tasklet_hi_schedule(&device->tasklet);
}
EXPORT_SYMBOL(dasd_schedule_device_bh);
void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
{
device->stopped |= bits;
}
EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
{
device->stopped &= ~bits;
if (!device->stopped)
wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
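/*
 * Illustrative usage sketch (hedged example, not part of the driver):
 * the stop bits gate request submission, so code that temporarily
 * quiesces a device pairs the two helpers and reschedules the tasklet,
 * as dasd_device_timeout() above does:
 *
 *	dasd_device_set_stop_bits(device, DASD_STOPPED_PENDING);
 *	...
 *	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
 *	dasd_schedule_device_bh(device);
 */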
/*
* Queue a request to the head of the device ccw_queue.
* Start the I/O if possible.
*/
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
unsigned long flags;
device = cqr->startdev;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
cqr->status = DASD_CQR_QUEUED;
list_add(&cqr->devlist, &device->ccw_queue);
/* let the bh start the request to keep them in order */
dasd_schedule_device_bh(device);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
EXPORT_SYMBOL(dasd_add_request_head);
/*
* Queue a request to the tail of the device ccw_queue.
* Start the I/O if possible.
*/
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
unsigned long flags;
device = cqr->startdev;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
cqr->status = DASD_CQR_QUEUED;
list_add_tail(&cqr->devlist, &device->ccw_queue);
/* let the bh start the request to keep them in order */
dasd_schedule_device_bh(device);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
EXPORT_SYMBOL(dasd_add_request_tail);
/*
* Wakeup helper for the 'sleep_on' functions.
*/
void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
cqr->callback_data = DASD_SLEEPON_END_TAG;
spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_wakeup_cb);
static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
int rc;
device = cqr->startdev;
spin_lock_irq(get_ccwdev_lock(device->cdev));
rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
return rc;
}
/*
* Checks if error recovery is necessary; returns 1 if yes, 0 otherwise.
*/
static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
dasd_erp_fn_t erp_fn;
if (cqr->status == DASD_CQR_FILLED)
return 0;
device = cqr->startdev;
if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
if (cqr->status == DASD_CQR_TERMINATED) {
device->discipline->handle_terminated_request(cqr);
return 1;
}
if (cqr->status == DASD_CQR_NEED_ERP) {
erp_fn = device->discipline->erp_action(cqr);
erp_fn(cqr);
return 1;
}
if (cqr->status == DASD_CQR_FAILED)
dasd_log_sense(cqr, &cqr->irb);
if (cqr->refers) {
__dasd_process_erp(device, cqr);
return 1;
}
}
return 0;
}
static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
{
if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
if (cqr->refers) /* erp is not done yet */
return 1;
return ((cqr->status != DASD_CQR_DONE) &&
(cqr->status != DASD_CQR_FAILED));
} else
return (cqr->status == DASD_CQR_FILLED);
}
static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
{
struct dasd_device *device;
int rc;
struct list_head ccw_queue;
struct dasd_ccw_req *cqr;
INIT_LIST_HEAD(&ccw_queue);
maincqr->status = DASD_CQR_FILLED;
device = maincqr->startdev;
list_add(&maincqr->blocklist, &ccw_queue);
for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
cqr = list_first_entry(&ccw_queue,
struct dasd_ccw_req, blocklist)) {
if (__dasd_sleep_on_erp(cqr))
continue;
if (cqr->status != DASD_CQR_FILLED) /* could be failed */
continue;
if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
!test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
cqr->status = DASD_CQR_FAILED;
cqr->intrc = -EPERM;
continue;
}
/* Non-temporary stop condition will trigger fail fast */
if (device->stopped & ~DASD_STOPPED_PENDING &&
test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
!dasd_eer_enabled(device) && device->aq_mask == 0) {
cqr->status = DASD_CQR_FAILED;
cqr->intrc = -ENOLINK;
continue;
}
/*
* Don't try to start requests if device is in
* offline processing; it might wait forever
*/
if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
cqr->status = DASD_CQR_FAILED;
cqr->intrc = -ENODEV;
continue;
}
/*
* Don't try to start requests if device is stopped
* except for path verification requests
*/
if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
if (interruptible) {
rc = wait_event_interruptible(
generic_waitq, !(device->stopped));
if (rc == -ERESTARTSYS) {
cqr->status = DASD_CQR_FAILED;
maincqr->intrc = rc;
continue;
}
} else
wait_event(generic_waitq, !(device->stopped));
}
if (!cqr->callback)
cqr->callback = dasd_wakeup_cb;
cqr->callback_data = DASD_SLEEPON_START_TAG;
dasd_add_request_tail(cqr);
if (interruptible) {
rc = wait_event_interruptible(
generic_waitq, _wait_for_wakeup(cqr));
if (rc == -ERESTARTSYS) {
dasd_cancel_req(cqr);
/* wait (non-interruptible) for final status */
wait_event(generic_waitq,
_wait_for_wakeup(cqr));
cqr->status = DASD_CQR_FAILED;
maincqr->intrc = rc;
continue;
}
} else
wait_event(generic_waitq, _wait_for_wakeup(cqr));
}
maincqr->endclk = get_tod_clock();
if ((maincqr->status != DASD_CQR_DONE) &&
(maincqr->intrc != -ERESTARTSYS))
dasd_log_sense(maincqr, &maincqr->irb);
if (maincqr->status == DASD_CQR_DONE)
rc = 0;
else if (maincqr->intrc)
rc = maincqr->intrc;
else
rc = -EIO;
return rc;
}
static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
{
struct dasd_ccw_req *cqr;
list_for_each_entry(cqr, ccw_queue, blocklist) {
if (cqr->callback_data != DASD_SLEEPON_END_TAG)
return 0;
}
return 1;
}
static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
{
struct dasd_device *device;
struct dasd_ccw_req *cqr, *n;
u8 *sense = NULL;
int rc;
retry:
list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
device = cqr->startdev;
if (cqr->status != DASD_CQR_FILLED) /* could be failed */
continue;
if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
!test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
cqr->status = DASD_CQR_FAILED;
cqr->intrc = -EPERM;
continue;
}
/* Non-temporary stop condition will trigger fail fast */
if (device->stopped & ~DASD_STOPPED_PENDING &&
test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
!dasd_eer_enabled(device)) {
cqr->status = DASD_CQR_FAILED;
cqr->intrc = -EAGAIN;
continue;
}
/* Don't try to start requests if device is stopped */
if (interruptible) {
rc = wait_event_interruptible(
generic_waitq, !device->stopped);
if (rc == -ERESTARTSYS) {
cqr->status = DASD_CQR_FAILED;
cqr->intrc = rc;
continue;
}
} else
wait_event(generic_waitq, !(device->stopped));
if (!cqr->callback)
cqr->callback = dasd_wakeup_cb;
cqr->callback_data = DASD_SLEEPON_START_TAG;
dasd_add_request_tail(cqr);
}
wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue));
rc = 0;
list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
/*
* In some cases the 'File Protected' or 'Incorrect Length'
* error might be expected and error recovery would be
* unnecessary in these cases. Check if the according suppress
* bit is set.
*/
sense = dasd_get_sense(&cqr->irb);
if (sense && sense[1] & SNS1_FILE_PROTECTED &&
test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
continue;
if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
continue;
/*
* for alias devices, simplify error recovery and
* return to the upper layer;
* do not skip ERP requests
*/
if (cqr->startdev != cqr->basedev && !cqr->refers &&
(cqr->status == DASD_CQR_TERMINATED ||
cqr->status == DASD_CQR_NEED_ERP))
return -EAGAIN;
/* normal recovery for basedev IO */
if (__dasd_sleep_on_erp(cqr))
/* handle erp first */
goto retry;
}
return 0;
}
/*
* Queue a request to the tail of the device ccw_queue and wait for
* its completion.
*/
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
return _dasd_sleep_on(cqr, 0);
}
EXPORT_SYMBOL(dasd_sleep_on);
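/*
 * Illustrative usage sketch (hedged example; the field setup shown is
 * an assumption, only the helper calls appear in this file): a
 * discipline issuing a synchronous channel program would do roughly:
 *
 *	cqr = dasd_fmalloc_request(magic, cplength, datasize, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	cqr->startdev = device;
 *	cqr->retries = 5;
 *	cqr->expires = 10 * HZ;
 *	... fill cqr->cpaddr with CCWs ...
 *	rc = dasd_sleep_on(cqr);
 *	dasd_ffree_request(cqr, device);
 */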
/*
* Start requests from a ccw_queue and wait for their completion.
*/
int dasd_sleep_on_queue(struct list_head *ccw_queue)
{
return _dasd_sleep_on_queue(ccw_queue, 0);
}
EXPORT_SYMBOL(dasd_sleep_on_queue);
/*
* Start requests from a ccw_queue and wait interruptible for their completion.
*/
int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue)
{
return _dasd_sleep_on_queue(ccw_queue, 1);
}
EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible);
/*
* Queue a request to the tail of the device ccw_queue and wait
* interruptible for its completion.
*/
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
return _dasd_sleep_on(cqr, 1);
}
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
/*
* Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
* for eckd devices) the currently running request has to be terminated
* and put back to status queued before the special request is added
* to the head of the queue. Then the special request is waited on normally.
*/
static inline int _dasd_term_running_cqr(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
int rc;
if (list_empty(&device->ccw_queue))
return 0;
cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
rc = device->discipline->term_IO(cqr);
if (!rc)
/*
* CQR terminated because a more important request is pending.
* Undo decreasing of retry counter because this is
* not an error case.
*/
cqr->retries++;
return rc;
}
int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
int rc;
device = cqr->startdev;
if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
!test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
cqr->status = DASD_CQR_FAILED;
cqr->intrc = -EPERM;
return -EIO;
}
spin_lock_irq(get_ccwdev_lock(device->cdev));
rc = _dasd_term_running_cqr(device);
if (rc) {
spin_unlock_irq(get_ccwdev_lock(device->cdev));
return rc;
}
cqr->callback = dasd_wakeup_cb;
cqr->callback_data = DASD_SLEEPON_START_TAG;
cqr->status = DASD_CQR_QUEUED;
/*
* add the new request as second;
* the terminated cqr needs to be finished first
*/
list_add(&cqr->devlist, device->ccw_queue.next);
/* let the bh start the request to keep them in order */
dasd_schedule_device_bh(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
wait_event(generic_waitq, _wait_for_wakeup(cqr));
if (cqr->status == DASD_CQR_DONE)
rc = 0;
else if (cqr->intrc)
rc = cqr->intrc;
else
rc = -EIO;
/* kick tasklets */
dasd_schedule_device_bh(device);
if (device->block)
dasd_schedule_block_bh(device->block);
return rc;
}
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
/*
* Cancels a request that was started with dasd_sleep_on_req.
* This is useful to time out requests. The request will be
* terminated if it is currently in i/o.
* Returns 0 if request termination was successful, or a
* negative error code if termination failed.
* Cancellation of a request is an asynchronous operation! The calling
* function has to wait until the request is properly returned via callback.
*/
static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
{
struct dasd_device *device = cqr->startdev;
int rc = 0;
switch (cqr->status) {
case DASD_CQR_QUEUED:
/* request was not started - just set to cleared */
cqr->status = DASD_CQR_CLEARED;
break;
case DASD_CQR_IN_IO:
/* request in IO - terminate IO and release again */
rc = device->discipline->term_IO(cqr);
if (rc) {
dev_err(&device->cdev->dev,
"Cancelling request %p failed with rc=%d\n",
cqr, rc);
} else {
cqr->stopclk = get_tod_clock();
}
break;
default: /* already finished or clear pending - do nothing */
break;
}
dasd_schedule_device_bh(device);
return rc;
}
int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
struct dasd_device *device = cqr->startdev;
unsigned long flags;
int rc;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
rc = __dasd_cancel_req(cqr);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return rc;
}
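/*
 * Usage sketch: since cancellation is asynchronous, callers wait for
 * the callback before touching the request again, as _dasd_sleep_on()
 * does for the interruptible case:
 *
 *	dasd_cancel_req(cqr);
 *	wait_event(generic_waitq, _wait_for_wakeup(cqr));
 */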
/*
* SECTION: Operations of the dasd_block layer.
*/
/*
* Timeout function for dasd_block. This is used when the block layer
* is waiting for something that may not come reliably (e.g. a state
* change interrupt).
*/
static void dasd_block_timeout(struct timer_list *t)
{
unsigned long flags;
struct dasd_block *block;
block = from_timer(block, t, timer);
spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
/* re-activate request queue */
dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
dasd_schedule_block_bh(block);
blk_mq_run_hw_queues(block->gdp->queue, true);
}
/*
* Setup timeout for a dasd_block in jiffies.
*/
void dasd_block_set_timer(struct dasd_block *block, int expires)
{
if (expires == 0)
del_timer(&block->timer);
else
mod_timer(&block->timer, jiffies + expires);
}
EXPORT_SYMBOL(dasd_block_set_timer);
/*
* Clear timeout for a dasd_block.
*/
void dasd_block_clear_timer(struct dasd_block *block)
{
del_timer(&block->timer);
}
EXPORT_SYMBOL(dasd_block_clear_timer);
/*
* Process finished error recovery ccw.
*/
static void __dasd_process_erp(struct dasd_device *device,
struct dasd_ccw_req *cqr)
{
dasd_erp_fn_t erp_fn;
if (cqr->status == DASD_CQR_DONE)
DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
else
dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
erp_fn = device->discipline->erp_postaction(cqr);
erp_fn(cqr);
}
static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
struct request *req;
blk_status_t error = BLK_STS_OK;
unsigned int proc_bytes;
int status;
req = (struct request *) cqr->callback_data;
dasd_profile_end(cqr->block, cqr, req);
proc_bytes = cqr->proc_bytes;
status = cqr->block->base->discipline->free_cp(cqr, req);
if (status < 0)
error = errno_to_blk_status(status);
else if (status == 0) {
switch (cqr->intrc) {
case -EPERM:
/*
* DASD doesn't implement SCSI/NVMe reservations, but it
* implements a locking scheme similar to them. We
* return this error when we no longer have the lock.
*/
error = BLK_STS_RESV_CONFLICT;
break;
case -ENOLINK:
error = BLK_STS_TRANSPORT;
break;
case -ETIMEDOUT:
error = BLK_STS_TIMEOUT;
break;
default:
error = BLK_STS_IOERR;
break;
}
}
/*
* We need to take care of ETIMEDOUT errors here since the
* complete callback does not get called in this case.
* Handle all errors here and avoid additional code to
* transfer the error value to the complete callback.
*/
if (error) {
blk_mq_end_request(req, error);
blk_mq_run_hw_queues(req->q, true);
} else {
/*
* Partially completed requests can happen with ESE devices.
* During read we might have gotten an NRF error and have to
* complete a request partially.
*/
if (proc_bytes) {
blk_update_request(req, BLK_STS_OK, proc_bytes);
blk_mq_requeue_request(req, true);
} else if (likely(!blk_should_fake_timeout(req->q))) {
blk_mq_complete_request(req);
}
}
}
/*
* Process ccw request queue.
*/
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
struct list_head *final_queue)
{
struct list_head *l, *n;
struct dasd_ccw_req *cqr;
dasd_erp_fn_t erp_fn;
unsigned long flags;
struct dasd_device *base = block->base;
restart:
/* Process request with final status. */
list_for_each_safe(l, n, &block->ccw_queue) {
cqr = list_entry(l, struct dasd_ccw_req, blocklist);
if (cqr->status != DASD_CQR_DONE &&
cqr->status != DASD_CQR_FAILED &&
cqr->status != DASD_CQR_NEED_ERP &&
cqr->status != DASD_CQR_TERMINATED)
continue;
if (cqr->status == DASD_CQR_TERMINATED) {
base->discipline->handle_terminated_request(cqr);
goto restart;
}
/* Process requests that may be recovered */
if (cqr->status == DASD_CQR_NEED_ERP) {
erp_fn = base->discipline->erp_action(cqr);
if (IS_ERR(erp_fn(cqr)))
continue;
goto restart;
}
/* log sense for fatal error */
if (cqr->status == DASD_CQR_FAILED) {
dasd_log_sense(cqr, &cqr->irb);
}
/*
* First call extended error reporting and check for autoquiesce
*/
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
if (cqr->status == DASD_CQR_FAILED &&
dasd_handle_autoquiesce(base, cqr, DASD_EER_FATALERROR)) {
cqr->status = DASD_CQR_FILLED;
cqr->retries = 255;
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
goto restart;
}
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
/* Process finished ERP request. */
if (cqr->refers) {
__dasd_process_erp(base, cqr);
goto restart;
}
/* Rechain finished requests to final queue */
cqr->endclk = get_tod_clock();
list_move_tail(&cqr->blocklist, final_queue);
}
}
static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
dasd_schedule_block_bh(cqr->block);
}
static void __dasd_block_start_head(struct dasd_block *block)
{
struct dasd_ccw_req *cqr;
if (list_empty(&block->ccw_queue))
return;
/* We always begin with the first request on the queue, as some
* previously started requests have to be enqueued on a
* dasd_device again for error recovery.
*/
list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
if (cqr->status != DASD_CQR_FILLED)
continue;
if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
!test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
cqr->status = DASD_CQR_FAILED;
cqr->intrc = -EPERM;
dasd_schedule_block_bh(block);
continue;
}
/* Non-temporary stop condition will trigger fail fast */
if (block->base->stopped & ~DASD_STOPPED_PENDING &&
test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
!dasd_eer_enabled(block->base) && block->base->aq_mask == 0) {
cqr->status = DASD_CQR_FAILED;
cqr->intrc = -ENOLINK;
dasd_schedule_block_bh(block);
continue;
}
/* Don't try to start requests if device is stopped */
if (block->base->stopped)
return;
/* just a fail-safe check, should not happen */
if (!cqr->startdev)
cqr->startdev = block->base;
/* make sure that the requests we submit find their way back */
cqr->callback = dasd_return_cqr_cb;
dasd_add_request_tail(cqr);
}
}
/*
* Central dasd_block layer routine. Takes requests from the generic
* block layer request queue, creates ccw requests, enqueues them on
* a dasd_device and processes ccw requests that have been returned.
*/
static void dasd_block_tasklet(unsigned long data)
{
struct dasd_block *block = (struct dasd_block *) data;
struct list_head final_queue;
struct list_head *l, *n;
struct dasd_ccw_req *cqr;
struct dasd_queue *dq;
atomic_set(&block->tasklet_scheduled, 0);
INIT_LIST_HEAD(&final_queue);
spin_lock_irq(&block->queue_lock);
/* Finish off requests on ccw queue */
__dasd_process_block_ccw_queue(block, &final_queue);
spin_unlock_irq(&block->queue_lock);
/* Now call the callback function of requests with final status */
list_for_each_safe(l, n, &final_queue) {
cqr = list_entry(l, struct dasd_ccw_req, blocklist);
dq = cqr->dq;
spin_lock_irq(&dq->lock);
list_del_init(&cqr->blocklist);
__dasd_cleanup_cqr(cqr);
spin_unlock_irq(&dq->lock);
}
spin_lock_irq(&block->queue_lock);
/* Now check if the head of the ccw queue needs to be started. */
__dasd_block_start_head(block);
spin_unlock_irq(&block->queue_lock);
if (waitqueue_active(&shutdown_waitq))
wake_up(&shutdown_waitq);
dasd_put_device(block->base);
}
static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
wake_up(&dasd_flush_wq);
}
/*
* Requeue a request back to the block request queue
* only works for block requests
*/
static void _dasd_requeue_request(struct dasd_ccw_req *cqr)
{
struct request *req;
/*
* If the request is an ERP request there is nothing to requeue.
* This will be done with the remaining original request.
*/
if (cqr->refers)
return;
spin_lock_irq(&cqr->dq->lock);
req = (struct request *) cqr->callback_data;
blk_mq_requeue_request(req, true);
spin_unlock_irq(&cqr->dq->lock);
return;
}
static int _dasd_requests_to_flushqueue(struct dasd_block *block,
struct list_head *flush_queue)
{
struct dasd_ccw_req *cqr, *n;
unsigned long flags;
int rc, i;
spin_lock_irqsave(&block->queue_lock, flags);
rc = 0;
restart:
list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
/* if this request is currently owned by a dasd_device, cancel it */
if (cqr->status >= DASD_CQR_QUEUED)
rc = dasd_cancel_req(cqr);
if (rc < 0)
break;
/* Rechain request (including erp chain) so it won't be
* touched by the dasd_block_tasklet anymore.
* Replace the callback so we notice when the request
* is returned from the dasd_device layer.
*/
cqr->callback = _dasd_wake_block_flush_cb;
for (i = 0; cqr; cqr = cqr->refers, i++)
list_move_tail(&cqr->blocklist, flush_queue);
if (i > 1)
/* moved more than one request - need to restart */
goto restart;
}
spin_unlock_irqrestore(&block->queue_lock, flags);
return rc;
}
/*
* Go through all requests on the dasd_block request queue, cancel them
* on the respective dasd_device, and return them to the generic
* block layer.
*/
static int dasd_flush_block_queue(struct dasd_block *block)
{
struct dasd_ccw_req *cqr, *n;
struct list_head flush_queue;
unsigned long flags;
int rc;
INIT_LIST_HEAD(&flush_queue);
rc = _dasd_requests_to_flushqueue(block, &flush_queue);
/* Now call the callback function of flushed requests */
restart_cb:
list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
/* Process finished ERP request. */
if (cqr->refers) {
spin_lock_bh(&block->queue_lock);
__dasd_process_erp(block->base, cqr);
spin_unlock_bh(&block->queue_lock);
/* restart list_for_xx loop since dasd_process_erp
* might remove multiple elements */
goto restart_cb;
}
/* call the callback function */
spin_lock_irqsave(&cqr->dq->lock, flags);
cqr->endclk = get_tod_clock();
list_del_init(&cqr->blocklist);
__dasd_cleanup_cqr(cqr);
spin_unlock_irqrestore(&cqr->dq->lock, flags);
}
return rc;
}
/*
* Schedules a call to dasd_block_tasklet over the block's tasklet.
*/
void dasd_schedule_block_bh(struct dasd_block *block)
{
/* Protect against rescheduling. */
if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
return;
/* life cycle of block is bound to its base device */
dasd_get_device(block->base);
tasklet_hi_schedule(&block->tasklet);
}
EXPORT_SYMBOL(dasd_schedule_block_bh);
/*
* SECTION: external block device operations
* (request queue handling, open, release, etc.)
*/
/*
* Dasd request queue function. Called from ll_rw_blk.c
*/
static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *qd)
{
struct dasd_block *block = hctx->queue->queuedata;
struct dasd_queue *dq = hctx->driver_data;
struct request *req = qd->rq;
struct dasd_device *basedev;
struct dasd_ccw_req *cqr;
blk_status_t rc = BLK_STS_OK;
basedev = block->base;
spin_lock_irq(&dq->lock);
if (basedev->state < DASD_STATE_READY ||
test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) {
DBF_DEV_EVENT(DBF_ERR, basedev,
"device not ready for request %p", req);
rc = BLK_STS_IOERR;
goto out;
}
/*
* if device is stopped do not fetch new requests
* except failfast is active which will let requests fail
* immediately in __dasd_block_start_head()
*/
if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) {
DBF_DEV_EVENT(DBF_ERR, basedev,
"device stopped request %p", req);
rc = BLK_STS_RESOURCE;
goto out;
}
if (basedev->features & DASD_FEATURE_READONLY &&
rq_data_dir(req) == WRITE) {
DBF_DEV_EVENT(DBF_ERR, basedev,
"Rejecting write request %p", req);
rc = BLK_STS_IOERR;
goto out;
}
if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
(basedev->features & DASD_FEATURE_FAILFAST ||
blk_noretry_request(req))) {
DBF_DEV_EVENT(DBF_ERR, basedev,
"Rejecting failfast request %p", req);
rc = BLK_STS_IOERR;
goto out;
}
cqr = basedev->discipline->build_cp(basedev, block, req);
if (IS_ERR(cqr)) {
if (PTR_ERR(cqr) == -EBUSY ||
PTR_ERR(cqr) == -ENOMEM ||
PTR_ERR(cqr) == -EAGAIN) {
rc = BLK_STS_RESOURCE;
goto out;
}
DBF_DEV_EVENT(DBF_ERR, basedev,
"CCW creation failed (rc=%ld) on request %p",
PTR_ERR(cqr), req);
rc = BLK_STS_IOERR;
goto out;
}
/*
* Note: callback is set to dasd_return_cqr_cb in
* __dasd_block_start_head to cover erp requests as well
*/
cqr->callback_data = req;
cqr->status = DASD_CQR_FILLED;
cqr->dq = dq;
blk_mq_start_request(req);
spin_lock(&block->queue_lock);
list_add_tail(&cqr->blocklist, &block->ccw_queue);
INIT_LIST_HEAD(&cqr->devlist);
dasd_profile_start(block, cqr, req);
dasd_schedule_block_bh(block);
spin_unlock(&block->queue_lock);
out:
spin_unlock_irq(&dq->lock);
return rc;
}
/*
* Block timeout callback, called from the block layer
*
* Return values:
* BLK_EH_RESET_TIMER if the request should be left running
* BLK_EH_DONE if the request is handled or terminated
* by the driver.
*/
enum blk_eh_timer_return dasd_times_out(struct request *req)
{
struct dasd_block *block = req->q->queuedata;
struct dasd_device *device;
struct dasd_ccw_req *cqr;
unsigned long flags;
int rc = 0;
cqr = blk_mq_rq_to_pdu(req);
if (!cqr)
return BLK_EH_DONE;
spin_lock_irqsave(&cqr->dq->lock, flags);
device = cqr->startdev ? cqr->startdev : block->base;
if (!device->blk_timeout) {
spin_unlock_irqrestore(&cqr->dq->lock, flags);
return BLK_EH_RESET_TIMER;
}
DBF_DEV_EVENT(DBF_WARNING, device,
" dasd_times_out cqr %p status %x",
cqr, cqr->status);
spin_lock(&block->queue_lock);
spin_lock(get_ccwdev_lock(device->cdev));
cqr->retries = -1;
cqr->intrc = -ETIMEDOUT;
if (cqr->status >= DASD_CQR_QUEUED) {
rc = __dasd_cancel_req(cqr);
} else if (cqr->status == DASD_CQR_FILLED ||
cqr->status == DASD_CQR_NEED_ERP) {
cqr->status = DASD_CQR_TERMINATED;
} else if (cqr->status == DASD_CQR_IN_ERP) {
struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
list_for_each_entry_safe(searchcqr, nextcqr,
&block->ccw_queue, blocklist) {
tmpcqr = searchcqr;
while (tmpcqr->refers)
tmpcqr = tmpcqr->refers;
if (tmpcqr != cqr)
continue;
/* searchcqr is an ERP request for cqr */
searchcqr->retries = -1;
searchcqr->intrc = -ETIMEDOUT;
if (searchcqr->status >= DASD_CQR_QUEUED) {
rc = __dasd_cancel_req(searchcqr);
} else if ((searchcqr->status == DASD_CQR_FILLED) ||
(searchcqr->status == DASD_CQR_NEED_ERP)) {
searchcqr->status = DASD_CQR_TERMINATED;
rc = 0;
} else if (searchcqr->status == DASD_CQR_IN_ERP) {
/*
* Shouldn't happen; most recent ERP
* request is at the front of queue
*/
continue;
}
break;
}
}
spin_unlock(get_ccwdev_lock(device->cdev));
dasd_schedule_block_bh(block);
spin_unlock(&block->queue_lock);
spin_unlock_irqrestore(&cqr->dq->lock, flags);
return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE;
}
static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int idx)
{
struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL);
if (!dq)
return -ENOMEM;
spin_lock_init(&dq->lock);
hctx->driver_data = dq;
return 0;
}
static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
{
kfree(hctx->driver_data);
hctx->driver_data = NULL;
}
static void dasd_request_done(struct request *req)
{
blk_mq_end_request(req, 0);
blk_mq_run_hw_queues(req->q, true);
}
struct blk_mq_ops dasd_mq_ops = {
.queue_rq = do_dasd_request,
.complete = dasd_request_done,
.timeout = dasd_times_out,
.init_hctx = dasd_init_hctx,
.exit_hctx = dasd_exit_hctx,
};
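/*
 * Illustrative sketch (hedged; the tag-set setup lives outside this
 * file, and the field values shown are assumptions): the gendisk code
 * registers these operations through a blk-mq tag set, roughly:
 *
 *	block->tag_set.ops = &dasd_mq_ops;
 *	block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
 *	block->tag_set.nr_hw_queues = nr_hw_queues;
 *	rc = blk_mq_alloc_tag_set(&block->tag_set);
 */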
static int dasd_open(struct gendisk *disk, blk_mode_t mode)
{
struct dasd_device *base;
int rc;
base = dasd_device_from_gendisk(disk);
if (!base)
return -ENODEV;
atomic_inc(&base->block->open_count);
if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
rc = -ENODEV;
goto unlock;
}
if (!try_module_get(base->discipline->owner)) {
rc = -EINVAL;
goto unlock;
}
if (dasd_probeonly) {
dev_info(&base->cdev->dev,
"Accessing the DASD failed because it is in "
"probeonly mode\n");
rc = -EPERM;
goto out;
}
if (base->state <= DASD_STATE_BASIC) {
DBF_DEV_EVENT(DBF_ERR, base, " %s",
" Cannot open unrecognized device");
rc = -ENODEV;
goto out;
}
if ((mode & BLK_OPEN_WRITE) &&
(test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
(base->features & DASD_FEATURE_READONLY))) {
rc = -EROFS;
goto out;
}
dasd_put_device(base);
return 0;
out:
module_put(base->discipline->owner);
unlock:
atomic_dec(&base->block->open_count);
dasd_put_device(base);
return rc;
}
static void dasd_release(struct gendisk *disk)
{
struct dasd_device *base = dasd_device_from_gendisk(disk);
if (base) {
atomic_dec(&base->block->open_count);
module_put(base->discipline->owner);
dasd_put_device(base);
}
}
/*
* Return disk geometry.
*/
static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
struct dasd_device *base;
base = dasd_device_from_gendisk(bdev->bd_disk);
if (!base)
return -ENODEV;
if (!base->discipline ||
!base->discipline->fill_geometry) {
dasd_put_device(base);
return -EINVAL;
}
base->discipline->fill_geometry(base->block, geo);
geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
dasd_put_device(base);
return 0;
}
const struct block_device_operations
dasd_device_operations = {
.owner = THIS_MODULE,
.open = dasd_open,
.release = dasd_release,
.ioctl = dasd_ioctl,
.compat_ioctl = dasd_ioctl,
.getgeo = dasd_getgeo,
.set_read_only = dasd_set_read_only,
};
/*******************************************************************************
* end of block device operations
*/
static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
dasd_proc_exit();
#endif
dasd_eer_exit();
kmem_cache_destroy(dasd_page_cache);
dasd_page_cache = NULL;
dasd_gendisk_exit();
dasd_devmap_exit();
if (dasd_debug_area != NULL) {
debug_unregister(dasd_debug_area);
dasd_debug_area = NULL;
}
dasd_statistics_removeroot();
}
/*
* SECTION: common functions for ccw_driver use
*/
/*
* Is the device read-only?
* Note that this function does not report the setting of the
* readonly device attribute, but how it is configured in z/VM.
*/
int dasd_device_is_ro(struct dasd_device *device)
{
struct ccw_dev_id dev_id;
struct diag210 diag_data;
int rc;
if (!MACHINE_IS_VM)
return 0;
ccw_device_get_id(device->cdev, &dev_id);
memset(&diag_data, 0, sizeof(diag_data));
diag_data.vrdcdvno = dev_id.devno;
diag_data.vrdclen = sizeof(diag_data);
rc = diag210(&diag_data);
if (rc == 0 || rc == 2) {
return diag_data.vrdcvfla & 0x80;
} else {
DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
dev_id.devno, rc);
return 0;
}
}
EXPORT_SYMBOL_GPL(dasd_device_is_ro);
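/*
 * Usage sketch (hedged example): disciplines typically call this during
 * device setup to honor a read-only z/VM attachment, e.g.:
 *
 *	if (dasd_device_is_ro(device))
 *		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
 */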
static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
{
struct ccw_device *cdev = data;
int ret;
ret = ccw_device_set_online(cdev);
if (ret)
pr_warn("%s: Setting the DASD online failed with rc=%d\n",
dev_name(&cdev->dev), ret);
}
/*
* Initial attempt at a probe function. This can be simplified once
* the other detection code is gone.
*/
int dasd_generic_probe(struct ccw_device *cdev)
{
cdev->handler = &dasd_int_handler;
/*
* Automatically online either all dasd devices (dasd_autodetect)
* or all devices specified with dasd= parameters during
* initial probe.
*/
if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
(dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
async_schedule(dasd_generic_auto_online, cdev);
return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_probe);
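/*
 * Illustrative sketch (hedged; the dasd_xxx names are hypothetical): a
 * discipline wires the generic helpers into its ccw_driver, roughly:
 *
 *	static struct ccw_driver dasd_xxx_driver = {
 *		.probe	     = dasd_generic_probe,
 *		.remove	     = dasd_generic_remove,
 *		.set_online  = dasd_xxx_set_online,
 *		.set_offline = dasd_generic_set_offline,
 *		.notify	     = dasd_generic_notify,
 *	};
 */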
void dasd_generic_free_discipline(struct dasd_device *device)
{
/* Forget the discipline information. */
if (device->discipline) {
if (device->discipline->uncheck_device)
device->discipline->uncheck_device(device);
module_put(device->discipline->owner);
device->discipline = NULL;
}
if (device->base_discipline) {
module_put(device->base_discipline->owner);
device->base_discipline = NULL;
}
}
EXPORT_SYMBOL_GPL(dasd_generic_free_discipline);
/*
* This will one day be called from a global not_oper handler.
* It is also used by driver_unregister during module unload.
*/
void dasd_generic_remove(struct ccw_device *cdev)
{
struct dasd_device *device;
struct dasd_block *block;
device = dasd_device_from_cdev(cdev);
if (IS_ERR(device))
return;
if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
/* Already doing offline processing */
dasd_put_device(device);
return;
}
/*
* This device is removed unconditionally. Set offline
* flag to prevent dasd_open from opening it while it is
* not quite down yet.
*/
dasd_set_target_state(device, DASD_STATE_NEW);
cdev->handler = NULL;
/* dasd_delete_device destroys the device reference. */
block = device->block;
dasd_delete_device(device);
/*
* life cycle of block is bound to device, so delete it after
* device was safely removed
*/
if (block)
dasd_free_block(block);
}
EXPORT_SYMBOL_GPL(dasd_generic_remove);
/*
* Activate a device. This is called from dasd_{eckd,fba}_probe() when either
* the device is detected for the first time and is supposed to be used
* or the user has started activation through sysfs.
*/
int dasd_generic_set_online(struct ccw_device *cdev,
struct dasd_discipline *base_discipline)
{
struct dasd_discipline *discipline;
struct dasd_device *device;
int rc;
/* first online clears initial online feature flag */
dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
device = dasd_create_device(cdev);
if (IS_ERR(device))
return PTR_ERR(device);
discipline = base_discipline;
if (device->features & DASD_FEATURE_USEDIAG) {
if (!dasd_diag_discipline_pointer) {
/* Try to load the required module. */
rc = request_module(DASD_DIAG_MOD);
if (rc) {
pr_warn("%s Setting the DASD online failed "
"because the required module %s "
"could not be loaded (rc=%d)\n",
dev_name(&cdev->dev), DASD_DIAG_MOD,
rc);
dasd_delete_device(device);
return -ENODEV;
}
}
/* Module init could have failed, so check again here after
* request_module(). */
if (!dasd_diag_discipline_pointer) {
pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n",
dev_name(&cdev->dev));
dasd_delete_device(device);
return -ENODEV;
}
discipline = dasd_diag_discipline_pointer;
}
if (!try_module_get(base_discipline->owner)) {
dasd_delete_device(device);
return -EINVAL;
}
if (!try_module_get(discipline->owner)) {
module_put(base_discipline->owner);
dasd_delete_device(device);
return -EINVAL;
}
device->base_discipline = base_discipline;
device->discipline = discipline;
/* check_device will allocate block device if necessary */
rc = discipline->check_device(device);
if (rc) {
pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n",
dev_name(&cdev->dev), discipline->name, rc);
module_put(discipline->owner);
module_put(base_discipline->owner);
dasd_delete_device(device);
return rc;
}
dasd_set_target_state(device, DASD_STATE_ONLINE);
if (device->state <= DASD_STATE_KNOWN) {
pr_warn("%s Setting the DASD online failed because of a missing discipline\n",
dev_name(&cdev->dev));
rc = -ENODEV;
dasd_set_target_state(device, DASD_STATE_NEW);
if (device->block)
dasd_free_block(device->block);
dasd_delete_device(device);
} else
pr_debug("dasd_generic device %s found\n",
dev_name(&cdev->dev));
wait_event(dasd_init_waitq, _wait_for_device(device));
dasd_put_device(device);
return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
int dasd_generic_set_offline(struct ccw_device *cdev)
{
struct dasd_device *device;
struct dasd_block *block;
int max_count, open_count, rc;
unsigned long flags;
rc = 0;
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
device = dasd_device_from_cdev_locked(cdev);
if (IS_ERR(device)) {
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
return PTR_ERR(device);
}
/*
* We must make sure that this device is currently not in use.
* The open_count is increased for every opener; that includes
* the blkdev_get in dasd_scan_partitions. We are only interested
* in the other openers.
*/
if (device->block) {
max_count = device->block->bdev ? 0 : -1;
open_count = atomic_read(&device->block->open_count);
if (open_count > max_count) {
if (open_count > 0)
pr_warn("%s: The DASD cannot be set offline with open count %i\n",
dev_name(&cdev->dev), open_count);
else
pr_warn("%s: The DASD cannot be set offline while it is in use\n",
dev_name(&cdev->dev));
rc = -EBUSY;
goto out_err;
}
}
/*
* Test if the offline processing is already running and exit if so.
* If a safe offline is being processed, this can only be a normal
* offline that should be allowed to overtake the safe offline and
* cancel any I/O, since we do not want to wait any longer.
*/
if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING,
&device->flags);
} else {
rc = -EBUSY;
goto out_err;
}
}
set_bit(DASD_FLAG_OFFLINE, &device->flags);
	/*
	 * If safe_offline was requested, set the safe_offline_running flag
	 * and clear safe_offline so that a subsequent call to normal
	 * offline can overtake the safe offline processing.
	 */
if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
!test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
/* need to unlock here to wait for outstanding I/O */
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
		/*
		 * If we want to set the device safe offline, all I/O
		 * operations should be finished before continuing the
		 * offline process, so sync the bdev first and then wait
		 * for our queues to become empty.
		 */
if (device->block)
bdev_mark_dead(device->block->bdev, false);
dasd_schedule_device_bh(device);
rc = wait_event_interruptible(shutdown_waitq,
_wait_for_empty_queues(device));
if (rc != 0)
goto interrupted;
		/*
		 * Check whether a normal offline process overtook the safe
		 * offline processing. In that case simply do nothing besides
		 * returning that we got interrupted; otherwise mark safe
		 * offline as no longer running and continue with the normal
		 * offline.
		 */
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
rc = -ERESTARTSYS;
goto out_err;
}
clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
}
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
dasd_set_target_state(device, DASD_STATE_NEW);
/* dasd_delete_device destroys the device reference. */
block = device->block;
dasd_delete_device(device);
/*
* life cycle of block is bound to device, so delete it after
* device was safely removed
*/
if (block)
dasd_free_block(block);
return 0;
interrupted:
/* interrupted by signal */
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
clear_bit(DASD_FLAG_OFFLINE, &device->flags);
out_err:
dasd_put_device(device);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
int dasd_generic_last_path_gone(struct dasd_device *device)
{
struct dasd_ccw_req *cqr;
dev_warn(&device->cdev->dev, "No operational channel path is left "
"for the device\n");
DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
/* First call extended error reporting and check for autoquiesce. */
dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH);
if (device->state < DASD_STATE_BASIC)
return 0;
/* Device is active. We want to keep it. */
list_for_each_entry(cqr, &device->ccw_queue, devlist)
if ((cqr->status == DASD_CQR_IN_IO) ||
(cqr->status == DASD_CQR_CLEAR_PENDING)) {
cqr->status = DASD_CQR_QUEUED;
cqr->retries++;
}
dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
dasd_device_clear_timer(device);
dasd_schedule_device_bh(device);
return 1;
}
EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
int dasd_generic_path_operational(struct dasd_device *device)
{
dev_info(&device->cdev->dev, "A channel path to the device has become "
"operational\n");
DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
dasd_schedule_device_bh(device);
if (device->block) {
dasd_schedule_block_bh(device->block);
if (device->block->gdp)
blk_mq_run_hw_queues(device->block->gdp->queue, true);
}
if (!device->stopped)
wake_up(&generic_waitq);
return 1;
}
EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
int dasd_generic_notify(struct ccw_device *cdev, int event)
{
struct dasd_device *device;
int ret;
device = dasd_device_from_cdev_locked(cdev);
if (IS_ERR(device))
return 0;
ret = 0;
switch (event) {
case CIO_GONE:
case CIO_BOXED:
case CIO_NO_PATH:
dasd_path_no_path(device);
ret = dasd_generic_last_path_gone(device);
break;
case CIO_OPER:
ret = 1;
if (dasd_path_get_opm(device))
ret = dasd_generic_path_operational(device);
break;
}
dasd_put_device(device);
return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_notify);
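/*
 * Usage sketch: disciplines wire dasd_generic_notify() and
 * dasd_generic_path_event() into their ccw_driver so that CIO events reach
 * the DASD core (my_dasd_driver is an illustrative name; the fields are
 * members of struct ccw_driver):
 *
 *	static struct ccw_driver my_dasd_driver = {
 *		...
 *		.notify		= dasd_generic_notify,
 *		.path_event	= dasd_generic_path_event,
 *	};
 */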
void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
{
struct dasd_device *device;
int chp, oldopm, hpfpm, ifccpm;
device = dasd_device_from_cdev_locked(cdev);
if (IS_ERR(device))
return;
oldopm = dasd_path_get_opm(device);
for (chp = 0; chp < 8; chp++) {
if (path_event[chp] & PE_PATH_GONE) {
dasd_path_notoper(device, chp);
}
if (path_event[chp] & PE_PATH_AVAILABLE) {
dasd_path_available(device, chp);
dasd_schedule_device_bh(device);
}
if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
if (!dasd_path_is_operational(device, chp) &&
!dasd_path_need_verify(device, chp)) {
/*
* we can not establish a pathgroup on an
* unavailable path, so trigger a path
* verification first
*/
dasd_path_available(device, chp);
dasd_schedule_device_bh(device);
}
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Pathgroup re-established\n");
if (device->discipline->kick_validate)
device->discipline->kick_validate(device);
}
if (path_event[chp] & PE_PATH_FCES_EVENT) {
dasd_path_fcsec_update(device, chp);
dasd_schedule_device_bh(device);
}
}
hpfpm = dasd_path_get_hpfpm(device);
ifccpm = dasd_path_get_ifccpm(device);
if (!dasd_path_get_opm(device) && hpfpm) {
		/*
		 * Device has no operational paths but at least one path is
		 * disabled due to HPF errors; disable HPF altogether and
		 * use the path(s) again.
		 */
if (device->discipline->disable_hpf)
device->discipline->disable_hpf(device);
dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
dasd_path_set_tbvpm(device, hpfpm);
dasd_schedule_device_bh(device);
dasd_schedule_requeue(device);
} else if (!dasd_path_get_opm(device) && ifccpm) {
		/*
		 * Device has no operational paths but at least one path is
		 * disabled due to IFCC errors; trigger path verification on
		 * the paths with IFCC errors.
		 */
dasd_path_set_tbvpm(device, ifccpm);
dasd_schedule_device_bh(device);
}
if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
dev_warn(&device->cdev->dev,
"No verified channel paths remain for the device\n");
DBF_DEV_EVENT(DBF_WARNING, device,
"%s", "last verified path gone");
/* First call extended error reporting and check for autoquiesce. */
dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH);
dasd_device_set_stop_bits(device,
DASD_STOPPED_DC_WAIT);
}
dasd_put_device(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_path_event);
int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
{
if (!dasd_path_get_opm(device) && lpm) {
dasd_path_set_opm(device, lpm);
dasd_generic_path_operational(device);
} else
dasd_path_add_opm(device, lpm);
return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
void dasd_generic_space_exhaust(struct dasd_device *device,
struct dasd_ccw_req *cqr)
{
/* First call extended error reporting and check for autoquiesce. */
dasd_handle_autoquiesce(device, NULL, DASD_EER_NOSPC);
if (device->state < DASD_STATE_BASIC)
return;
if (cqr->status == DASD_CQR_IN_IO ||
cqr->status == DASD_CQR_CLEAR_PENDING) {
cqr->status = DASD_CQR_QUEUED;
cqr->retries++;
}
dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
dasd_device_clear_timer(device);
dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);
void dasd_generic_space_avail(struct dasd_device *device)
{
dev_info(&device->cdev->dev, "Extent pool space is available\n");
DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");
dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
dasd_schedule_device_bh(device);
if (device->block) {
dasd_schedule_block_bh(device->block);
if (device->block->gdp)
blk_mq_run_hw_queues(device->block->gdp->queue, true);
}
if (!device->stopped)
wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_avail);
/*
* clear active requests and requeue them to block layer if possible
*/
int dasd_generic_requeue_all_requests(struct dasd_device *device)
{
struct dasd_block *block = device->block;
struct list_head requeue_queue;
struct dasd_ccw_req *cqr, *n;
int rc;
if (!block)
return 0;
INIT_LIST_HEAD(&requeue_queue);
rc = _dasd_requests_to_flushqueue(block, &requeue_queue);
/* Now call the callback function of flushed requests */
restart_cb:
list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) {
wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
/* Process finished ERP request. */
if (cqr->refers) {
spin_lock_bh(&block->queue_lock);
__dasd_process_erp(block->base, cqr);
spin_unlock_bh(&block->queue_lock);
			/* restart the list_for_each_entry_safe loop since
			 * __dasd_process_erp might remove multiple elements
			 */
goto restart_cb;
}
_dasd_requeue_request(cqr);
list_del_init(&cqr->blocklist);
cqr->block->base->discipline->free_cp(
cqr, (struct request *) cqr->callback_data);
}
dasd_schedule_device_bh(device);
return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_requeue_all_requests);
static void do_requeue_requests(struct work_struct *work)
{
struct dasd_device *device = container_of(work, struct dasd_device,
requeue_requests);
dasd_generic_requeue_all_requests(device);
dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
if (device->block)
dasd_schedule_block_bh(device->block);
dasd_put_device(device);
}
void dasd_schedule_requeue(struct dasd_device *device)
{
dasd_get_device(device);
	/* queue call to do_requeue_requests to the kernel event daemon. */
if (!schedule_work(&device->requeue_requests))
dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_schedule_requeue);
static int dasd_handle_autoquiesce(struct dasd_device *device,
struct dasd_ccw_req *cqr,
unsigned int reason)
{
/* in any case write eer message with reason */
if (dasd_eer_enabled(device))
dasd_eer_write(device, cqr, reason);
if (!test_bit(reason, &device->aq_mask))
return 0;
/* notify eer about autoquiesce */
if (dasd_eer_enabled(device))
dasd_eer_write(device, NULL, DASD_EER_AUTOQUIESCE);
pr_info("%s: The DASD has been put in the quiesce state\n",
dev_name(&device->cdev->dev));
dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE);
if (device->features & DASD_FEATURE_REQUEUEQUIESCE)
dasd_schedule_requeue(device);
return 1;
}
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
int rdc_buffer_size,
int magic)
{
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
NULL);
if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
dev_err(&device->cdev->dev,
"An error occurred in the DASD device driver, "
"reason=%s\n", "13");
return cqr;
}
ccw = cqr->cpaddr;
ccw->cmd_code = CCW_CMD_RDC;
ccw->cda = (__u32)virt_to_phys(cqr->data);
ccw->flags = 0;
ccw->count = rdc_buffer_size;
cqr->startdev = device;
cqr->memdev = device;
cqr->expires = 10*HZ;
cqr->retries = 256;
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
return cqr;
}
int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
void *rdc_buffer, int rdc_buffer_size)
{
int ret;
struct dasd_ccw_req *cqr;
cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
if (IS_ERR(cqr))
return PTR_ERR(cqr);
ret = dasd_sleep_on(cqr);
if (ret == 0)
memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
dasd_sfree_request(cqr, cqr->memdev);
return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
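/*
 * Usage sketch: disciplines call this while bringing a device up to fill a
 * read-device-characteristics buffer. The magic and the 64-byte buffer
 * below are illustrative assumptions:
 *
 *	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
 *					 &private->rdc_data, 64);
 *	if (rc)
 *		dev_warn(&device->cdev->dev,
 *			 "Read device characteristics failed, rc=%d\n", rc);
 */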
/*
* In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always
* an array of 32 bytes, so we can unify the sense data access
* for both modes.
*/
char *dasd_get_sense(struct irb *irb)
{
struct tsb *tsb = NULL;
char *sense = NULL;
if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
if (irb->scsw.tm.tcw)
tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw));
if (tsb && tsb->length == 64 && tsb->flags)
switch (tsb->flags & 0x07) {
case 1: /* tsa_iostat */
sense = tsb->tsa.iostat.sense;
break;
case 2: /* tsa_ddpc */
sense = tsb->tsa.ddpc.sense;
break;
default:
/* currently we don't use interrogate data */
break;
}
} else if (irb->esw.esw0.erw.cons) {
sense = irb->ecw;
}
return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);
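/*
 * Usage sketch: error-recovery code can inspect sense data uniformly for
 * command and transport mode. Treating bit 0x80 of byte 0 as a command
 * reject is an assumption about the ECKD sense format:
 *
 *	char *sense = dasd_get_sense(irb);
 *
 *	if (sense && (sense[0] & 0x80))
 *		// handle command reject
 */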
void dasd_generic_shutdown(struct ccw_device *cdev)
{
struct dasd_device *device;
device = dasd_device_from_cdev(cdev);
if (IS_ERR(device))
return;
if (device->block)
dasd_schedule_block_bh(device->block);
dasd_schedule_device_bh(device);
wait_event(shutdown_waitq, _wait_for_empty_queues(device));
}
EXPORT_SYMBOL_GPL(dasd_generic_shutdown);
static int __init dasd_init(void)
{
int rc;
init_waitqueue_head(&dasd_init_waitq);
init_waitqueue_head(&dasd_flush_wq);
init_waitqueue_head(&generic_waitq);
init_waitqueue_head(&shutdown_waitq);
/* register 'common' DASD debug area, used for all DBF_XXX calls */
dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
if (dasd_debug_area == NULL) {
rc = -ENOMEM;
goto failed;
}
debug_register_view(dasd_debug_area, &debug_sprintf_view);
debug_set_level(dasd_debug_area, DBF_WARNING);
DBF_EVENT(DBF_EMERG, "%s", "debug area created");
dasd_diag_discipline_pointer = NULL;
dasd_statistics_createroot();
rc = dasd_devmap_init();
if (rc)
goto failed;
rc = dasd_gendisk_init();
if (rc)
goto failed;
rc = dasd_parse();
if (rc)
goto failed;
rc = dasd_eer_init();
if (rc)
goto failed;
#ifdef CONFIG_PROC_FS
rc = dasd_proc_init();
if (rc)
goto failed;
#endif
return 0;
failed:
pr_info("The DASD device driver could not be initialized\n");
dasd_exit();
return rc;
}
module_init(dasd_init);
module_exit(dasd_exit);
| linux-master | drivers/s390/block/dasd.c |
/*
* TURBOchannel bus services.
*
* Copyright (c) Harald Koerfgen, 1998
* Copyright (c) 2001, 2003, 2005, 2006, 2018 Maciej W. Rozycki
* Copyright (c) 2005 James Simmons
*
* This file is subject to the terms and conditions of the GNU
* General Public License. See the file "COPYING" in the main
* directory of this archive for more details.
*/
#include <linux/compiler.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tc.h>
#include <linux/types.h>
#include <asm/io.h>
static struct tc_bus tc_bus = {
.name = "TURBOchannel",
};
/*
* Probing for TURBOchannel modules.
*/
static void __init tc_bus_add_devices(struct tc_bus *tbus)
{
resource_size_t slotsize = tbus->info.slot_size << 20;
resource_size_t extslotsize = tbus->ext_slot_size;
resource_size_t slotaddr;
resource_size_t extslotaddr;
resource_size_t devsize;
void __iomem *module;
struct tc_dev *tdev;
int i, slot, err;
u8 pattern[4];
long offset;
for (slot = 0; slot < tbus->num_tcslots; slot++) {
slotaddr = tbus->slot_base + slot * slotsize;
extslotaddr = tbus->ext_slot_base + slot * extslotsize;
module = ioremap(slotaddr, slotsize);
BUG_ON(!module);
offset = TC_OLDCARD;
err = 0;
err |= tc_preadb(pattern + 0, module + offset + TC_PATTERN0);
err |= tc_preadb(pattern + 1, module + offset + TC_PATTERN1);
err |= tc_preadb(pattern + 2, module + offset + TC_PATTERN2);
err |= tc_preadb(pattern + 3, module + offset + TC_PATTERN3);
if (err)
goto out_err;
if (pattern[0] != 0x55 || pattern[1] != 0x00 ||
pattern[2] != 0xaa || pattern[3] != 0xff) {
offset = TC_NEWCARD;
err = 0;
err |= tc_preadb(pattern + 0,
module + offset + TC_PATTERN0);
err |= tc_preadb(pattern + 1,
module + offset + TC_PATTERN1);
err |= tc_preadb(pattern + 2,
module + offset + TC_PATTERN2);
err |= tc_preadb(pattern + 3,
module + offset + TC_PATTERN3);
if (err)
goto out_err;
}
if (pattern[0] != 0x55 || pattern[1] != 0x00 ||
pattern[2] != 0xaa || pattern[3] != 0xff)
goto out_err;
/* Found a board, allocate it an entry in the list */
tdev = kzalloc(sizeof(*tdev), GFP_KERNEL);
if (!tdev) {
pr_err("tc%x: unable to allocate tc_dev\n", slot);
goto out_err;
}
dev_set_name(&tdev->dev, "tc%x", slot);
tdev->bus = tbus;
tdev->dev.parent = &tbus->dev;
tdev->dev.bus = &tc_bus_type;
tdev->slot = slot;
/* TURBOchannel has 34-bit DMA addressing (16GiB space). */
tdev->dma_mask = DMA_BIT_MASK(34);
tdev->dev.dma_mask = &tdev->dma_mask;
tdev->dev.coherent_dma_mask = DMA_BIT_MASK(34);
for (i = 0; i < 8; i++) {
tdev->firmware[i] =
readb(module + offset + TC_FIRM_VER + 4 * i);
tdev->vendor[i] =
readb(module + offset + TC_VENDOR + 4 * i);
tdev->name[i] =
readb(module + offset + TC_MODULE + 4 * i);
}
tdev->firmware[8] = 0;
tdev->vendor[8] = 0;
tdev->name[8] = 0;
pr_info("%s: %s %s %s\n", dev_name(&tdev->dev), tdev->vendor,
tdev->name, tdev->firmware);
devsize = readb(module + offset + TC_SLOT_SIZE);
devsize <<= 22;
if (devsize <= slotsize) {
tdev->resource.start = slotaddr;
tdev->resource.end = slotaddr + devsize - 1;
} else if (devsize <= extslotsize) {
tdev->resource.start = extslotaddr;
tdev->resource.end = extslotaddr + devsize - 1;
} else {
pr_err("%s: Cannot provide slot space "
"(%ldMiB required, up to %ldMiB supported)\n",
dev_name(&tdev->dev), (long)(devsize >> 20),
(long)(max(slotsize, extslotsize) >> 20));
kfree(tdev);
goto out_err;
}
tdev->resource.name = tdev->name;
tdev->resource.flags = IORESOURCE_MEM;
tc_device_get_irq(tdev);
if (device_register(&tdev->dev)) {
put_device(&tdev->dev);
goto out_err;
}
list_add_tail(&tdev->node, &tbus->devices);
out_err:
iounmap(module);
}
}
/*
* The main entry.
*/
static int __init tc_init(void)
{
/* Initialize the TURBOchannel bus */
if (tc_bus_get_info(&tc_bus))
goto out_err;
INIT_LIST_HEAD(&tc_bus.devices);
dev_set_name(&tc_bus.dev, "tc");
if (device_register(&tc_bus.dev))
goto out_err_device;
if (tc_bus.info.slot_size) {
unsigned int tc_clock = tc_get_speed(&tc_bus) / 100000;
pr_info("tc: TURBOchannel rev. %d at %d.%d MHz "
"(with%s parity)\n", tc_bus.info.revision,
tc_clock / 10, tc_clock % 10,
tc_bus.info.parity ? "" : "out");
tc_bus.resource[0].start = tc_bus.slot_base;
tc_bus.resource[0].end = tc_bus.slot_base +
(tc_bus.info.slot_size << 20) *
tc_bus.num_tcslots - 1;
tc_bus.resource[0].name = tc_bus.name;
tc_bus.resource[0].flags = IORESOURCE_MEM;
if (request_resource(&iomem_resource,
&tc_bus.resource[0]) < 0) {
pr_err("tc: Cannot reserve resource\n");
goto out_err_device;
}
if (tc_bus.ext_slot_size) {
tc_bus.resource[1].start = tc_bus.ext_slot_base;
tc_bus.resource[1].end = tc_bus.ext_slot_base +
tc_bus.ext_slot_size *
tc_bus.num_tcslots - 1;
tc_bus.resource[1].name = tc_bus.name;
tc_bus.resource[1].flags = IORESOURCE_MEM;
if (request_resource(&iomem_resource,
&tc_bus.resource[1]) < 0) {
pr_err("tc: Cannot reserve resource\n");
goto out_err_resource;
}
}
tc_bus_add_devices(&tc_bus);
}
return 0;
out_err_resource:
release_resource(&tc_bus.resource[0]);
out_err_device:
put_device(&tc_bus.dev);
out_err:
return 0;
}
subsys_initcall(tc_init);
| linux-master | drivers/tc/tc.c |
/*
* TURBOchannel driver services.
*
* Copyright (c) 2005 James Simmons
* Copyright (c) 2006 Maciej W. Rozycki
*
* Loosely based on drivers/dio/dio-driver.c and
* drivers/pci/pci-driver.c.
*
* This file is subject to the terms and conditions of the GNU
* General Public License. See the file "COPYING" in the main
* directory of this archive for more details.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/tc.h>
/**
* tc_register_driver - register a new TC driver
 * @tdrv: the driver structure to register
 *
 * Adds the driver structure to the list of registered drivers.
* Returns a negative value on error, otherwise 0.
* If no error occurred, the driver remains registered even if
* no device was claimed during registration.
*/
int tc_register_driver(struct tc_driver *tdrv)
{
return driver_register(&tdrv->driver);
}
EXPORT_SYMBOL(tc_register_driver);
/**
* tc_unregister_driver - unregister a TC driver
 * @tdrv: the driver structure to unregister
*
* Deletes the driver structure from the list of registered TC drivers,
* gives it a chance to clean up by calling its remove() function for
* each device it was responsible for, and marks those devices as
* driverless.
*/
void tc_unregister_driver(struct tc_driver *tdrv)
{
driver_unregister(&tdrv->driver);
}
EXPORT_SYMBOL(tc_unregister_driver);
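/*
 * Registration sketch: a minimal TURBOchannel driver pairs an ID table
 * with the register/unregister calls above. All my_* names and the
 * vendor/board strings are illustrative assumptions:
 *
 *	static const struct tc_device_id my_tc_table[] = {
 *		{ .vendor = "DEC     ", .name = "PMAG-BA " },
 *		{ }
 *	};
 *
 *	static struct tc_driver my_tc_driver = {
 *		.id_table = my_tc_table,
 *		.driver = {
 *			.name	= "my_tc",
 *			.bus	= &tc_bus_type,
 *			.probe	= my_tc_probe,
 *		},
 *	};
 *
 * tc_register_driver(&my_tc_driver) is then called from module init and
 * tc_unregister_driver(&my_tc_driver) from module exit.
 */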
/**
* tc_match_device - tell if a TC device structure has a matching
* TC device ID structure
 * @tdrv: the TC driver to search for matching TC device ID strings
* @tdev: the TC device structure to match against
*
* Used by a driver to check whether a TC device present in the
* system is in its list of supported devices. Returns the matching
* tc_device_id structure or %NULL if there is no match.
*/
static const struct tc_device_id *tc_match_device(struct tc_driver *tdrv,
struct tc_dev *tdev)
{
const struct tc_device_id *id = tdrv->id_table;
if (id) {
while (id->name[0] || id->vendor[0]) {
if (strcmp(tdev->name, id->name) == 0 &&
strcmp(tdev->vendor, id->vendor) == 0)
return id;
id++;
}
}
return NULL;
}
/**
* tc_bus_match - Tell if a device structure has a matching
* TC device ID structure
* @dev: the device structure to match against
* @drv: the device driver to search for matching TC device ID strings
*
* Used by a driver to check whether a TC device present in the
* system is in its list of supported devices. Returns 1 if there
* is a match or 0 otherwise.
*/
static int tc_bus_match(struct device *dev, struct device_driver *drv)
{
struct tc_dev *tdev = to_tc_dev(dev);
struct tc_driver *tdrv = to_tc_driver(drv);
const struct tc_device_id *id;
id = tc_match_device(tdrv, tdev);
if (id)
return 1;
return 0;
}
struct bus_type tc_bus_type = {
.name = "tc",
.match = tc_bus_match,
};
EXPORT_SYMBOL(tc_bus_type);
static int __init tc_driver_init(void)
{
return bus_register(&tc_bus_type);
}
postcore_initcall(tc_driver_init);
| linux-master | drivers/tc/tc-driver.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2011-2017, The Linux Foundation
*/
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include "slimbus.h"
/**
* slim_msg_response() - Deliver Message response received from a device to the
* framework.
*
* @ctrl: Controller handle
* @reply: Reply received from the device
 * @tid: Transaction ID received with which framework can associate reply.
 * @len: Length of the reply
 *
 * Called by controller to inform framework about the response received.
 * This helps make the API asynchronous: the controller driver does not need
 * to manage another table besides the one the framework uses to map TIDs
 * to buffers.
*/
void slim_msg_response(struct slim_controller *ctrl, u8 *reply, u8 tid, u8 len)
{
struct slim_msg_txn *txn;
struct slim_val_inf *msg;
unsigned long flags;
spin_lock_irqsave(&ctrl->txn_lock, flags);
txn = idr_find(&ctrl->tid_idr, tid);
spin_unlock_irqrestore(&ctrl->txn_lock, flags);
if (txn == NULL)
return;
msg = txn->msg;
if (msg == NULL || msg->rbuf == NULL) {
dev_err(ctrl->dev, "Got response to invalid TID:%d, len:%d\n",
tid, len);
return;
}
slim_free_txn_tid(ctrl, txn);
memcpy(msg->rbuf, reply, len);
if (txn->comp)
complete(txn->comp);
/* Remove runtime-pm vote now that response was received for TID txn */
pm_runtime_mark_last_busy(ctrl->dev);
pm_runtime_put_autosuspend(ctrl->dev);
}
EXPORT_SYMBOL_GPL(slim_msg_response);
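/*
 * Usage sketch: a controller's RX path extracts the TID and payload from a
 * received reply before handing it to the framework. The 4-byte header
 * layout assumed below is illustrative, not mandated by this API:
 *
 *	u8 tid = buf[3];
 *
 *	slim_msg_response(ctrl, &buf[4], tid, len - 4);
 */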
/**
* slim_alloc_txn_tid() - Allocate a tid to txn
*
* @ctrl: Controller handle
* @txn: transaction to be allocated with tid.
*
* Return: zero on success with valid txn->tid and error code on failures.
*/
int slim_alloc_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&ctrl->txn_lock, flags);
ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 1,
SLIM_MAX_TIDS, GFP_ATOMIC);
if (ret < 0) {
spin_unlock_irqrestore(&ctrl->txn_lock, flags);
return ret;
}
txn->tid = ret;
spin_unlock_irqrestore(&ctrl->txn_lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(slim_alloc_txn_tid);
/**
* slim_free_txn_tid() - Free tid of txn
*
* @ctrl: Controller handle
* @txn: transaction whose tid should be freed
*/
void slim_free_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
unsigned long flags;
spin_lock_irqsave(&ctrl->txn_lock, flags);
idr_remove(&ctrl->tid_idr, txn->tid);
spin_unlock_irqrestore(&ctrl->txn_lock, flags);
}
EXPORT_SYMBOL_GPL(slim_free_txn_tid);
/**
* slim_do_transfer() - Process a SLIMbus-messaging transaction
*
* @ctrl: Controller handle
* @txn: Transaction to be sent over SLIMbus
*
* Called by controller to transmit messaging transactions not dealing with
 * Interface/Value elements (e.g. transmitting a message to assign a logical
 * address to a slave device).
*
* Return: -ETIMEDOUT: If transmission of this message timed out
* (e.g. due to bus lines not being clocked or driven by controller)
*/
int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
DECLARE_COMPLETION_ONSTACK(done);
bool need_tid = false, clk_pause_msg = false;
int ret, timeout;
/*
* do not vote for runtime-PM if the transactions are part of clock
* pause sequence
*/
if (ctrl->sched.clk_state == SLIM_CLK_ENTERING_PAUSE &&
(txn->mt == SLIM_MSG_MT_CORE &&
txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW))
clk_pause_msg = true;
if (!clk_pause_msg) {
ret = pm_runtime_get_sync(ctrl->dev);
if (ctrl->sched.clk_state != SLIM_CLK_ACTIVE) {
dev_err(ctrl->dev, "ctrl wrong state:%d, ret:%d\n",
ctrl->sched.clk_state, ret);
goto slim_xfer_err;
}
}
/* Initialize tid to invalid value */
txn->tid = 0;
need_tid = slim_tid_txn(txn->mt, txn->mc);
if (need_tid) {
ret = slim_alloc_txn_tid(ctrl, txn);
if (ret)
return ret;
		if (!txn->msg->comp)
			txn->comp = &done;
		else
			txn->comp = txn->msg->comp;
}
ret = ctrl->xfer_msg(ctrl, txn);
if (!ret && need_tid && !txn->msg->comp) {
unsigned long ms = txn->rl + HZ;
timeout = wait_for_completion_timeout(txn->comp,
msecs_to_jiffies(ms));
if (!timeout) {
ret = -ETIMEDOUT;
slim_free_txn_tid(ctrl, txn);
}
}
if (ret)
dev_err(ctrl->dev, "Tx:MT:0x%x, MC:0x%x, LA:0x%x failed:%d\n",
txn->mt, txn->mc, txn->la, ret);
slim_xfer_err:
if (!clk_pause_msg && (txn->tid == 0 || ret == -ETIMEDOUT)) {
/*
* remove runtime-pm vote if this was TX only, or
* if there was error during this transaction
*/
pm_runtime_mark_last_busy(ctrl->dev);
pm_runtime_put_autosuspend(ctrl->dev);
}
return ret;
}
EXPORT_SYMBOL_GPL(slim_do_transfer);
static int slim_val_inf_sanity(struct slim_controller *ctrl,
struct slim_val_inf *msg, u8 mc)
{
if (!msg || msg->num_bytes > 16 ||
(msg->start_offset + msg->num_bytes) > 0xC00)
goto reterr;
switch (mc) {
case SLIM_MSG_MC_REQUEST_VALUE:
case SLIM_MSG_MC_REQUEST_INFORMATION:
if (msg->rbuf != NULL)
return 0;
break;
case SLIM_MSG_MC_CHANGE_VALUE:
case SLIM_MSG_MC_CLEAR_INFORMATION:
if (msg->wbuf != NULL)
return 0;
break;
case SLIM_MSG_MC_REQUEST_CHANGE_VALUE:
case SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION:
if (msg->rbuf != NULL && msg->wbuf != NULL)
return 0;
break;
}
reterr:
if (msg)
dev_err(ctrl->dev, "Sanity check failed:msg:offset:0x%x, mc:%d\n",
msg->start_offset, mc);
return -EINVAL;
}
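/*
 * Map a transfer size in bytes (1..16) to the SLIMbus slice-size code that
 * slim_xfer_msg() folds into the element code. Worked example: a 6-byte
 * access maps through sizetocode[6 - 1] to slice code 4.
 */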
static u16 slim_slicesize(int code)
{
static const u8 sizetocode[16] = {
0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7
};
code = clamp(code, 1, (int)ARRAY_SIZE(sizetocode));
return sizetocode[code - 1];
}
/**
* slim_xfer_msg() - Transfer a value info message on slim device
*
 * @sbdev: slim device to which this msg has to be transferred
 * @msg: value info message pointer
 * @mc: message code of the message
 *
 * Called by drivers that want to transfer value or information elements.
*
* Return: -ETIMEDOUT: If transmission of this message timed out
*/
int slim_xfer_msg(struct slim_device *sbdev, struct slim_val_inf *msg,
u8 mc)
{
DEFINE_SLIM_LDEST_TXN(txn_stack, mc, 6, sbdev->laddr, msg);
struct slim_msg_txn *txn = &txn_stack;
struct slim_controller *ctrl = sbdev->ctrl;
int ret;
u16 sl;
if (!ctrl)
return -EINVAL;
ret = slim_val_inf_sanity(ctrl, msg, mc);
if (ret)
return ret;
sl = slim_slicesize(msg->num_bytes);
dev_dbg(ctrl->dev, "SB xfer msg:os:%x, len:%d, MC:%x, sl:%x\n",
msg->start_offset, msg->num_bytes, mc, sl);
txn->ec = ((sl | (1 << 3)) | ((msg->start_offset & 0xFFF) << 4));
switch (mc) {
case SLIM_MSG_MC_REQUEST_CHANGE_VALUE:
case SLIM_MSG_MC_CHANGE_VALUE:
case SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION:
case SLIM_MSG_MC_CLEAR_INFORMATION:
txn->rl += msg->num_bytes;
break;
default:
break;
}
if (slim_tid_txn(txn->mt, txn->mc))
txn->rl++;
return slim_do_transfer(ctrl, txn);
}
EXPORT_SYMBOL_GPL(slim_xfer_msg);
static void slim_fill_msg(struct slim_val_inf *msg, u32 addr,
size_t count, u8 *rbuf, u8 *wbuf)
{
msg->start_offset = addr;
msg->num_bytes = count;
msg->rbuf = rbuf;
msg->wbuf = wbuf;
msg->comp = NULL;
}
/**
* slim_read() - Read SLIMbus value element
*
* @sdev: client handle.
* @addr: address of value element to read.
* @count: number of bytes to read. Maximum bytes allowed are 16.
* @val: will return what the value element value was
*
* Return: -EINVAL for Invalid parameters, -ETIMEDOUT If transmission of
* this message timed out (e.g. due to bus lines not being clocked
* or driven by controller)
*/
int slim_read(struct slim_device *sdev, u32 addr, size_t count, u8 *val)
{
struct slim_val_inf msg;
slim_fill_msg(&msg, addr, count, val, NULL);
return slim_xfer_msg(sdev, &msg, SLIM_MSG_MC_REQUEST_VALUE);
}
EXPORT_SYMBOL_GPL(slim_read);
/**
* slim_readb() - Read byte from SLIMbus value element
*
* @sdev: client handle.
* @addr: address in the value element to read.
*
* Return: byte value of value element.
*/
int slim_readb(struct slim_device *sdev, u32 addr)
{
int ret;
u8 buf;
ret = slim_read(sdev, addr, 1, &buf);
if (ret < 0)
return ret;
else
return buf;
}
EXPORT_SYMBOL_GPL(slim_readb);
/**
* slim_write() - Write SLIMbus value element
*
* @sdev: client handle.
* @addr: address in the value element to write.
* @count: number of bytes to write. Maximum bytes allowed are 16.
* @val: value to write to value element
*
* Return: -EINVAL for Invalid parameters, -ETIMEDOUT If transmission of
* this message timed out (e.g. due to bus lines not being clocked
* or driven by controller)
*/
int slim_write(struct slim_device *sdev, u32 addr, size_t count, u8 *val)
{
struct slim_val_inf msg;
slim_fill_msg(&msg, addr, count, NULL, val);
return slim_xfer_msg(sdev, &msg, SLIM_MSG_MC_CHANGE_VALUE);
}
EXPORT_SYMBOL_GPL(slim_write);
/**
* slim_writeb() - Write byte to SLIMbus value element
*
* @sdev: client handle.
* @addr: address of value element to write.
* @value: value to write to value element
*
* Return: -EINVAL for Invalid parameters, -ETIMEDOUT If transmission of
* this message timed out (e.g. due to bus lines not being clocked
* or driven by controller)
*
*/
int slim_writeb(struct slim_device *sdev, u32 addr, u8 value)
{
return slim_write(sdev, addr, 1, &value);
}
EXPORT_SYMBOL_GPL(slim_writeb);
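/*
 * Usage sketch: a client can combine slim_readb() and slim_writeb() for a
 * read-modify-write of a value element. The 0x40 address and BIT(0) flag
 * are illustrative assumptions:
 *
 *	int val = slim_readb(sdev, 0x40);
 *
 *	if (val >= 0)
 *		ret = slim_writeb(sdev, 0x40, val | BIT(0));
 */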
| linux-master | drivers/slimbus/messaging.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2011-2017, The Linux Foundation
*/
#include <linux/errno.h>
#include "slimbus.h"
/**
* slim_ctrl_clk_pause() - Called by slimbus controller to enter/exit
* 'clock pause'
* @ctrl: controller requesting bus to be paused or woken up
* @wakeup: Wakeup this controller from clock pause.
* @restart: Restart time value per spec used for clock pause. This value
* isn't used when controller is to be woken up.
*
 * The SLIMbus specification needs this sequence to turn off clocks for the
 * bus. The sequence involves sending 3 broadcast messages (reconfiguration
 * sequence) to inform all devices on the bus.
 * To exit clock-pause, the controller typically wakes up the active framer
 * device. This API executes the clock pause reconfiguration sequence if
 * wakeup is false. If wakeup is true, the controller's wakeup is called.
 * For entering clock-pause, -EBUSY is returned if a message transaction is
 * pending.
*/
int slim_ctrl_clk_pause(struct slim_controller *ctrl, bool wakeup, u8 restart)
{
int i, ret = 0;
unsigned long flags;
struct slim_sched *sched = &ctrl->sched;
struct slim_val_inf msg = {0, 0, NULL, NULL};
DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_MC_BEGIN_RECONFIGURATION,
3, SLIM_LA_MANAGER, &msg);
if (wakeup == false && restart > SLIM_CLK_UNSPECIFIED)
return -EINVAL;
mutex_lock(&sched->m_reconf);
if (wakeup) {
if (sched->clk_state == SLIM_CLK_ACTIVE) {
mutex_unlock(&sched->m_reconf);
return 0;
}
/*
* Fine-tune calculation based on clock gear,
* message-bandwidth after bandwidth management
*/
ret = wait_for_completion_timeout(&sched->pause_comp,
msecs_to_jiffies(100));
if (!ret) {
mutex_unlock(&sched->m_reconf);
pr_err("Previous clock pause did not finish");
return -ETIMEDOUT;
}
ret = 0;
/*
* Slimbus framework will call controller wakeup
* Controller should make sure that it sets active framer
* out of clock pause
*/
if (sched->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup)
ret = ctrl->wakeup(ctrl);
if (!ret)
sched->clk_state = SLIM_CLK_ACTIVE;
mutex_unlock(&sched->m_reconf);
return ret;
}
/* already paused */
if (ctrl->sched.clk_state == SLIM_CLK_PAUSED) {
mutex_unlock(&sched->m_reconf);
return 0;
}
spin_lock_irqsave(&ctrl->txn_lock, flags);
for (i = 0; i < SLIM_MAX_TIDS; i++) {
/* Pending response for a message */
if (idr_find(&ctrl->tid_idr, i)) {
spin_unlock_irqrestore(&ctrl->txn_lock, flags);
mutex_unlock(&sched->m_reconf);
return -EBUSY;
}
}
spin_unlock_irqrestore(&ctrl->txn_lock, flags);
sched->clk_state = SLIM_CLK_ENTERING_PAUSE;
/* clock pause sequence */
ret = slim_do_transfer(ctrl, &txn);
if (ret)
goto clk_pause_ret;
txn.mc = SLIM_MSG_MC_NEXT_PAUSE_CLOCK;
txn.rl = 4;
msg.num_bytes = 1;
msg.wbuf = &restart;
ret = slim_do_transfer(ctrl, &txn);
if (ret)
goto clk_pause_ret;
txn.mc = SLIM_MSG_MC_RECONFIGURE_NOW;
txn.rl = 3;
msg.num_bytes = 1;
msg.wbuf = NULL;
ret = slim_do_transfer(ctrl, &txn);
clk_pause_ret:
if (ret) {
sched->clk_state = SLIM_CLK_ACTIVE;
} else {
sched->clk_state = SLIM_CLK_PAUSED;
complete(&sched->pause_comp);
}
mutex_unlock(&sched->m_reconf);
return ret;
}
EXPORT_SYMBOL_GPL(slim_ctrl_clk_pause);
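/*
 * Usage sketch: controller drivers typically enter clock pause from their
 * runtime-suspend path and leave it on resume. A minimal sketch, assuming
 * the controller needs no extra bookkeeping around the calls:
 *
 *	// runtime suspend
 *	slim_ctrl_clk_pause(ctrl, false, SLIM_CLK_UNSPECIFIED);
 *
 *	// runtime resume
 *	slim_ctrl_clk_pause(ctrl, true, 0);
 */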
| linux-master | drivers/slimbus/sched.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2011-2017, The Linux Foundation
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/slimbus.h>
#include "slimbus.h"
static DEFINE_IDA(ctrl_ida);
static const struct slim_device_id *slim_match(const struct slim_device_id *id,
const struct slim_device *sbdev)
{
while (id->manf_id != 0 || id->prod_code != 0) {
if (id->manf_id == sbdev->e_addr.manf_id &&
id->prod_code == sbdev->e_addr.prod_code &&
id->dev_index == sbdev->e_addr.dev_index &&
id->instance == sbdev->e_addr.instance)
return id;
id++;
}
return NULL;
}
static int slim_device_match(struct device *dev, struct device_driver *drv)
{
struct slim_device *sbdev = to_slim_device(dev);
struct slim_driver *sbdrv = to_slim_driver(drv);
/* Attempt an OF style match first */
if (of_driver_match_device(dev, drv))
return 1;
return !!slim_match(sbdrv->id_table, sbdev);
}
static void slim_device_update_status(struct slim_device *sbdev,
enum slim_device_status status)
{
struct slim_driver *sbdrv;
if (sbdev->status == status)
return;
sbdev->status = status;
if (!sbdev->dev.driver)
return;
sbdrv = to_slim_driver(sbdev->dev.driver);
if (sbdrv->device_status)
sbdrv->device_status(sbdev, sbdev->status);
}
static int slim_device_probe(struct device *dev)
{
struct slim_device *sbdev = to_slim_device(dev);
struct slim_driver *sbdrv = to_slim_driver(dev->driver);
int ret;
ret = sbdrv->probe(sbdev);
if (ret)
return ret;
/* try getting the logical address after probe */
ret = slim_get_logical_addr(sbdev);
if (!ret) {
slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_UP);
} else {
dev_err(&sbdev->dev, "Failed to get logical address\n");
ret = -EPROBE_DEFER;
}
return ret;
}
static void slim_device_remove(struct device *dev)
{
struct slim_device *sbdev = to_slim_device(dev);
struct slim_driver *sbdrv;
if (dev->driver) {
sbdrv = to_slim_driver(dev->driver);
if (sbdrv->remove)
sbdrv->remove(sbdev);
}
}
static int slim_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct slim_device *sbdev = to_slim_device(dev);
return add_uevent_var(env, "MODALIAS=slim:%s", dev_name(&sbdev->dev));
}
struct bus_type slimbus_bus = {
.name = "slimbus",
.match = slim_device_match,
.probe = slim_device_probe,
.remove = slim_device_remove,
.uevent = slim_device_uevent,
};
EXPORT_SYMBOL_GPL(slimbus_bus);
/*
* __slim_driver_register() - Client driver registration with SLIMbus
*
 * @drv: Client driver to be associated with client-device.
* @owner: owning module/driver
*
* This API will register the client driver with the SLIMbus
* It is called from the driver's module-init function.
*/
int __slim_driver_register(struct slim_driver *drv, struct module *owner)
{
	/* Either an OF match table or an id_table, plus probe, are mandatory */
if (!(drv->driver.of_match_table || drv->id_table) || !drv->probe)
return -EINVAL;
drv->driver.bus = &slimbus_bus;
drv->driver.owner = owner;
return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(__slim_driver_register);
/*
* slim_driver_unregister() - Undo effect of slim_driver_register
*
* @drv: Client driver to be unregistered
*/
void slim_driver_unregister(struct slim_driver *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(slim_driver_unregister);
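/*
 * Registration sketch for a client driver. The IDs and my_* names are
 * illustrative assumptions; module_slim_driver() from <linux/slimbus.h>
 * expands to the register/unregister pair above:
 *
 *	static const struct slim_device_id my_slim_id[] = {
 *		{ .manf_id = 0x217, .prod_code = 0x60,
 *		  .dev_index = 0x1, .instance = 0x0 },
 *		{ }
 *	};
 *
 *	static struct slim_driver my_slim_driver = {
 *		.driver = { .name = "my-slim-dev" },
 *		.probe = my_probe,
 *		.id_table = my_slim_id,
 *	};
 *	module_slim_driver(my_slim_driver);
 */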
static void slim_dev_release(struct device *dev)
{
struct slim_device *sbdev = to_slim_device(dev);
kfree(sbdev);
}
static int slim_add_device(struct slim_controller *ctrl,
struct slim_device *sbdev,
struct device_node *node)
{
sbdev->dev.bus = &slimbus_bus;
sbdev->dev.parent = ctrl->dev;
sbdev->dev.release = slim_dev_release;
sbdev->dev.driver = NULL;
sbdev->ctrl = ctrl;
INIT_LIST_HEAD(&sbdev->stream_list);
spin_lock_init(&sbdev->stream_list_lock);
sbdev->dev.of_node = of_node_get(node);
sbdev->dev.fwnode = of_fwnode_handle(node);
dev_set_name(&sbdev->dev, "%x:%x:%x:%x",
sbdev->e_addr.manf_id,
sbdev->e_addr.prod_code,
sbdev->e_addr.dev_index,
sbdev->e_addr.instance);
return device_register(&sbdev->dev);
}
static struct slim_device *slim_alloc_device(struct slim_controller *ctrl,
struct slim_eaddr *eaddr,
struct device_node *node)
{
struct slim_device *sbdev;
int ret;
sbdev = kzalloc(sizeof(*sbdev), GFP_KERNEL);
if (!sbdev)
return NULL;
sbdev->e_addr = *eaddr;
ret = slim_add_device(ctrl, sbdev, node);
if (ret) {
put_device(&sbdev->dev);
return NULL;
}
return sbdev;
}
static void of_register_slim_devices(struct slim_controller *ctrl)
{
struct device *dev = ctrl->dev;
struct device_node *node;
if (!ctrl->dev->of_node)
return;
for_each_child_of_node(ctrl->dev->of_node, node) {
struct slim_device *sbdev;
struct slim_eaddr e_addr;
const char *compat = NULL;
int reg[2], ret;
int manf_id, prod_code;
compat = of_get_property(node, "compatible", NULL);
if (!compat)
continue;
ret = sscanf(compat, "slim%x,%x", &manf_id, &prod_code);
if (ret != 2) {
dev_err(dev, "Manf ID & Product code not found %s\n",
compat);
continue;
}
ret = of_property_read_u32_array(node, "reg", reg, 2);
if (ret) {
dev_err(dev, "Device and Instance id not found:%d\n",
ret);
continue;
}
e_addr.dev_index = reg[0];
e_addr.instance = reg[1];
e_addr.manf_id = manf_id;
e_addr.prod_code = prod_code;
sbdev = slim_alloc_device(ctrl, &e_addr, node);
if (!sbdev)
continue;
}
}
/*
* slim_register_controller() - Controller bring-up and registration.
*
* @ctrl: Controller to be registered.
*
* A controller is registered with the framework using this API.
 * If devices on a controller were registered before the controller,
 * this will make sure that they get probed when the controller is up.
*/
int slim_register_controller(struct slim_controller *ctrl)
{
int id;
id = ida_alloc(&ctrl_ida, GFP_KERNEL);
if (id < 0)
return id;
ctrl->id = id;
if (!ctrl->min_cg)
ctrl->min_cg = SLIM_MIN_CLK_GEAR;
if (!ctrl->max_cg)
ctrl->max_cg = SLIM_MAX_CLK_GEAR;
ida_init(&ctrl->laddr_ida);
idr_init(&ctrl->tid_idr);
mutex_init(&ctrl->lock);
mutex_init(&ctrl->sched.m_reconf);
init_completion(&ctrl->sched.pause_comp);
spin_lock_init(&ctrl->txn_lock);
dev_dbg(ctrl->dev, "Bus [%s] registered:dev:%p\n",
ctrl->name, ctrl->dev);
of_register_slim_devices(ctrl);
return 0;
}
EXPORT_SYMBOL_GPL(slim_register_controller);
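/*
 * Usage sketch: a controller driver fills in its device and ops before
 * registering. A minimal sketch; xfer_msg and set_laddr are members of
 * struct slim_controller, the my_* callbacks are assumptions:
 *
 *	ctrl->dev = &pdev->dev;
 *	ctrl->xfer_msg = my_xfer_msg;
 *	ctrl->set_laddr = my_set_laddr;
 *
 *	ret = slim_register_controller(ctrl);
 */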
/* slim_remove_device: Remove the effect of slim_add_device() */
static void slim_remove_device(struct slim_device *sbdev)
{
of_node_put(sbdev->dev.of_node);
device_unregister(&sbdev->dev);
}
static int slim_ctrl_remove_device(struct device *dev, void *null)
{
slim_remove_device(to_slim_device(dev));
return 0;
}
/**
* slim_unregister_controller() - Controller tear-down.
*
* @ctrl: Controller to tear-down.
*/
int slim_unregister_controller(struct slim_controller *ctrl)
{
/* Remove all clients */
device_for_each_child(ctrl->dev, NULL, slim_ctrl_remove_device);
ida_free(&ctrl_ida, ctrl->id);
return 0;
}
EXPORT_SYMBOL_GPL(slim_unregister_controller);
/**
* slim_report_absent() - Controller calls this function when a device
* reports absent, OR when the device cannot be communicated with
*
* @sbdev: Device that cannot be reached, or sent report absent
*/
void slim_report_absent(struct slim_device *sbdev)
{
struct slim_controller *ctrl = sbdev->ctrl;
if (!ctrl)
return;
/* invalidate logical addresses */
mutex_lock(&ctrl->lock);
sbdev->is_laddr_valid = false;
mutex_unlock(&ctrl->lock);
if (!ctrl->get_laddr)
ida_free(&ctrl->laddr_ida, sbdev->laddr);
slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_DOWN);
}
EXPORT_SYMBOL_GPL(slim_report_absent);
static bool slim_eaddr_equal(struct slim_eaddr *a, struct slim_eaddr *b)
{
return (a->manf_id == b->manf_id &&
a->prod_code == b->prod_code &&
a->dev_index == b->dev_index &&
a->instance == b->instance);
}
static int slim_match_dev(struct device *dev, void *data)
{
struct slim_eaddr *e_addr = data;
struct slim_device *sbdev = to_slim_device(dev);
return slim_eaddr_equal(&sbdev->e_addr, e_addr);
}
static struct slim_device *find_slim_device(struct slim_controller *ctrl,
struct slim_eaddr *eaddr)
{
struct slim_device *sbdev;
struct device *dev;
dev = device_find_child(ctrl->dev, eaddr, slim_match_dev);
if (dev) {
sbdev = to_slim_device(dev);
return sbdev;
}
return NULL;
}
/**
* slim_get_device() - get handle to a device.
*
* @ctrl: Controller on which this device will be added/queried
* @e_addr: Enumeration address of the device to be queried
*
* Return: pointer to a device if it has already reported. Creates a new
* device and returns pointer to it if the device has not yet enumerated.
*/
struct slim_device *slim_get_device(struct slim_controller *ctrl,
struct slim_eaddr *e_addr)
{
struct slim_device *sbdev;
sbdev = find_slim_device(ctrl, e_addr);
if (!sbdev) {
sbdev = slim_alloc_device(ctrl, e_addr, NULL);
if (!sbdev)
return ERR_PTR(-ENOMEM);
}
return sbdev;
}
EXPORT_SYMBOL_GPL(slim_get_device);
static int of_slim_match_dev(struct device *dev, void *data)
{
struct device_node *np = data;
struct slim_device *sbdev = to_slim_device(dev);
return (sbdev->dev.of_node == np);
}
static struct slim_device *of_find_slim_device(struct slim_controller *ctrl,
struct device_node *np)
{
struct slim_device *sbdev;
struct device *dev;
dev = device_find_child(ctrl->dev, np, of_slim_match_dev);
if (dev) {
sbdev = to_slim_device(dev);
return sbdev;
}
return NULL;
}
/**
* of_slim_get_device() - get handle to a device using dt node.
*
* @ctrl: Controller on which this device will be added/queried
* @np: node pointer to device
*
 * Return: pointer to the device matching the device-tree node, or NULL if
 * no such device has been created on this controller.
*/
struct slim_device *of_slim_get_device(struct slim_controller *ctrl,
struct device_node *np)
{
return of_find_slim_device(ctrl, np);
}
EXPORT_SYMBOL_GPL(of_slim_get_device);
static int slim_device_alloc_laddr(struct slim_device *sbdev,
bool report_present)
{
struct slim_controller *ctrl = sbdev->ctrl;
u8 laddr;
int ret;
mutex_lock(&ctrl->lock);
if (ctrl->get_laddr) {
ret = ctrl->get_laddr(ctrl, &sbdev->e_addr, &laddr);
if (ret < 0)
goto err;
} else if (report_present) {
ret = ida_simple_get(&ctrl->laddr_ida,
0, SLIM_LA_MANAGER - 1, GFP_KERNEL);
if (ret < 0)
goto err;
laddr = ret;
} else {
ret = -EINVAL;
goto err;
}
if (ctrl->set_laddr) {
ret = ctrl->set_laddr(ctrl, &sbdev->e_addr, laddr);
if (ret) {
ret = -EINVAL;
goto err;
}
}
sbdev->laddr = laddr;
sbdev->is_laddr_valid = true;
mutex_unlock(&ctrl->lock);
slim_device_update_status(sbdev, SLIM_DEVICE_STATUS_UP);
dev_dbg(ctrl->dev, "setting slimbus l-addr:%x, ea:%x,%x,%x,%x\n",
laddr, sbdev->e_addr.manf_id, sbdev->e_addr.prod_code,
sbdev->e_addr.dev_index, sbdev->e_addr.instance);
return 0;
err:
mutex_unlock(&ctrl->lock);
return ret;
}
/**
* slim_device_report_present() - Report enumerated device.
*
* @ctrl: Controller with which device is enumerated.
* @e_addr: Enumeration address of the device.
* @laddr: Return logical address (if valid flag is false)
*
* Called by controller in response to REPORT_PRESENT. Framework will assign
* a logical address to this enumeration address.
* Function returns -EXFULL to indicate that all logical addresses are already
* taken.
*/
int slim_device_report_present(struct slim_controller *ctrl,
struct slim_eaddr *e_addr, u8 *laddr)
{
struct slim_device *sbdev;
int ret;
ret = pm_runtime_get_sync(ctrl->dev);
if (ctrl->sched.clk_state != SLIM_CLK_ACTIVE) {
dev_err(ctrl->dev, "slim ctrl not active,state:%d, ret:%d\n",
ctrl->sched.clk_state, ret);
goto slimbus_not_active;
}
sbdev = slim_get_device(ctrl, e_addr);
if (IS_ERR(sbdev))
return -ENODEV;
if (sbdev->is_laddr_valid) {
*laddr = sbdev->laddr;
return 0;
}
ret = slim_device_alloc_laddr(sbdev, true);
slimbus_not_active:
pm_runtime_mark_last_busy(ctrl->dev);
pm_runtime_put_autosuspend(ctrl->dev);
return ret;
}
EXPORT_SYMBOL_GPL(slim_device_report_present);
/**
* slim_get_logical_addr() - get/allocate logical address of a SLIMbus device.
*
* @sbdev: client handle requesting the address.
*
* Return: zero if a logical address is valid or a new logical address
* has been assigned. error code in case of error.
*/
int slim_get_logical_addr(struct slim_device *sbdev)
{
if (!sbdev->is_laddr_valid)
return slim_device_alloc_laddr(sbdev, false);
return 0;
}
EXPORT_SYMBOL_GPL(slim_get_logical_addr);
static void __exit slimbus_exit(void)
{
bus_unregister(&slimbus_bus);
}
module_exit(slimbus_exit);
static int __init slimbus_init(void)
{
return bus_register(&slimbus_bus);
}
postcore_initcall(slimbus_init);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SLIMbus core");
| linux-master | drivers/slimbus/core.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
// Copyright (c) 2018, Linaro Limited
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/slimbus.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/remoteproc/qcom_rproc.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/soc/qcom/qmi.h>
#include <linux/soc/qcom/pdr.h>
#include <net/sock.h>
#include "slimbus.h"
/* NGD (Non-ported Generic Device) registers */
#define NGD_CFG 0x0
#define NGD_CFG_ENABLE BIT(0)
#define NGD_CFG_RX_MSGQ_EN BIT(1)
#define NGD_CFG_TX_MSGQ_EN BIT(2)
#define NGD_STATUS 0x4
#define NGD_LADDR BIT(1)
#define NGD_RX_MSGQ_CFG 0x8
#define NGD_INT_EN 0x10
#define NGD_INT_RECFG_DONE BIT(24)
#define NGD_INT_TX_NACKED_2 BIT(25)
#define NGD_INT_MSG_BUF_CONTE BIT(26)
#define NGD_INT_MSG_TX_INVAL BIT(27)
#define NGD_INT_IE_VE_CHG BIT(28)
#define NGD_INT_DEV_ERR BIT(29)
#define NGD_INT_RX_MSG_RCVD BIT(30)
#define NGD_INT_TX_MSG_SENT BIT(31)
#define NGD_INT_STAT 0x14
#define NGD_INT_CLR 0x18
#define DEF_NGD_INT_MASK (NGD_INT_TX_NACKED_2 | NGD_INT_MSG_BUF_CONTE | \
NGD_INT_MSG_TX_INVAL | NGD_INT_IE_VE_CHG | \
NGD_INT_DEV_ERR | NGD_INT_TX_MSG_SENT | \
NGD_INT_RX_MSG_RCVD)
/* Slimbus QMI service */
#define SLIMBUS_QMI_SVC_ID 0x0301
#define SLIMBUS_QMI_SVC_V1 1
#define SLIMBUS_QMI_INS_ID 0
#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01 0x0020
#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01 0x0020
#define SLIMBUS_QMI_POWER_REQ_V01 0x0021
#define SLIMBUS_QMI_POWER_RESP_V01 0x0021
#define SLIMBUS_QMI_CHECK_FRAMER_STATUS_REQ 0x0022
#define SLIMBUS_QMI_CHECK_FRAMER_STATUS_RESP 0x0022
#define SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN 14
#define SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN 7
#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN 14
#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN 7
#define SLIMBUS_QMI_CHECK_FRAMER_STAT_RESP_MAX_MSG_LEN 7
/* QMI response timeout, passed to qmi_txn_wait() in jiffies */
#define SLIMBUS_QMI_RESP_TOUT 1000
/* User defined commands */
#define SLIM_USR_MC_GENERIC_ACK 0x25
#define SLIM_USR_MC_MASTER_CAPABILITY 0x0
#define SLIM_USR_MC_REPORT_SATELLITE 0x1
#define SLIM_USR_MC_ADDR_QUERY 0xD
#define SLIM_USR_MC_ADDR_REPLY 0xE
#define SLIM_USR_MC_DEFINE_CHAN 0x20
#define SLIM_USR_MC_DEF_ACT_CHAN 0x21
#define SLIM_USR_MC_CHAN_CTRL 0x23
#define SLIM_USR_MC_RECONFIG_NOW 0x24
#define SLIM_USR_MC_REQ_BW 0x28
#define SLIM_USR_MC_CONNECT_SRC 0x2C
#define SLIM_USR_MC_CONNECT_SINK 0x2D
#define SLIM_USR_MC_DISCONNECT_PORT 0x2E
#define SLIM_USR_MC_REPEAT_CHANGE_VALUE 0x0
#define QCOM_SLIM_NGD_AUTOSUSPEND MSEC_PER_SEC
#define SLIM_RX_MSGQ_TIMEOUT_VAL 0x10000
#define SLIM_LA_MGR 0xFF
#define SLIM_ROOT_FREQ 24576000
#define LADDR_RETRY 5
/* Per spec, max 40 bytes per received message */
#define SLIM_MSGQ_BUF_LEN 40
#define QCOM_SLIM_NGD_DESC_NUM 32
#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
#define INIT_MX_RETRIES 10
#define DEF_RETRY_MS 10
#define SAT_MAGIC_LSB 0xD9
#define SAT_MAGIC_MSB 0xC5
#define SAT_MSG_VER 0x1
#define SAT_MSG_PROT 0x1
#define to_ngd(d) container_of(d, struct qcom_slim_ngd, dev)
struct ngd_reg_offset_data {
u32 offset, size;
};
static const struct ngd_reg_offset_data ngd_v1_5_offset_info = {
.offset = 0x1000,
.size = 0x1000,
};
enum qcom_slim_ngd_state {
QCOM_SLIM_NGD_CTRL_AWAKE,
QCOM_SLIM_NGD_CTRL_IDLE,
QCOM_SLIM_NGD_CTRL_ASLEEP,
QCOM_SLIM_NGD_CTRL_DOWN,
};
struct qcom_slim_ngd_qmi {
struct qmi_handle qmi;
struct sockaddr_qrtr svc_info;
struct qmi_handle svc_event_hdl;
struct qmi_response_type_v01 resp;
struct qmi_handle *handle;
struct completion qmi_comp;
};
struct qcom_slim_ngd_ctrl;
struct qcom_slim_ngd;
struct qcom_slim_ngd_dma_desc {
struct dma_async_tx_descriptor *desc;
struct qcom_slim_ngd_ctrl *ctrl;
struct completion *comp;
dma_cookie_t cookie;
dma_addr_t phys;
void *base;
};
struct qcom_slim_ngd {
struct platform_device *pdev;
void __iomem *base;
int id;
};
struct qcom_slim_ngd_ctrl {
struct slim_framer framer;
struct slim_controller ctrl;
struct qcom_slim_ngd_qmi qmi;
struct qcom_slim_ngd *ngd;
struct device *dev;
void __iomem *base;
struct dma_chan *dma_rx_channel;
struct dma_chan *dma_tx_channel;
struct qcom_slim_ngd_dma_desc rx_desc[QCOM_SLIM_NGD_DESC_NUM];
struct qcom_slim_ngd_dma_desc txdesc[QCOM_SLIM_NGD_DESC_NUM];
struct completion reconf;
struct work_struct m_work;
struct work_struct ngd_up_work;
struct workqueue_struct *mwq;
struct completion qmi_up;
spinlock_t tx_buf_lock;
struct mutex tx_lock;
struct mutex ssr_lock;
struct notifier_block nb;
void *notifier;
struct pdr_handle *pdr;
enum qcom_slim_ngd_state state;
dma_addr_t rx_phys_base;
dma_addr_t tx_phys_base;
void *rx_base;
void *tx_base;
int tx_tail;
int tx_head;
u32 ver;
};
enum slimbus_mode_enum_type_v01 {
	/* To force a 32 bit signed enum. Do not change or use */
SLIMBUS_MODE_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
SLIMBUS_MODE_SATELLITE_V01 = 1,
SLIMBUS_MODE_MASTER_V01 = 2,
SLIMBUS_MODE_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
};
enum slimbus_pm_enum_type_v01 {
	/* To force a 32 bit signed enum. Do not change or use */
SLIMBUS_PM_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
SLIMBUS_PM_INACTIVE_V01 = 1,
SLIMBUS_PM_ACTIVE_V01 = 2,
SLIMBUS_PM_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
};
enum slimbus_resp_enum_type_v01 {
SLIMBUS_RESP_ENUM_TYPE_MIN_VAL_V01 = INT_MIN,
SLIMBUS_RESP_SYNCHRONOUS_V01 = 1,
SLIMBUS_RESP_ENUM_TYPE_MAX_VAL_V01 = INT_MAX,
};
struct slimbus_select_inst_req_msg_v01 {
uint32_t instance;
uint8_t mode_valid;
enum slimbus_mode_enum_type_v01 mode;
};
struct slimbus_select_inst_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
struct slimbus_power_req_msg_v01 {
enum slimbus_pm_enum_type_v01 pm_req;
uint8_t resp_type_valid;
enum slimbus_resp_enum_type_v01 resp_type;
};
struct slimbus_power_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
static struct qmi_elem_info slimbus_select_inst_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(uint32_t),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct slimbus_select_inst_req_msg_v01,
instance),
.ei_array = NULL,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(uint8_t),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct slimbus_select_inst_req_msg_v01,
mode_valid),
.ei_array = NULL,
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(enum slimbus_mode_enum_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct slimbus_select_inst_req_msg_v01,
mode),
.ei_array = NULL,
},
{
.data_type = QMI_EOTI,
.elem_len = 0,
.elem_size = 0,
.array_type = NO_ARRAY,
.tlv_type = 0x00,
.offset = 0,
.ei_array = NULL,
},
};
static struct qmi_elem_info slimbus_select_inst_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct slimbus_select_inst_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
.elem_len = 0,
.elem_size = 0,
.array_type = NO_ARRAY,
.tlv_type = 0x00,
.offset = 0,
.ei_array = NULL,
},
};
static struct qmi_elem_info slimbus_power_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(enum slimbus_pm_enum_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct slimbus_power_req_msg_v01,
pm_req),
.ei_array = NULL,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(uint8_t),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct slimbus_power_req_msg_v01,
resp_type_valid),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum slimbus_resp_enum_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct slimbus_power_req_msg_v01,
resp_type),
},
{
.data_type = QMI_EOTI,
.elem_len = 0,
.elem_size = 0,
.array_type = NO_ARRAY,
.tlv_type = 0x00,
.offset = 0,
.ei_array = NULL,
},
};
static struct qmi_elem_info slimbus_power_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct slimbus_power_resp_msg_v01, resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
.elem_len = 0,
.elem_size = 0,
.array_type = NO_ARRAY,
.tlv_type = 0x00,
.offset = 0,
.ei_array = NULL,
},
};
static int qcom_slim_qmi_send_select_inst_req(struct qcom_slim_ngd_ctrl *ctrl,
struct slimbus_select_inst_req_msg_v01 *req)
{
struct slimbus_select_inst_resp_msg_v01 resp = { { 0, 0 } };
struct qmi_txn txn;
int rc;
rc = qmi_txn_init(ctrl->qmi.handle, &txn,
slimbus_select_inst_resp_msg_v01_ei, &resp);
if (rc < 0) {
dev_err(ctrl->dev, "QMI TXN init fail: %d\n", rc);
return rc;
}
rc = qmi_send_request(ctrl->qmi.handle, NULL, &txn,
SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01,
SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN,
slimbus_select_inst_req_msg_v01_ei, req);
if (rc < 0) {
dev_err(ctrl->dev, "QMI send req fail %d\n", rc);
qmi_txn_cancel(&txn);
return rc;
}
rc = qmi_txn_wait(&txn, SLIMBUS_QMI_RESP_TOUT);
if (rc < 0) {
dev_err(ctrl->dev, "QMI TXN wait fail: %d\n", rc);
return rc;
}
/* Check the response */
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
dev_err(ctrl->dev, "QMI request failed 0x%x\n",
resp.resp.result);
return -EREMOTEIO;
}
return 0;
}
static void qcom_slim_qmi_power_resp_cb(struct qmi_handle *handle,
struct sockaddr_qrtr *sq,
struct qmi_txn *txn, const void *data)
{
struct slimbus_power_resp_msg_v01 *resp;
resp = (struct slimbus_power_resp_msg_v01 *)data;
if (resp->resp.result != QMI_RESULT_SUCCESS_V01)
pr_err("QMI power request failed 0x%x\n",
resp->resp.result);
complete(&txn->completion);
}
static int qcom_slim_qmi_send_power_request(struct qcom_slim_ngd_ctrl *ctrl,
struct slimbus_power_req_msg_v01 *req)
{
struct slimbus_power_resp_msg_v01 resp = { { 0, 0 } };
struct qmi_txn txn;
int rc;
	rc = qmi_txn_init(ctrl->qmi.handle, &txn,
			  slimbus_power_resp_msg_v01_ei, &resp);
	if (rc < 0) {
		dev_err(ctrl->dev, "QMI TXN init fail: %d\n", rc);
		return rc;
	}
rc = qmi_send_request(ctrl->qmi.handle, NULL, &txn,
SLIMBUS_QMI_POWER_REQ_V01,
SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN,
slimbus_power_req_msg_v01_ei, req);
if (rc < 0) {
dev_err(ctrl->dev, "QMI send req fail %d\n", rc);
qmi_txn_cancel(&txn);
return rc;
}
rc = qmi_txn_wait(&txn, SLIMBUS_QMI_RESP_TOUT);
if (rc < 0) {
dev_err(ctrl->dev, "QMI TXN wait fail: %d\n", rc);
return rc;
}
/* Check the response */
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
dev_err(ctrl->dev, "QMI request failed 0x%x\n",
resp.resp.result);
return -EREMOTEIO;
}
return 0;
}
static const struct qmi_msg_handler qcom_slim_qmi_msg_handlers[] = {
{
.type = QMI_RESPONSE,
.msg_id = SLIMBUS_QMI_POWER_RESP_V01,
.ei = slimbus_power_resp_msg_v01_ei,
.decoded_size = sizeof(struct slimbus_power_resp_msg_v01),
.fn = qcom_slim_qmi_power_resp_cb,
},
{}
};
static int qcom_slim_qmi_init(struct qcom_slim_ngd_ctrl *ctrl,
bool apps_is_master)
{
struct slimbus_select_inst_req_msg_v01 req;
struct qmi_handle *handle;
int rc;
handle = devm_kzalloc(ctrl->dev, sizeof(*handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
rc = qmi_handle_init(handle, SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN,
NULL, qcom_slim_qmi_msg_handlers);
if (rc < 0) {
dev_err(ctrl->dev, "QMI client init failed: %d\n", rc);
goto qmi_handle_init_failed;
}
rc = kernel_connect(handle->sock,
(struct sockaddr *)&ctrl->qmi.svc_info,
sizeof(ctrl->qmi.svc_info), 0);
if (rc < 0) {
dev_err(ctrl->dev, "Remote Service connect failed: %d\n", rc);
goto qmi_connect_to_service_failed;
}
/* Instance is 0 based */
req.instance = (ctrl->ngd->id >> 1);
req.mode_valid = 1;
/* Mode indicates the role of the ADSP */
if (apps_is_master)
req.mode = SLIMBUS_MODE_SATELLITE_V01;
else
req.mode = SLIMBUS_MODE_MASTER_V01;
ctrl->qmi.handle = handle;
rc = qcom_slim_qmi_send_select_inst_req(ctrl, &req);
if (rc) {
dev_err(ctrl->dev, "failed to select h/w instance\n");
goto qmi_select_instance_failed;
}
return 0;
qmi_select_instance_failed:
ctrl->qmi.handle = NULL;
qmi_connect_to_service_failed:
qmi_handle_release(handle);
qmi_handle_init_failed:
devm_kfree(ctrl->dev, handle);
return rc;
}
static void qcom_slim_qmi_exit(struct qcom_slim_ngd_ctrl *ctrl)
{
if (!ctrl->qmi.handle)
return;
qmi_handle_release(ctrl->qmi.handle);
devm_kfree(ctrl->dev, ctrl->qmi.handle);
ctrl->qmi.handle = NULL;
}
static int qcom_slim_qmi_power_request(struct qcom_slim_ngd_ctrl *ctrl,
bool active)
{
struct slimbus_power_req_msg_v01 req;
if (active)
req.pm_req = SLIMBUS_PM_ACTIVE_V01;
else
req.pm_req = SLIMBUS_PM_INACTIVE_V01;
req.resp_type_valid = 0;
return qcom_slim_qmi_send_power_request(ctrl, &req);
}
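/*
 * TX message-queue helpers: the driver keeps a ring of
 * QCOM_SLIM_NGD_DESC_NUM descriptors over a coherent DMA buffer.
 * qcom_slim_ngd_tx_msg_get() reserves the next free SLIM_MSGQ_BUF_LEN slot
 * under tx_buf_lock, qcom_slim_ngd_tx_msg_post() maps that slot onto the TX
 * DMA channel, and the DMA completion callback advances the ring head and
 * signals the waiter.
 */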
static u32 *qcom_slim_ngd_tx_msg_get(struct qcom_slim_ngd_ctrl *ctrl, int len,
struct completion *comp)
{
struct qcom_slim_ngd_dma_desc *desc;
unsigned long flags;
spin_lock_irqsave(&ctrl->tx_buf_lock, flags);
if ((ctrl->tx_tail + 1) % QCOM_SLIM_NGD_DESC_NUM == ctrl->tx_head) {
spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
return NULL;
}
desc = &ctrl->txdesc[ctrl->tx_tail];
desc->base = ctrl->tx_base + ctrl->tx_tail * SLIM_MSGQ_BUF_LEN;
desc->comp = comp;
ctrl->tx_tail = (ctrl->tx_tail + 1) % QCOM_SLIM_NGD_DESC_NUM;
spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
return desc->base;
}
static void qcom_slim_ngd_tx_msg_dma_cb(void *args)
{
struct qcom_slim_ngd_dma_desc *desc = args;
struct qcom_slim_ngd_ctrl *ctrl = desc->ctrl;
unsigned long flags;
spin_lock_irqsave(&ctrl->tx_buf_lock, flags);
if (desc->comp) {
complete(desc->comp);
desc->comp = NULL;
}
ctrl->tx_head = (ctrl->tx_head + 1) % QCOM_SLIM_NGD_DESC_NUM;
spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
}
static int qcom_slim_ngd_tx_msg_post(struct qcom_slim_ngd_ctrl *ctrl,
void *buf, int len)
{
struct qcom_slim_ngd_dma_desc *desc;
unsigned long flags;
int index, offset;
spin_lock_irqsave(&ctrl->tx_buf_lock, flags);
offset = buf - ctrl->tx_base;
index = offset/SLIM_MSGQ_BUF_LEN;
desc = &ctrl->txdesc[index];
desc->phys = ctrl->tx_phys_base + offset;
desc->base = ctrl->tx_base + offset;
desc->ctrl = ctrl;
	/* round the transfer length up to a 4-byte boundary for the DMA engine */
	len = (len + 3) & 0xfc;
desc->desc = dmaengine_prep_slave_single(ctrl->dma_tx_channel,
desc->phys, len,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT);
if (!desc->desc) {
dev_err(ctrl->dev, "unable to prepare channel\n");
spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
return -EINVAL;
}
desc->desc->callback = qcom_slim_ngd_tx_msg_dma_cb;
desc->desc->callback_param = desc;
desc->desc->cookie = dmaengine_submit(desc->desc);
dma_async_issue_pending(ctrl->dma_tx_channel);
spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
return 0;
}
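/*
 * RX path: decode the message header and dispatch. A master-capability
 * report is handed off to the workqueue (answering it requires a new
 * transaction, which cannot be done from callback context), while replies
 * and ACKs are routed to the SLIMbus core via slim_msg_response().
 */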
static void qcom_slim_ngd_rx(struct qcom_slim_ngd_ctrl *ctrl, u8 *buf)
{
u8 mc, mt, len;
mt = SLIM_HEADER_GET_MT(buf[0]);
len = SLIM_HEADER_GET_RL(buf[0]);
mc = SLIM_HEADER_GET_MC(buf[1]);
if (mc == SLIM_USR_MC_MASTER_CAPABILITY &&
mt == SLIM_MSG_MT_SRC_REFERRED_USER)
queue_work(ctrl->mwq, &ctrl->m_work);
if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
mc == SLIM_MSG_MC_REPLY_VALUE || (mc == SLIM_USR_MC_ADDR_REPLY &&
mt == SLIM_MSG_MT_SRC_REFERRED_USER) ||
(mc == SLIM_USR_MC_GENERIC_ACK &&
mt == SLIM_MSG_MT_SRC_REFERRED_USER)) {
slim_msg_response(&ctrl->ctrl, &buf[4], buf[3], len - 4);
pm_runtime_mark_last_busy(ctrl->ctrl.dev);
}
}
static void qcom_slim_ngd_rx_msgq_cb(void *args)
{
struct qcom_slim_ngd_dma_desc *desc = args;
struct qcom_slim_ngd_ctrl *ctrl = desc->ctrl;
qcom_slim_ngd_rx(ctrl, (u8 *)desc->base);
/* Add descriptor back to the queue */
desc->desc = dmaengine_prep_slave_single(ctrl->dma_rx_channel,
desc->phys, SLIM_MSGQ_BUF_LEN,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
if (!desc->desc) {
dev_err(ctrl->dev, "Unable to prepare rx channel\n");
return;
}
desc->desc->callback = qcom_slim_ngd_rx_msgq_cb;
desc->desc->callback_param = desc;
desc->desc->cookie = dmaengine_submit(desc->desc);
dma_async_issue_pending(ctrl->dma_rx_channel);
}
static int qcom_slim_ngd_post_rx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
{
struct qcom_slim_ngd_dma_desc *desc;
int i;
for (i = 0; i < QCOM_SLIM_NGD_DESC_NUM; i++) {
desc = &ctrl->rx_desc[i];
desc->phys = ctrl->rx_phys_base + i * SLIM_MSGQ_BUF_LEN;
desc->ctrl = ctrl;
desc->base = ctrl->rx_base + i * SLIM_MSGQ_BUF_LEN;
desc->desc = dmaengine_prep_slave_single(ctrl->dma_rx_channel,
desc->phys, SLIM_MSGQ_BUF_LEN,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
if (!desc->desc) {
dev_err(ctrl->dev, "Unable to prepare rx channel\n");
return -EINVAL;
}
desc->desc->callback = qcom_slim_ngd_rx_msgq_cb;
desc->desc->callback_param = desc;
desc->desc->cookie = dmaengine_submit(desc->desc);
}
dma_async_issue_pending(ctrl->dma_rx_channel);
return 0;
}
static int qcom_slim_ngd_init_rx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
{
struct device *dev = ctrl->dev;
int ret, size;
ctrl->dma_rx_channel = dma_request_chan(dev, "rx");
if (IS_ERR(ctrl->dma_rx_channel)) {
dev_err(dev, "Failed to request RX dma channel");
ret = PTR_ERR(ctrl->dma_rx_channel);
ctrl->dma_rx_channel = NULL;
return ret;
}
size = QCOM_SLIM_NGD_DESC_NUM * SLIM_MSGQ_BUF_LEN;
ctrl->rx_base = dma_alloc_coherent(dev, size, &ctrl->rx_phys_base,
GFP_KERNEL);
if (!ctrl->rx_base) {
ret = -ENOMEM;
goto rel_rx;
}
ret = qcom_slim_ngd_post_rx_msgq(ctrl);
if (ret) {
dev_err(dev, "post_rx_msgq() failed 0x%x\n", ret);
goto rx_post_err;
}
return 0;
rx_post_err:
dma_free_coherent(dev, size, ctrl->rx_base, ctrl->rx_phys_base);
rel_rx:
dma_release_channel(ctrl->dma_rx_channel);
return ret;
}
static int qcom_slim_ngd_init_tx_msgq(struct qcom_slim_ngd_ctrl *ctrl)
{
struct device *dev = ctrl->dev;
unsigned long flags;
int ret = 0;
int size;
ctrl->dma_tx_channel = dma_request_chan(dev, "tx");
if (IS_ERR(ctrl->dma_tx_channel)) {
dev_err(dev, "Failed to request TX dma channel");
ret = PTR_ERR(ctrl->dma_tx_channel);
ctrl->dma_tx_channel = NULL;
return ret;
}
size = ((QCOM_SLIM_NGD_DESC_NUM + 1) * SLIM_MSGQ_BUF_LEN);
ctrl->tx_base = dma_alloc_coherent(dev, size, &ctrl->tx_phys_base,
GFP_KERNEL);
if (!ctrl->tx_base) {
		ret = -ENOMEM;
goto rel_tx;
}
spin_lock_irqsave(&ctrl->tx_buf_lock, flags);
ctrl->tx_tail = 0;
ctrl->tx_head = 0;
spin_unlock_irqrestore(&ctrl->tx_buf_lock, flags);
return 0;
rel_tx:
dma_release_channel(ctrl->dma_tx_channel);
return ret;
}
static int qcom_slim_ngd_init_dma(struct qcom_slim_ngd_ctrl *ctrl)
{
int ret = 0;
ret = qcom_slim_ngd_init_rx_msgq(ctrl);
if (ret) {
dev_err(ctrl->dev, "rx dma init failed\n");
return ret;
}
ret = qcom_slim_ngd_init_tx_msgq(ctrl);
if (ret)
dev_err(ctrl->dev, "tx dma init failed\n");
return ret;
}
static irqreturn_t qcom_slim_ngd_interrupt(int irq, void *d)
{
struct qcom_slim_ngd_ctrl *ctrl = d;
void __iomem *base = ctrl->ngd->base;
u32 stat;
if (pm_runtime_suspended(ctrl->ctrl.dev)) {
dev_warn_once(ctrl->dev, "Interrupt received while suspended\n");
return IRQ_NONE;
}
stat = readl(base + NGD_INT_STAT);
if ((stat & NGD_INT_MSG_BUF_CONTE) ||
(stat & NGD_INT_MSG_TX_INVAL) || (stat & NGD_INT_DEV_ERR) ||
(stat & NGD_INT_TX_NACKED_2)) {
dev_err(ctrl->dev, "Error Interrupt received 0x%x\n", stat);
}
writel(stat, base + NGD_INT_CLR);
return IRQ_HANDLED;
}
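/*
 * Transmit one SLIMbus message. Core-class CONNECT_SOURCE/CONNECT_SINK/
 * DISCONNECT_PORT messages are rewritten as user-class messages addressed
 * to the manager, which requires allocating a TID and waiting on a second
 * completion (the reply) in addition to the TX-done completion signalled
 * by the DMA callback.
 */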
static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
struct slim_msg_txn *txn)
{
struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
DECLARE_COMPLETION_ONSTACK(tx_sent);
DECLARE_COMPLETION_ONSTACK(done);
int ret, timeout, i;
u8 wbuf[SLIM_MSGQ_BUF_LEN];
u8 rbuf[SLIM_MSGQ_BUF_LEN];
u32 *pbuf;
u8 *puc;
u8 la = txn->la;
bool usr_msg = false;
if (txn->mt == SLIM_MSG_MT_CORE &&
(txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW))
return 0;
if (txn->dt == SLIM_MSG_DEST_ENUMADDR)
return -EPROTONOSUPPORT;
if (txn->msg->num_bytes > SLIM_MSGQ_BUF_LEN ||
txn->rl > SLIM_MSGQ_BUF_LEN) {
dev_err(ctrl->dev, "msg exceeds HW limit\n");
return -EINVAL;
}
pbuf = qcom_slim_ngd_tx_msg_get(ctrl, txn->rl, &tx_sent);
if (!pbuf) {
dev_err(ctrl->dev, "Message buffer unavailable\n");
return -ENOMEM;
}
if (txn->mt == SLIM_MSG_MT_CORE &&
(txn->mc == SLIM_MSG_MC_CONNECT_SOURCE ||
txn->mc == SLIM_MSG_MC_CONNECT_SINK ||
txn->mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
txn->mt = SLIM_MSG_MT_DEST_REFERRED_USER;
switch (txn->mc) {
case SLIM_MSG_MC_CONNECT_SOURCE:
txn->mc = SLIM_USR_MC_CONNECT_SRC;
break;
case SLIM_MSG_MC_CONNECT_SINK:
txn->mc = SLIM_USR_MC_CONNECT_SINK;
break;
case SLIM_MSG_MC_DISCONNECT_PORT:
txn->mc = SLIM_USR_MC_DISCONNECT_PORT;
break;
default:
return -EINVAL;
}
usr_msg = true;
i = 0;
wbuf[i++] = txn->la;
la = SLIM_LA_MGR;
wbuf[i++] = txn->msg->wbuf[0];
if (txn->mc != SLIM_USR_MC_DISCONNECT_PORT)
wbuf[i++] = txn->msg->wbuf[1];
txn->comp = &done;
ret = slim_alloc_txn_tid(sctrl, txn);
if (ret) {
dev_err(ctrl->dev, "Unable to allocate TID\n");
return ret;
}
wbuf[i++] = txn->tid;
txn->msg->num_bytes = i;
txn->msg->wbuf = wbuf;
txn->msg->rbuf = rbuf;
txn->rl = txn->msg->num_bytes + 4;
}
/* HW expects length field to be excluded */
txn->rl--;
puc = (u8 *)pbuf;
*pbuf = 0;
if (txn->dt == SLIM_MSG_DEST_LOGICALADDR) {
*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 0,
la);
puc += 3;
} else {
*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, txn->mc, 1,
la);
puc += 2;
}
if (slim_tid_txn(txn->mt, txn->mc))
*(puc++) = txn->tid;
if (slim_ec_txn(txn->mt, txn->mc)) {
*(puc++) = (txn->ec & 0xFF);
*(puc++) = (txn->ec >> 8) & 0xFF;
}
if (txn->msg && txn->msg->wbuf)
memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes);
mutex_lock(&ctrl->tx_lock);
ret = qcom_slim_ngd_tx_msg_post(ctrl, pbuf, txn->rl);
if (ret) {
mutex_unlock(&ctrl->tx_lock);
return ret;
}
timeout = wait_for_completion_timeout(&tx_sent, HZ);
if (!timeout) {
dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
txn->mt);
mutex_unlock(&ctrl->tx_lock);
return -ETIMEDOUT;
}
if (usr_msg) {
timeout = wait_for_completion_timeout(&done, HZ);
if (!timeout) {
dev_err(sctrl->dev, "TX timed out:MC:0x%x,mt:0x%x",
txn->mc, txn->mt);
mutex_unlock(&ctrl->tx_lock);
return -ETIMEDOUT;
}
}
mutex_unlock(&ctrl->tx_lock);
return 0;
}
static int qcom_slim_ngd_xfer_msg_sync(struct slim_controller *ctrl,
struct slim_msg_txn *txn)
{
DECLARE_COMPLETION_ONSTACK(done);
int ret, timeout;
ret = pm_runtime_get_sync(ctrl->dev);
if (ret < 0)
goto pm_put;
txn->comp = &done;
ret = qcom_slim_ngd_xfer_msg(ctrl, txn);
if (ret)
goto pm_put;
timeout = wait_for_completion_timeout(&done, HZ);
if (!timeout) {
dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
txn->mt);
ret = -ETIMEDOUT;
goto pm_put;
}
return 0;
pm_put:
pm_runtime_put(ctrl->dev);
return ret;
}
static int qcom_slim_calc_coef(struct slim_stream_runtime *rt, int *exp)
{
struct slim_controller *ctrl = rt->dev->ctrl;
int coef;
if (rt->ratem * ctrl->a_framer->superfreq < rt->rate)
rt->ratem++;
coef = rt->ratem;
*exp = 0;
	/*
	 * CRM = C x (2^E) is the formula we are using.
	 * Here C is the coefficient and E is the exponent.
	 * CRM is the Channel Rate Multiplier.
	 * The coefficient must be either 1 or 3 and the exponent
	 * must be an integer between 0 and 9, inclusive.
	 */
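	/*
	 * Worked example (illustrative, assuming the typical 4000
	 * superframes/s): a 48000 Hz stream gives ratem = 12 = 3 * 2^2.
	 * The loop below reduces 12 to coefficient 3 with exponent 2, and
	 * the function then returns 1 so that the caller sets BIT(5).
	 */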
while (1) {
while ((coef & 0x1) != 0x1) {
coef >>= 1;
*exp = *exp + 1;
}
if (coef <= 3)
break;
coef++;
}
	/*
	 * We rely on the coef value (1 or 3) to set a bit
	 * in the SLIMbus message packet. This bit is
	 * BIT(5), the segment rate coefficient.
	 */
if (coef == 1) {
if (*exp > 9)
return -EIO;
coef = 0;
} else {
if (*exp > 8)
return -EIO;
coef = 1;
}
return coef;
}
static int qcom_slim_ngd_enable_stream(struct slim_stream_runtime *rt)
{
struct slim_device *sdev = rt->dev;
struct slim_controller *ctrl = sdev->ctrl;
struct slim_val_inf msg = {0};
u8 wbuf[SLIM_MSGQ_BUF_LEN];
u8 rbuf[SLIM_MSGQ_BUF_LEN];
struct slim_msg_txn txn = {0,};
int i, ret;
txn.mt = SLIM_MSG_MT_DEST_REFERRED_USER;
txn.dt = SLIM_MSG_DEST_LOGICALADDR;
txn.la = SLIM_LA_MGR;
txn.ec = 0;
txn.msg = &msg;
txn.msg->num_bytes = 0;
txn.msg->wbuf = wbuf;
txn.msg->rbuf = rbuf;
for (i = 0; i < rt->num_ports; i++) {
struct slim_port *port = &rt->ports[i];
if (txn.msg->num_bytes == 0) {
int exp = 0, coef = 0;
wbuf[txn.msg->num_bytes++] = sdev->laddr;
wbuf[txn.msg->num_bytes] = rt->bps >> 2 |
(port->ch.aux_fmt << 6);
/* calculate coef dynamically */
coef = qcom_slim_calc_coef(rt, &exp);
if (coef < 0) {
dev_err(&sdev->dev,
"%s: error calculating coef %d\n", __func__,
coef);
return -EIO;
}
if (coef)
wbuf[txn.msg->num_bytes] |= BIT(5);
txn.msg->num_bytes++;
wbuf[txn.msg->num_bytes++] = exp << 4 | rt->prot;
if (rt->prot == SLIM_PROTO_ISO)
wbuf[txn.msg->num_bytes++] =
port->ch.prrate |
SLIM_CHANNEL_CONTENT_FL;
else
wbuf[txn.msg->num_bytes++] = port->ch.prrate;
ret = slim_alloc_txn_tid(ctrl, &txn);
if (ret) {
dev_err(&sdev->dev, "Fail to allocate TID\n");
return -ENXIO;
}
wbuf[txn.msg->num_bytes++] = txn.tid;
}
wbuf[txn.msg->num_bytes++] = port->ch.id;
}
txn.mc = SLIM_USR_MC_DEF_ACT_CHAN;
txn.rl = txn.msg->num_bytes + 4;
ret = qcom_slim_ngd_xfer_msg_sync(ctrl, &txn);
if (ret) {
slim_free_txn_tid(ctrl, &txn);
dev_err(&sdev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn.mc,
txn.mt);
return ret;
}
txn.mc = SLIM_USR_MC_RECONFIG_NOW;
txn.msg->num_bytes = 2;
wbuf[1] = sdev->laddr;
txn.rl = txn.msg->num_bytes + 4;
ret = slim_alloc_txn_tid(ctrl, &txn);
if (ret) {
dev_err(ctrl->dev, "Fail to allocate TID\n");
return ret;
}
wbuf[0] = txn.tid;
ret = qcom_slim_ngd_xfer_msg_sync(ctrl, &txn);
if (ret) {
slim_free_txn_tid(ctrl, &txn);
dev_err(&sdev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn.mc,
txn.mt);
}
return ret;
}
static int qcom_slim_ngd_get_laddr(struct slim_controller *ctrl,
struct slim_eaddr *ea, u8 *laddr)
{
struct slim_val_inf msg = {0};
u8 failed_ea[6] = {0, 0, 0, 0, 0, 0};
struct slim_msg_txn txn;
u8 wbuf[10] = {0};
u8 rbuf[10] = {0};
int ret;
txn.mt = SLIM_MSG_MT_DEST_REFERRED_USER;
txn.dt = SLIM_MSG_DEST_LOGICALADDR;
txn.la = SLIM_LA_MGR;
txn.ec = 0;
txn.mc = SLIM_USR_MC_ADDR_QUERY;
txn.rl = 11;
txn.msg = &msg;
txn.msg->num_bytes = 7;
txn.msg->wbuf = wbuf;
txn.msg->rbuf = rbuf;
ret = slim_alloc_txn_tid(ctrl, &txn);
if (ret < 0)
return ret;
wbuf[0] = (u8)txn.tid;
memcpy(&wbuf[1], ea, sizeof(*ea));
ret = qcom_slim_ngd_xfer_msg_sync(ctrl, &txn);
if (ret) {
slim_free_txn_tid(ctrl, &txn);
return ret;
}
if (!memcmp(rbuf, failed_ea, 6))
return -ENXIO;
*laddr = rbuf[6];
return ret;
}
static int qcom_slim_ngd_exit_dma(struct qcom_slim_ngd_ctrl *ctrl)
{
if (ctrl->dma_rx_channel) {
dmaengine_terminate_sync(ctrl->dma_rx_channel);
dma_release_channel(ctrl->dma_rx_channel);
}
if (ctrl->dma_tx_channel) {
dmaengine_terminate_sync(ctrl->dma_tx_channel);
dma_release_channel(ctrl->dma_tx_channel);
}
ctrl->dma_tx_channel = ctrl->dma_rx_channel = NULL;
return 0;
}
static void qcom_slim_ngd_setup(struct qcom_slim_ngd_ctrl *ctrl)
{
u32 cfg = readl_relaxed(ctrl->ngd->base);
if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN ||
ctrl->state == QCOM_SLIM_NGD_CTRL_ASLEEP)
qcom_slim_ngd_init_dma(ctrl);
/* By default enable message queues */
cfg |= NGD_CFG_RX_MSGQ_EN;
cfg |= NGD_CFG_TX_MSGQ_EN;
	/* Enable NGD if it's not already enabled */
if (!(cfg & NGD_CFG_ENABLE))
cfg |= NGD_CFG_ENABLE;
writel_relaxed(cfg, ctrl->ngd->base);
}
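/*
 * Power-up sequence: when coming from DOWN, wait for the QMI service and
 * vote the bus active over QMI. If the NGD is already enumerated
 * (NGD_LADDR set in NGD_STATUS), only the message queues may need
 * re-arming; otherwise reprogram the interrupt and RX message-queue
 * registers and wait for the capability exchange with the master to
 * complete.
 */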
static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl)
{
enum qcom_slim_ngd_state cur_state = ctrl->state;
struct qcom_slim_ngd *ngd = ctrl->ngd;
u32 laddr, rx_msgq;
int timeout, ret = 0;
if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN) {
timeout = wait_for_completion_timeout(&ctrl->qmi.qmi_comp, HZ);
if (!timeout)
return -EREMOTEIO;
}
if (ctrl->state == QCOM_SLIM_NGD_CTRL_ASLEEP ||
ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN) {
ret = qcom_slim_qmi_power_request(ctrl, true);
if (ret) {
dev_err(ctrl->dev, "SLIM QMI power request failed:%d\n",
ret);
return ret;
}
}
ctrl->ver = readl_relaxed(ctrl->base);
/* Version info in 16 MSbits */
ctrl->ver >>= 16;
laddr = readl_relaxed(ngd->base + NGD_STATUS);
if (laddr & NGD_LADDR) {
		/*
		 * External MDM restart case, where the ADSP itself was the
		 * active framer: for example, the modem restarted while
		 * playback was active.
		 */
if (cur_state == QCOM_SLIM_NGD_CTRL_AWAKE) {
dev_info(ctrl->dev, "Subsys restart: ADSP active framer\n");
return 0;
}
qcom_slim_ngd_setup(ctrl);
return 0;
}
/*
* Reinitialize only when registers are not retained or when enumeration
* is lost for ngd.
*/
reinit_completion(&ctrl->reconf);
writel_relaxed(DEF_NGD_INT_MASK, ngd->base + NGD_INT_EN);
rx_msgq = readl_relaxed(ngd->base + NGD_RX_MSGQ_CFG);
writel_relaxed(rx_msgq|SLIM_RX_MSGQ_TIMEOUT_VAL,
ngd->base + NGD_RX_MSGQ_CFG);
qcom_slim_ngd_setup(ctrl);
timeout = wait_for_completion_timeout(&ctrl->reconf, HZ);
if (!timeout) {
dev_err(ctrl->dev, "capability exchange timed-out\n");
return -ETIMEDOUT;
}
return 0;
}
static void qcom_slim_ngd_notify_slaves(struct qcom_slim_ngd_ctrl *ctrl)
{
struct slim_device *sbdev;
struct device_node *node;
for_each_child_of_node(ctrl->ngd->pdev->dev.of_node, node) {
sbdev = of_slim_get_device(&ctrl->ctrl, node);
if (!sbdev)
continue;
if (slim_get_logical_addr(sbdev))
dev_err(ctrl->dev, "Failed to get logical address\n");
}
}
static void qcom_slim_ngd_master_worker(struct work_struct *work)
{
struct qcom_slim_ngd_ctrl *ctrl;
struct slim_msg_txn txn;
struct slim_val_inf msg = {0};
int retries = 0;
u8 wbuf[8];
int ret = 0;
ctrl = container_of(work, struct qcom_slim_ngd_ctrl, m_work);
txn.dt = SLIM_MSG_DEST_LOGICALADDR;
txn.ec = 0;
txn.mc = SLIM_USR_MC_REPORT_SATELLITE;
txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
txn.la = SLIM_LA_MGR;
wbuf[0] = SAT_MAGIC_LSB;
wbuf[1] = SAT_MAGIC_MSB;
wbuf[2] = SAT_MSG_VER;
wbuf[3] = SAT_MSG_PROT;
txn.msg = &msg;
txn.msg->wbuf = wbuf;
txn.msg->num_bytes = 4;
txn.rl = 8;
dev_info(ctrl->dev, "SLIM SAT: Rcvd master capability\n");
capability_retry:
ret = qcom_slim_ngd_xfer_msg(&ctrl->ctrl, &txn);
if (!ret) {
if (ctrl->state >= QCOM_SLIM_NGD_CTRL_ASLEEP)
complete(&ctrl->reconf);
else
dev_err(ctrl->dev, "unexpected state:%d\n",
ctrl->state);
if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN)
qcom_slim_ngd_notify_slaves(ctrl);
} else if (ret == -EIO) {
dev_err(ctrl->dev, "capability message NACKed, retrying\n");
if (retries < INIT_MX_RETRIES) {
msleep(DEF_RETRY_MS);
retries++;
goto capability_retry;
}
} else {
dev_err(ctrl->dev, "SLIM: capability TX failed:%d\n", ret);
}
}
static int qcom_slim_ngd_update_device_status(struct device *dev, void *null)
{
slim_report_absent(to_slim_device(dev));
return 0;
}
static int qcom_slim_ngd_runtime_resume(struct device *dev)
{
struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
int ret = 0;
if (!ctrl->qmi.handle)
return 0;
if (ctrl->state >= QCOM_SLIM_NGD_CTRL_ASLEEP)
ret = qcom_slim_ngd_power_up(ctrl);
if (ret) {
		/* Did SSR cause this power-up failure? */
if (ctrl->state != QCOM_SLIM_NGD_CTRL_DOWN)
ctrl->state = QCOM_SLIM_NGD_CTRL_ASLEEP;
else
dev_err(ctrl->dev, "HW wakeup attempt during SSR\n");
} else {
ctrl->state = QCOM_SLIM_NGD_CTRL_AWAKE;
}
return 0;
}
static int qcom_slim_ngd_enable(struct qcom_slim_ngd_ctrl *ctrl, bool enable)
{
if (enable) {
int ret = qcom_slim_qmi_init(ctrl, false);
if (ret) {
dev_err(ctrl->dev, "qmi init fail, ret:%d, state:%d\n",
ret, ctrl->state);
return ret;
}
/* controller state should be in sync with framework state */
complete(&ctrl->qmi.qmi_comp);
if (!pm_runtime_enabled(ctrl->ctrl.dev) ||
!pm_runtime_suspended(ctrl->ctrl.dev))
qcom_slim_ngd_runtime_resume(ctrl->ctrl.dev);
else
pm_runtime_resume(ctrl->ctrl.dev);
pm_runtime_mark_last_busy(ctrl->ctrl.dev);
pm_runtime_put(ctrl->ctrl.dev);
ret = slim_register_controller(&ctrl->ctrl);
if (ret) {
dev_err(ctrl->dev, "error adding slim controller\n");
return ret;
}
dev_info(ctrl->dev, "SLIM controller Registered\n");
} else {
qcom_slim_qmi_exit(ctrl);
slim_unregister_controller(&ctrl->ctrl);
}
return 0;
}
static int qcom_slim_ngd_qmi_new_server(struct qmi_handle *hdl,
struct qmi_service *service)
{
struct qcom_slim_ngd_qmi *qmi =
container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl);
struct qcom_slim_ngd_ctrl *ctrl =
container_of(qmi, struct qcom_slim_ngd_ctrl, qmi);
qmi->svc_info.sq_family = AF_QIPCRTR;
qmi->svc_info.sq_node = service->node;
qmi->svc_info.sq_port = service->port;
complete(&ctrl->qmi_up);
return 0;
}
static void qcom_slim_ngd_qmi_del_server(struct qmi_handle *hdl,
struct qmi_service *service)
{
struct qcom_slim_ngd_qmi *qmi =
container_of(hdl, struct qcom_slim_ngd_qmi, svc_event_hdl);
struct qcom_slim_ngd_ctrl *ctrl =
container_of(qmi, struct qcom_slim_ngd_ctrl, qmi);
reinit_completion(&ctrl->qmi_up);
qmi->svc_info.sq_node = 0;
qmi->svc_info.sq_port = 0;
}
static const struct qmi_ops qcom_slim_ngd_qmi_svc_event_ops = {
.new_server = qcom_slim_ngd_qmi_new_server,
.del_server = qcom_slim_ngd_qmi_del_server,
};
static int qcom_slim_ngd_qmi_svc_event_init(struct qcom_slim_ngd_ctrl *ctrl)
{
struct qcom_slim_ngd_qmi *qmi = &ctrl->qmi;
int ret;
ret = qmi_handle_init(&qmi->svc_event_hdl, 0,
&qcom_slim_ngd_qmi_svc_event_ops, NULL);
if (ret < 0) {
dev_err(ctrl->dev, "qmi_handle_init failed: %d\n", ret);
return ret;
}
ret = qmi_add_lookup(&qmi->svc_event_hdl, SLIMBUS_QMI_SVC_ID,
SLIMBUS_QMI_SVC_V1, SLIMBUS_QMI_INS_ID);
if (ret < 0) {
dev_err(ctrl->dev, "qmi_add_lookup failed: %d\n", ret);
qmi_handle_release(&qmi->svc_event_hdl);
}
return ret;
}
static void qcom_slim_ngd_qmi_svc_event_deinit(struct qcom_slim_ngd_qmi *qmi)
{
qmi_handle_release(&qmi->svc_event_hdl);
}
static struct platform_driver qcom_slim_ngd_driver;
#define QCOM_SLIM_NGD_DRV_NAME "qcom,slim-ngd"
static const struct of_device_id qcom_slim_ngd_dt_match[] = {
{
.compatible = "qcom,slim-ngd-v1.5.0",
.data = &ngd_v1_5_offset_info,
	}, {
.compatible = "qcom,slim-ngd-v2.1.0",
.data = &ngd_v1_5_offset_info,
},
{}
};
MODULE_DEVICE_TABLE(of, qcom_slim_ngd_dt_match);
static void qcom_slim_ngd_down(struct qcom_slim_ngd_ctrl *ctrl)
{
mutex_lock(&ctrl->ssr_lock);
device_for_each_child(ctrl->ctrl.dev, NULL,
qcom_slim_ngd_update_device_status);
qcom_slim_ngd_enable(ctrl, false);
mutex_unlock(&ctrl->ssr_lock);
}
static void qcom_slim_ngd_up_worker(struct work_struct *work)
{
struct qcom_slim_ngd_ctrl *ctrl;
ctrl = container_of(work, struct qcom_slim_ngd_ctrl, ngd_up_work);
/* Make sure qmi service is up before continuing */
wait_for_completion_interruptible(&ctrl->qmi_up);
mutex_lock(&ctrl->ssr_lock);
qcom_slim_ngd_enable(ctrl, true);
mutex_unlock(&ctrl->ssr_lock);
}
static int qcom_slim_ngd_ssr_pdr_notify(struct qcom_slim_ngd_ctrl *ctrl,
unsigned long action)
{
switch (action) {
case QCOM_SSR_BEFORE_SHUTDOWN:
case SERVREG_SERVICE_STATE_DOWN:
/* Make sure the last dma xfer is finished */
mutex_lock(&ctrl->tx_lock);
if (ctrl->state != QCOM_SLIM_NGD_CTRL_DOWN) {
pm_runtime_get_noresume(ctrl->ctrl.dev);
ctrl->state = QCOM_SLIM_NGD_CTRL_DOWN;
qcom_slim_ngd_down(ctrl);
qcom_slim_ngd_exit_dma(ctrl);
}
mutex_unlock(&ctrl->tx_lock);
break;
case QCOM_SSR_AFTER_POWERUP:
case SERVREG_SERVICE_STATE_UP:
schedule_work(&ctrl->ngd_up_work);
break;
default:
break;
}
return NOTIFY_OK;
}
static int qcom_slim_ngd_ssr_notify(struct notifier_block *nb,
unsigned long action,
void *data)
{
struct qcom_slim_ngd_ctrl *ctrl = container_of(nb,
struct qcom_slim_ngd_ctrl, nb);
return qcom_slim_ngd_ssr_pdr_notify(ctrl, action);
}
static void slim_pd_status(int state, char *svc_path, void *priv)
{
struct qcom_slim_ngd_ctrl *ctrl = (struct qcom_slim_ngd_ctrl *)priv;
qcom_slim_ngd_ssr_pdr_notify(ctrl, state);
}
static int of_qcom_slim_ngd_register(struct device *parent,
struct qcom_slim_ngd_ctrl *ctrl)
{
const struct ngd_reg_offset_data *data;
struct qcom_slim_ngd *ngd;
const struct of_device_id *match;
struct device_node *node;
u32 id;
int ret;
match = of_match_node(qcom_slim_ngd_dt_match, parent->of_node);
data = match->data;
for_each_available_child_of_node(parent->of_node, node) {
if (of_property_read_u32(node, "reg", &id))
continue;
ngd = kzalloc(sizeof(*ngd), GFP_KERNEL);
if (!ngd) {
of_node_put(node);
return -ENOMEM;
}
ngd->pdev = platform_device_alloc(QCOM_SLIM_NGD_DRV_NAME, id);
if (!ngd->pdev) {
kfree(ngd);
of_node_put(node);
return -ENOMEM;
}
ngd->id = id;
ngd->pdev->dev.parent = parent;
ret = driver_set_override(&ngd->pdev->dev,
&ngd->pdev->driver_override,
QCOM_SLIM_NGD_DRV_NAME,
strlen(QCOM_SLIM_NGD_DRV_NAME));
if (ret) {
platform_device_put(ngd->pdev);
kfree(ngd);
of_node_put(node);
return ret;
}
ngd->pdev->dev.of_node = node;
ctrl->ngd = ngd;
ret = platform_device_add(ngd->pdev);
if (ret) {
platform_device_put(ngd->pdev);
kfree(ngd);
of_node_put(node);
return ret;
}
ngd->base = ctrl->base + ngd->id * data->offset +
(ngd->id - 1) * data->size;
return 0;
}
return -ENODEV;
}
static int qcom_slim_ngd_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev->parent);
int ret;
ctrl->ctrl.dev = dev;
platform_set_drvdata(pdev, ctrl);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, QCOM_SLIM_NGD_AUTOSUSPEND);
pm_runtime_set_suspended(dev);
pm_runtime_enable(dev);
pm_runtime_get_noresume(dev);
ret = qcom_slim_ngd_qmi_svc_event_init(ctrl);
if (ret) {
dev_err(&pdev->dev, "QMI service registration failed:%d", ret);
return ret;
}
INIT_WORK(&ctrl->m_work, qcom_slim_ngd_master_worker);
INIT_WORK(&ctrl->ngd_up_work, qcom_slim_ngd_up_worker);
ctrl->mwq = create_singlethread_workqueue("ngd_master");
if (!ctrl->mwq) {
dev_err(&pdev->dev, "Failed to start master worker\n");
ret = -ENOMEM;
goto wq_err;
}
return 0;
wq_err:
qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi);
if (ctrl->mwq)
destroy_workqueue(ctrl->mwq);
return ret;
}
static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct qcom_slim_ngd_ctrl *ctrl;
int ret;
struct pdr_service *pds;
ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
return -ENOMEM;
dev_set_drvdata(dev, ctrl);
ctrl->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(ctrl->base))
return PTR_ERR(ctrl->base);
ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
ret = devm_request_irq(dev, ret, qcom_slim_ngd_interrupt,
IRQF_TRIGGER_HIGH, "slim-ngd", ctrl);
if (ret)
return dev_err_probe(&pdev->dev, ret, "request IRQ failed\n");
ctrl->nb.notifier_call = qcom_slim_ngd_ssr_notify;
ctrl->notifier = qcom_register_ssr_notifier("lpass", &ctrl->nb);
if (IS_ERR(ctrl->notifier))
return PTR_ERR(ctrl->notifier);
ctrl->dev = dev;
ctrl->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
ctrl->framer.superfreq =
ctrl->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
ctrl->ctrl.a_framer = &ctrl->framer;
ctrl->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
ctrl->ctrl.get_laddr = qcom_slim_ngd_get_laddr;
ctrl->ctrl.enable_stream = qcom_slim_ngd_enable_stream;
ctrl->ctrl.xfer_msg = qcom_slim_ngd_xfer_msg;
ctrl->ctrl.wakeup = NULL;
ctrl->state = QCOM_SLIM_NGD_CTRL_DOWN;
mutex_init(&ctrl->tx_lock);
mutex_init(&ctrl->ssr_lock);
spin_lock_init(&ctrl->tx_buf_lock);
init_completion(&ctrl->reconf);
init_completion(&ctrl->qmi.qmi_comp);
init_completion(&ctrl->qmi_up);
ctrl->pdr = pdr_handle_alloc(slim_pd_status, ctrl);
if (IS_ERR(ctrl->pdr)) {
ret = dev_err_probe(dev, PTR_ERR(ctrl->pdr),
"Failed to init PDR handle\n");
goto err_pdr_alloc;
}
pds = pdr_add_lookup(ctrl->pdr, "avs/audio", "msm/adsp/audio_pd");
if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
ret = dev_err_probe(dev, PTR_ERR(pds), "pdr add lookup failed\n");
goto err_pdr_lookup;
}
platform_driver_register(&qcom_slim_ngd_driver);
return of_qcom_slim_ngd_register(dev, ctrl);
	/* the pdr handle is only valid once pdr_handle_alloc() has succeeded */
err_pdr_lookup:
	pdr_handle_release(ctrl->pdr);
err_pdr_alloc:
	qcom_unregister_ssr_notifier(ctrl->notifier, &ctrl->nb);
return ret;
}
static int qcom_slim_ngd_ctrl_remove(struct platform_device *pdev)
{
platform_driver_unregister(&qcom_slim_ngd_driver);
return 0;
}
static int qcom_slim_ngd_remove(struct platform_device *pdev)
{
struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
pdr_handle_release(ctrl->pdr);
qcom_unregister_ssr_notifier(ctrl->notifier, &ctrl->nb);
qcom_slim_ngd_enable(ctrl, false);
qcom_slim_ngd_exit_dma(ctrl);
qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi);
if (ctrl->mwq)
destroy_workqueue(ctrl->mwq);
kfree(ctrl->ngd);
ctrl->ngd = NULL;
return 0;
}
static int __maybe_unused qcom_slim_ngd_runtime_idle(struct device *dev)
{
struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
if (ctrl->state == QCOM_SLIM_NGD_CTRL_AWAKE)
ctrl->state = QCOM_SLIM_NGD_CTRL_IDLE;
pm_request_autosuspend(dev);
return -EAGAIN;
}
static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
{
struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
int ret = 0;
qcom_slim_ngd_exit_dma(ctrl);
if (!ctrl->qmi.handle)
return 0;
ret = qcom_slim_qmi_power_request(ctrl, false);
if (ret && ret != -EBUSY)
dev_info(ctrl->dev, "slim resource not idle:%d\n", ret);
if (!ret || ret == -ETIMEDOUT)
ctrl->state = QCOM_SLIM_NGD_CTRL_ASLEEP;
return ret;
}
static const struct dev_pm_ops qcom_slim_ngd_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(
qcom_slim_ngd_runtime_suspend,
qcom_slim_ngd_runtime_resume,
qcom_slim_ngd_runtime_idle
)
};
static struct platform_driver qcom_slim_ngd_ctrl_driver = {
.probe = qcom_slim_ngd_ctrl_probe,
.remove = qcom_slim_ngd_ctrl_remove,
.driver = {
.name = "qcom,slim-ngd-ctrl",
.of_match_table = qcom_slim_ngd_dt_match,
},
};
static struct platform_driver qcom_slim_ngd_driver = {
.probe = qcom_slim_ngd_probe,
.remove = qcom_slim_ngd_remove,
.driver = {
.name = QCOM_SLIM_NGD_DRV_NAME,
.pm = &qcom_slim_ngd_dev_pm_ops,
},
};
module_platform_driver(qcom_slim_ngd_ctrl_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm SLIMBus NGD controller");
| linux-master | drivers/slimbus/qcom-ngd-ctrl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2011-2017, The Linux Foundation
*/
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include "slimbus.h"
/* Manager registers */
#define MGR_CFG 0x200
#define MGR_STATUS 0x204
#define MGR_INT_EN 0x210
#define MGR_INT_STAT 0x214
#define MGR_INT_CLR 0x218
#define MGR_TX_MSG 0x230
#define MGR_RX_MSG 0x270
#define MGR_IE_STAT 0x2F0
#define MGR_VE_STAT 0x300
#define MGR_CFG_ENABLE 1
/* Framer registers */
#define FRM_CFG 0x400
#define FRM_STAT 0x404
#define FRM_INT_EN 0x410
#define FRM_INT_STAT 0x414
#define FRM_INT_CLR 0x418
#define FRM_WAKEUP 0x41C
#define FRM_CLKCTL_DONE 0x420
#define FRM_IE_STAT 0x430
#define FRM_VE_STAT 0x440
/* Interface registers */
#define INTF_CFG 0x600
#define INTF_STAT 0x604
#define INTF_INT_EN 0x610
#define INTF_INT_STAT 0x614
#define INTF_INT_CLR 0x618
#define INTF_IE_STAT 0x630
#define INTF_VE_STAT 0x640
/* Interrupt status bits */
#define MGR_INT_TX_NACKED_2 BIT(25)
#define MGR_INT_MSG_BUF_CONTE BIT(26)
#define MGR_INT_RX_MSG_RCVD BIT(30)
#define MGR_INT_TX_MSG_SENT BIT(31)
/* Framer config register settings */
#define FRM_ACTIVE 1
#define CLK_GEAR 7
#define ROOT_FREQ 11
#define REF_CLK_GEAR 15
#define INTR_WAKE 19
#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
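/*
 * First word of a TX message, per the macro above: remaining length in
 * bits 4:0, message type in bits 7:5, message code in bits 14:8,
 * destination type in bit 15, and the destination address from bit 16 up.
 */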
#define SLIM_ROOT_FREQ 24576000
#define QCOM_SLIM_AUTOSUSPEND 1000
/* MAX message size over control channel */
#define SLIM_MSGQ_BUF_LEN 40
#define QCOM_TX_MSGS 2
#define QCOM_RX_MSGS 8
#define QCOM_BUF_ALLOC_RETRIES 10
#define CFG_PORT(r, v) ((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r))
/* V2 Component registers */
#define CFG_PORT_V2(r) ((r ## _V2))
#define COMP_CFG_V2 4
#define COMP_TRUST_CFG_V2 0x3000
/* V1 Component registers */
#define CFG_PORT_V1(r) ((r ## _V1))
#define COMP_CFG_V1 0
#define COMP_TRUST_CFG_V1 0x14
/* Resource group info for manager, and non-ported generic device-components */
#define EE_MGR_RSC_GRP (1 << 10)
#define EE_NGD_2 (2 << 6)
#define EE_NGD_1 0
struct slim_ctrl_buf {
void *base;
spinlock_t lock;
int head;
int tail;
int sl_sz;
int n;
};
struct qcom_slim_ctrl {
struct slim_controller ctrl;
struct slim_framer framer;
struct device *dev;
void __iomem *base;
void __iomem *slew_reg;
struct slim_ctrl_buf rx;
struct slim_ctrl_buf tx;
struct completion **wr_comp;
int irq;
struct workqueue_struct *rxwq;
struct work_struct wd;
struct clk *rclk;
struct clk *hclk;
};
static void qcom_slim_queue_tx(struct qcom_slim_ctrl *ctrl, void *buf,
u8 len, u32 tx_reg)
{
int count = (len + 3) >> 2;
__iowrite32_copy(ctrl->base + tx_reg, buf, count);
	/* Ensure ordering of subsequent writes */
mb();
}
static void *slim_alloc_rxbuf(struct qcom_slim_ctrl *ctrl)
{
unsigned long flags;
int idx;
spin_lock_irqsave(&ctrl->rx.lock, flags);
if ((ctrl->rx.tail + 1) % ctrl->rx.n == ctrl->rx.head) {
spin_unlock_irqrestore(&ctrl->rx.lock, flags);
dev_err(ctrl->dev, "RX QUEUE full!");
return NULL;
}
idx = ctrl->rx.tail;
ctrl->rx.tail = (ctrl->rx.tail + 1) % ctrl->rx.n;
spin_unlock_irqrestore(&ctrl->rx.lock, flags);
return ctrl->rx.base + (idx * ctrl->rx.sl_sz);
}
static void slim_ack_txn(struct qcom_slim_ctrl *ctrl, int err)
{
struct completion *comp;
unsigned long flags;
int idx;
spin_lock_irqsave(&ctrl->tx.lock, flags);
idx = ctrl->tx.head;
ctrl->tx.head = (ctrl->tx.head + 1) % ctrl->tx.n;
spin_unlock_irqrestore(&ctrl->tx.lock, flags);
comp = ctrl->wr_comp[idx];
ctrl->wr_comp[idx] = NULL;
complete(comp);
}
static irqreturn_t qcom_slim_handle_tx_irq(struct qcom_slim_ctrl *ctrl,
u32 stat)
{
int err = 0;
if (stat & MGR_INT_TX_MSG_SENT)
writel_relaxed(MGR_INT_TX_MSG_SENT,
ctrl->base + MGR_INT_CLR);
if (stat & MGR_INT_TX_NACKED_2) {
u32 mgr_stat = readl_relaxed(ctrl->base + MGR_STATUS);
u32 mgr_ie_stat = readl_relaxed(ctrl->base + MGR_IE_STAT);
u32 frm_stat = readl_relaxed(ctrl->base + FRM_STAT);
u32 frm_cfg = readl_relaxed(ctrl->base + FRM_CFG);
u32 frm_intr_stat = readl_relaxed(ctrl->base + FRM_INT_STAT);
u32 frm_ie_stat = readl_relaxed(ctrl->base + FRM_IE_STAT);
u32 intf_stat = readl_relaxed(ctrl->base + INTF_STAT);
u32 intf_intr_stat = readl_relaxed(ctrl->base + INTF_INT_STAT);
u32 intf_ie_stat = readl_relaxed(ctrl->base + INTF_IE_STAT);
writel_relaxed(MGR_INT_TX_NACKED_2, ctrl->base + MGR_INT_CLR);
dev_err(ctrl->dev, "TX Nack MGR:int:0x%x, stat:0x%x\n",
stat, mgr_stat);
dev_err(ctrl->dev, "TX Nack MGR:ie:0x%x\n", mgr_ie_stat);
dev_err(ctrl->dev, "TX Nack FRM:int:0x%x, stat:0x%x\n",
frm_intr_stat, frm_stat);
dev_err(ctrl->dev, "TX Nack FRM:cfg:0x%x, ie:0x%x\n",
frm_cfg, frm_ie_stat);
dev_err(ctrl->dev, "TX Nack INTF:intr:0x%x, stat:0x%x\n",
intf_intr_stat, intf_stat);
dev_err(ctrl->dev, "TX Nack INTF:ie:0x%x\n",
intf_ie_stat);
err = -ENOTCONN;
}
slim_ack_txn(ctrl, err);
return IRQ_HANDLED;
}
static irqreturn_t qcom_slim_handle_rx_irq(struct qcom_slim_ctrl *ctrl,
u32 stat)
{
u32 *rx_buf, pkt[10];
bool q_rx = false;
u8 mc, mt, len;
pkt[0] = readl_relaxed(ctrl->base + MGR_RX_MSG);
mt = SLIM_HEADER_GET_MT(pkt[0]);
len = SLIM_HEADER_GET_RL(pkt[0]);
mc = SLIM_HEADER_GET_MC(pkt[0]>>8);
/*
* this message cannot be handled by ISR, so
* let work-queue handle it
*/
if (mt == SLIM_MSG_MT_CORE && mc == SLIM_MSG_MC_REPORT_PRESENT) {
rx_buf = (u32 *)slim_alloc_rxbuf(ctrl);
if (!rx_buf) {
dev_err(ctrl->dev, "dropping RX:0x%x due to RX full\n",
pkt[0]);
goto rx_ret_irq;
}
rx_buf[0] = pkt[0];
} else {
rx_buf = pkt;
}
__ioread32_copy(rx_buf + 1, ctrl->base + MGR_RX_MSG + 4,
DIV_ROUND_UP(len, 4));
switch (mc) {
case SLIM_MSG_MC_REPORT_PRESENT:
q_rx = true;
break;
case SLIM_MSG_MC_REPLY_INFORMATION:
case SLIM_MSG_MC_REPLY_VALUE:
slim_msg_response(&ctrl->ctrl, (u8 *)(rx_buf + 1),
(u8)(*rx_buf >> 24), (len - 4));
break;
default:
dev_err(ctrl->dev, "unsupported MC,%x MT:%x\n",
mc, mt);
break;
}
rx_ret_irq:
writel(MGR_INT_RX_MSG_RCVD, ctrl->base +
MGR_INT_CLR);
if (q_rx)
queue_work(ctrl->rxwq, &ctrl->wd);
return IRQ_HANDLED;
}
static irqreturn_t qcom_slim_interrupt(int irq, void *d)
{
struct qcom_slim_ctrl *ctrl = d;
u32 stat = readl_relaxed(ctrl->base + MGR_INT_STAT);
int ret = IRQ_NONE;
if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2)
ret = qcom_slim_handle_tx_irq(ctrl, stat);
if (stat & MGR_INT_RX_MSG_RCVD)
ret = qcom_slim_handle_rx_irq(ctrl, stat);
return ret;
}
static int qcom_clk_pause_wakeup(struct slim_controller *sctrl)
{
struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
clk_prepare_enable(ctrl->hclk);
clk_prepare_enable(ctrl->rclk);
enable_irq(ctrl->irq);
writel_relaxed(1, ctrl->base + FRM_WAKEUP);
/* Make sure framer wakeup write goes through before ISR fires */
mb();
	/*
	 * HW workaround: slaves currently report lost-sync messages
	 * after SLIMbus comes out of clock pause. Transactions with a
	 * slave fail until that report has been received, so give it
	 * some time to arrive.
	 * SLIMbus wakes up in clock gear 10 at 24.576MHz. With each superframe
	 * being 250 usecs, we wait for 5-10 superframes here to ensure
	 * we get the message.
	 */
usleep_range(1250, 2500);
return 0;
}
static void *slim_alloc_txbuf(struct qcom_slim_ctrl *ctrl,
struct slim_msg_txn *txn,
struct completion *done)
{
unsigned long flags;
int idx;
spin_lock_irqsave(&ctrl->tx.lock, flags);
if (((ctrl->tx.head + 1) % ctrl->tx.n) == ctrl->tx.tail) {
spin_unlock_irqrestore(&ctrl->tx.lock, flags);
dev_err(ctrl->dev, "controller TX buf unavailable");
return NULL;
}
idx = ctrl->tx.tail;
ctrl->wr_comp[idx] = done;
ctrl->tx.tail = (ctrl->tx.tail + 1) % ctrl->tx.n;
spin_unlock_irqrestore(&ctrl->tx.lock, flags);
return ctrl->tx.base + (idx * ctrl->tx.sl_sz);
}
static int qcom_xfer_msg(struct slim_controller *sctrl,
struct slim_msg_txn *txn)
{
struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
DECLARE_COMPLETION_ONSTACK(done);
void *pbuf = slim_alloc_txbuf(ctrl, txn, &done);
unsigned long ms = txn->rl + HZ;
u8 *puc;
int ret = 0, timeout, retries = QCOM_BUF_ALLOC_RETRIES;
u8 la = txn->la;
u32 *head;
/* HW expects length field to be excluded */
txn->rl--;
/* spin till buffer is made available */
if (!pbuf) {
while (retries--) {
usleep_range(10000, 15000);
pbuf = slim_alloc_txbuf(ctrl, txn, &done);
if (pbuf)
break;
}
}
if (retries < 0 && !pbuf)
return -ENOMEM;
puc = (u8 *)pbuf;
head = (u32 *)pbuf;
if (txn->dt == SLIM_MSG_DEST_LOGICALADDR) {
*head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
txn->mc, 0, la);
puc += 3;
} else {
*head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
txn->mc, 1, la);
puc += 2;
}
if (slim_tid_txn(txn->mt, txn->mc))
*(puc++) = txn->tid;
if (slim_ec_txn(txn->mt, txn->mc)) {
*(puc++) = (txn->ec & 0xFF);
*(puc++) = (txn->ec >> 8) & 0xFF;
}
if (txn->msg && txn->msg->wbuf)
memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes);
qcom_slim_queue_tx(ctrl, head, txn->rl, MGR_TX_MSG);
timeout = wait_for_completion_timeout(&done, msecs_to_jiffies(ms));
if (!timeout) {
dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
txn->mt);
ret = -ETIMEDOUT;
}
return ret;
}
static int qcom_set_laddr(struct slim_controller *sctrl,
struct slim_eaddr *ead, u8 laddr)
{
struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
struct {
__be16 manf_id;
__be16 prod_code;
u8 dev_index;
u8 instance;
u8 laddr;
} __packed p;
struct slim_val_inf msg = {0};
DEFINE_SLIM_EDEST_TXN(txn, SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
10, laddr, &msg);
int ret;
p.manf_id = cpu_to_be16(ead->manf_id);
p.prod_code = cpu_to_be16(ead->prod_code);
p.dev_index = ead->dev_index;
p.instance = ead->instance;
p.laddr = laddr;
msg.wbuf = (void *)&p;
msg.num_bytes = 7;
ret = slim_do_transfer(&ctrl->ctrl, &txn);
if (ret)
dev_err(ctrl->dev, "set LA:0x%x failed:ret:%d\n",
laddr, ret);
return ret;
}
static int slim_get_current_rxbuf(struct qcom_slim_ctrl *ctrl, void *buf)
{
unsigned long flags;
spin_lock_irqsave(&ctrl->rx.lock, flags);
if (ctrl->rx.tail == ctrl->rx.head) {
spin_unlock_irqrestore(&ctrl->rx.lock, flags);
return -ENODATA;
}
memcpy(buf, ctrl->rx.base + (ctrl->rx.head * ctrl->rx.sl_sz),
ctrl->rx.sl_sz);
ctrl->rx.head = (ctrl->rx.head + 1) % ctrl->rx.n;
spin_unlock_irqrestore(&ctrl->rx.lock, flags);
return 0;
}
static void qcom_slim_rxwq(struct work_struct *work)
{
u8 buf[SLIM_MSGQ_BUF_LEN];
u8 mc, mt;
int ret;
struct qcom_slim_ctrl *ctrl = container_of(work, struct qcom_slim_ctrl,
wd);
while ((slim_get_current_rxbuf(ctrl, buf)) != -ENODATA) {
mt = SLIM_HEADER_GET_MT(buf[0]);
mc = SLIM_HEADER_GET_MC(buf[1]);
if (mt == SLIM_MSG_MT_CORE &&
mc == SLIM_MSG_MC_REPORT_PRESENT) {
struct slim_eaddr ea;
u8 laddr;
ea.manf_id = be16_to_cpup((__be16 *)&buf[2]);
ea.prod_code = be16_to_cpup((__be16 *)&buf[4]);
ea.dev_index = buf[6];
ea.instance = buf[7];
ret = slim_device_report_present(&ctrl->ctrl, &ea,
&laddr);
if (ret < 0)
dev_err(ctrl->dev, "assign laddr failed:%d\n",
ret);
} else {
dev_err(ctrl->dev, "unexpected message:mc:%x, mt:%x\n",
mc, mt);
}
}
}
static void qcom_slim_prg_slew(struct platform_device *pdev,
struct qcom_slim_ctrl *ctrl)
{
if (!ctrl->slew_reg) {
/* SLEW RATE register for this SLIMbus */
ctrl->slew_reg = devm_platform_ioremap_resource_byname(pdev, "slew");
if (IS_ERR(ctrl->slew_reg))
return;
}
writel_relaxed(1, ctrl->slew_reg);
/* Make sure SLIMbus-slew rate enabling goes through */
wmb();
}
static int qcom_slim_probe(struct platform_device *pdev)
{
struct qcom_slim_ctrl *ctrl;
struct slim_controller *sctrl;
int ret, ver;
ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
return -ENOMEM;
ctrl->hclk = devm_clk_get(&pdev->dev, "iface");
if (IS_ERR(ctrl->hclk))
return PTR_ERR(ctrl->hclk);
ctrl->rclk = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(ctrl->rclk))
return PTR_ERR(ctrl->rclk);
ret = clk_set_rate(ctrl->rclk, SLIM_ROOT_FREQ);
if (ret) {
dev_err(&pdev->dev, "ref-clock set-rate failed:%d\n", ret);
return ret;
}
ctrl->irq = platform_get_irq(pdev, 0);
if (ctrl->irq < 0)
return ctrl->irq;
sctrl = &ctrl->ctrl;
sctrl->dev = &pdev->dev;
ctrl->dev = &pdev->dev;
platform_set_drvdata(pdev, ctrl);
dev_set_drvdata(ctrl->dev, ctrl);
ctrl->base = devm_platform_ioremap_resource_byname(pdev, "ctrl");
if (IS_ERR(ctrl->base))
return PTR_ERR(ctrl->base);
sctrl->set_laddr = qcom_set_laddr;
sctrl->xfer_msg = qcom_xfer_msg;
sctrl->wakeup = qcom_clk_pause_wakeup;
ctrl->tx.n = QCOM_TX_MSGS;
ctrl->tx.sl_sz = SLIM_MSGQ_BUF_LEN;
ctrl->rx.n = QCOM_RX_MSGS;
ctrl->rx.sl_sz = SLIM_MSGQ_BUF_LEN;
ctrl->wr_comp = kcalloc(QCOM_TX_MSGS, sizeof(struct completion *),
GFP_KERNEL);
if (!ctrl->wr_comp)
return -ENOMEM;
spin_lock_init(&ctrl->rx.lock);
spin_lock_init(&ctrl->tx.lock);
INIT_WORK(&ctrl->wd, qcom_slim_rxwq);
ctrl->rxwq = create_singlethread_workqueue("qcom_slim_rx");
if (!ctrl->rxwq) {
dev_err(ctrl->dev, "Failed to start Rx WQ\n");
return -ENOMEM;
}
ctrl->framer.rootfreq = SLIM_ROOT_FREQ / 8;
ctrl->framer.superfreq =
ctrl->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
sctrl->a_framer = &ctrl->framer;
sctrl->clkgear = SLIM_MAX_CLK_GEAR;
qcom_slim_prg_slew(pdev, ctrl);
ret = devm_request_irq(&pdev->dev, ctrl->irq, qcom_slim_interrupt,
IRQF_TRIGGER_HIGH, "qcom_slim_irq", ctrl);
if (ret) {
dev_err(&pdev->dev, "request IRQ failed\n");
goto err_request_irq_failed;
}
ret = clk_prepare_enable(ctrl->hclk);
if (ret)
goto err_hclk_enable_failed;
ret = clk_prepare_enable(ctrl->rclk);
if (ret)
goto err_rclk_enable_failed;
ctrl->tx.base = devm_kcalloc(&pdev->dev, ctrl->tx.n, ctrl->tx.sl_sz,
GFP_KERNEL);
if (!ctrl->tx.base) {
ret = -ENOMEM;
goto err;
}
	ctrl->rx.base = devm_kcalloc(&pdev->dev, ctrl->rx.n, ctrl->rx.sl_sz,
GFP_KERNEL);
if (!ctrl->rx.base) {
ret = -ENOMEM;
goto err;
}
/* Register with framework before enabling frame, clock */
ret = slim_register_controller(&ctrl->ctrl);
if (ret) {
dev_err(ctrl->dev, "error adding controller\n");
goto err;
}
ver = readl_relaxed(ctrl->base);
/* Version info in 16 MSbits */
ver >>= 16;
/* Component register initialization */
writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));
writel((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
ctrl->base + CFG_PORT(COMP_TRUST_CFG, ver));
writel((MGR_INT_TX_NACKED_2 |
MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
MGR_INT_TX_MSG_SENT), ctrl->base + MGR_INT_EN);
writel(1, ctrl->base + MGR_CFG);
/* Framer register initialization */
writel((1 << INTR_WAKE) | (0xA << REF_CLK_GEAR) |
(0xA << CLK_GEAR) | (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
ctrl->base + FRM_CFG);
writel(MGR_CFG_ENABLE, ctrl->base + MGR_CFG);
writel(1, ctrl->base + INTF_CFG);
writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, QCOM_SLIM_AUTOSUSPEND);
pm_runtime_set_active(&pdev->dev);
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_enable(&pdev->dev);
dev_dbg(ctrl->dev, "QCOM SB controller is up:ver:0x%x!\n", ver);
return 0;
err:
clk_disable_unprepare(ctrl->rclk);
err_rclk_enable_failed:
clk_disable_unprepare(ctrl->hclk);
err_hclk_enable_failed:
err_request_irq_failed:
destroy_workqueue(ctrl->rxwq);
return ret;
}
static int qcom_slim_remove(struct platform_device *pdev)
{
struct qcom_slim_ctrl *ctrl = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
slim_unregister_controller(&ctrl->ctrl);
clk_disable_unprepare(ctrl->rclk);
clk_disable_unprepare(ctrl->hclk);
destroy_workqueue(ctrl->rxwq);
return 0;
}
/*
 * If runtime PM is not enabled, these two functions become helpers
 * called from system suspend/resume.
 */
#ifdef CONFIG_PM
static int qcom_slim_runtime_suspend(struct device *device)
{
struct qcom_slim_ctrl *ctrl = dev_get_drvdata(device);
int ret;
dev_dbg(device, "pm_runtime: suspending...\n");
ret = slim_ctrl_clk_pause(&ctrl->ctrl, false, SLIM_CLK_UNSPECIFIED);
if (ret) {
dev_err(device, "clk pause not entered:%d", ret);
} else {
disable_irq(ctrl->irq);
clk_disable_unprepare(ctrl->hclk);
clk_disable_unprepare(ctrl->rclk);
}
return ret;
}
static int qcom_slim_runtime_resume(struct device *device)
{
struct qcom_slim_ctrl *ctrl = dev_get_drvdata(device);
int ret = 0;
dev_dbg(device, "pm_runtime: resuming...\n");
ret = slim_ctrl_clk_pause(&ctrl->ctrl, true, 0);
if (ret)
dev_err(device, "clk pause not exited:%d", ret);
return ret;
}
#endif
#ifdef CONFIG_PM_SLEEP
static int qcom_slim_suspend(struct device *dev)
{
int ret = 0;
if (!pm_runtime_enabled(dev) ||
(!pm_runtime_suspended(dev))) {
dev_dbg(dev, "system suspend");
ret = qcom_slim_runtime_suspend(dev);
}
return ret;
}
static int qcom_slim_resume(struct device *dev)
{
if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
int ret;
dev_dbg(dev, "system resume");
ret = qcom_slim_runtime_resume(dev);
if (!ret) {
pm_runtime_mark_last_busy(dev);
pm_request_autosuspend(dev);
}
return ret;
}
return 0;
}
#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops qcom_slim_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(qcom_slim_suspend, qcom_slim_resume)
SET_RUNTIME_PM_OPS(
qcom_slim_runtime_suspend,
qcom_slim_runtime_resume,
NULL
)
};
static const struct of_device_id qcom_slim_dt_match[] = {
{ .compatible = "qcom,slim", },
{}
};
static struct platform_driver qcom_slim_driver = {
.probe = qcom_slim_probe,
.remove = qcom_slim_remove,
.driver = {
.name = "qcom_slim_ctrl",
.of_match_table = qcom_slim_dt_match,
.pm = &qcom_slim_dev_pm_ops,
},
};
module_platform_driver(qcom_slim_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm SLIMbus Controller");
| linux-master | drivers/slimbus/qcom-ctrl.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018, Linaro Limited
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/slimbus.h>
#include <uapi/sound/asound.h>
#include "slimbus.h"
/**
 * struct segdist_code - Segment Distribution codes from
 * Table 20 of SLIMbus Specs Version 2.0
 *
 * @ratem: Channel Rate Multiplier (Segments per Superframe)
 * @seg_interval: Number of slots between the first Slot of a Segment
 *	and the first slot of the next consecutive Segment.
 * @segdist_code: Segment Distribution Code SD[11:0]
 * @seg_offset_mask: Segment offset mask in SD[11:0]
 * @segdist_codes: List of all possible Segment Distribution codes.
*/
static const struct segdist_code {
int ratem;
int seg_interval;
int segdist_code;
u32 seg_offset_mask;
} segdist_codes[] = {
{1, 1536, 0x200, 0xdff},
{2, 768, 0x100, 0xcff},
{4, 384, 0x080, 0xc7f},
{8, 192, 0x040, 0xc3f},
{16, 96, 0x020, 0xc1f},
{32, 48, 0x010, 0xc0f},
{64, 24, 0x008, 0xc07},
{128, 12, 0x004, 0xc03},
{256, 6, 0x002, 0xc01},
{512, 3, 0x001, 0xc00},
{3, 512, 0xe00, 0x1ff},
{6, 256, 0xd00, 0x0ff},
{12, 128, 0xc80, 0x07f},
{24, 64, 0xc40, 0x03f},
{48, 32, 0xc20, 0x01f},
{96, 16, 0xc10, 0x00f},
{192, 8, 0xc08, 0x007},
	{384, 4, 0xc04, 0x003},
{768, 2, 0xc02, 0x001},
};
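/*
 * Sanity note: for every row above, ratem * seg_interval == 1536, the
 * number of slots in a SLIMbus superframe.
 */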
/*
* Presence Rate table for all Natural Frequencies
 * The Presence rate of a constant bitrate stream is the mean flow rate of the
 * stream, expressed in occupied Segments of that Data Channel per second.
* Table 66 from SLIMbus 2.0 Specs
*
* Index of the table corresponds to Presence rate code for the respective rate
* in the table.
*/
static const int slim_presence_rate_table[] = {
0, /* Not Indicated */
12000,
24000,
48000,
96000,
192000,
384000,
768000,
0, /* Reserved */
11025,
22050,
44100,
88200,
176400,
352800,
705600,
4000,
8000,
16000,
32000,
64000,
128000,
256000,
512000,
};
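/*
 * The presence rate code is simply the index into this table, e.g.
 * 48000 Hz maps to code 3 (see slim_get_prate_code() below).
 */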
/**
* slim_stream_allocate() - Allocate a new SLIMbus Stream
 * @dev: Slim device to be associated with
 * @name: name of the stream
 *
 * This is the very first call for SLIMbus streaming; it allocates a new
 * SLIMbus stream and returns a valid stream runtime pointer for the client
 * to use in subsequent stream APIs. The stream state is set to ALLOCATED.
*
* Return: valid pointer on success and error code on failure.
* From ASoC DPCM framework, this state is linked to startup() operation.
*/
struct slim_stream_runtime *slim_stream_allocate(struct slim_device *dev,
const char *name)
{
struct slim_stream_runtime *rt;
rt = kzalloc(sizeof(*rt), GFP_KERNEL);
if (!rt)
return ERR_PTR(-ENOMEM);
rt->name = kasprintf(GFP_KERNEL, "slim-%s", name);
if (!rt->name) {
kfree(rt);
return ERR_PTR(-ENOMEM);
}
rt->dev = dev;
spin_lock(&dev->stream_list_lock);
list_add_tail(&rt->node, &dev->stream_list);
spin_unlock(&dev->stream_list_lock);
return rt;
}
EXPORT_SYMBOL_GPL(slim_stream_allocate);
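/*
 * Typical client usage of the stream API (illustrative sketch only):
 *
 *	rt = slim_stream_allocate(sdev, "playback");
 *	slim_stream_prepare(rt, &cfg);
 *	slim_stream_enable(rt);
 *	...
 *	slim_stream_disable(rt);
 *	slim_stream_unprepare(rt);
 *	slim_stream_free(rt);
 *
 * This mirrors the ASoC DPCM mapping documented on each function below.
 */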
static int slim_connect_port_channel(struct slim_stream_runtime *stream,
struct slim_port *port)
{
struct slim_device *sdev = stream->dev;
u8 wbuf[2];
struct slim_val_inf msg = {0, 2, NULL, wbuf, NULL};
u8 mc = SLIM_MSG_MC_CONNECT_SOURCE;
DEFINE_SLIM_LDEST_TXN(txn, mc, 6, stream->dev->laddr, &msg);
if (port->direction == SLIM_PORT_SINK)
txn.mc = SLIM_MSG_MC_CONNECT_SINK;
wbuf[0] = port->id;
wbuf[1] = port->ch.id;
port->ch.state = SLIM_CH_STATE_ASSOCIATED;
port->state = SLIM_PORT_UNCONFIGURED;
return slim_do_transfer(sdev->ctrl, &txn);
}
static int slim_disconnect_port(struct slim_stream_runtime *stream,
struct slim_port *port)
{
struct slim_device *sdev = stream->dev;
u8 wbuf[1];
struct slim_val_inf msg = {0, 1, NULL, wbuf, NULL};
u8 mc = SLIM_MSG_MC_DISCONNECT_PORT;
DEFINE_SLIM_LDEST_TXN(txn, mc, 5, stream->dev->laddr, &msg);
wbuf[0] = port->id;
port->ch.state = SLIM_CH_STATE_DISCONNECTED;
port->state = SLIM_PORT_DISCONNECTED;
return slim_do_transfer(sdev->ctrl, &txn);
}
static int slim_deactivate_remove_channel(struct slim_stream_runtime *stream,
struct slim_port *port)
{
struct slim_device *sdev = stream->dev;
u8 wbuf[1];
struct slim_val_inf msg = {0, 1, NULL, wbuf, NULL};
u8 mc = SLIM_MSG_MC_NEXT_DEACTIVATE_CHANNEL;
DEFINE_SLIM_LDEST_TXN(txn, mc, 5, stream->dev->laddr, &msg);
int ret;
wbuf[0] = port->ch.id;
ret = slim_do_transfer(sdev->ctrl, &txn);
if (ret)
return ret;
txn.mc = SLIM_MSG_MC_NEXT_REMOVE_CHANNEL;
port->ch.state = SLIM_CH_STATE_REMOVED;
return slim_do_transfer(sdev->ctrl, &txn);
}
static int slim_get_prate_code(int rate)
{
int i;
for (i = 0; i < ARRAY_SIZE(slim_presence_rate_table); i++) {
if (rate == slim_presence_rate_table[i])
return i;
}
return -EINVAL;
}
/**
* slim_stream_prepare() - Prepare a SLIMbus Stream
*
* @rt: instance of slim stream runtime to configure
* @cfg: new configuration for the stream
*
 * This API will configure the SLIMbus stream with the config parameters
 * from cfg.
 *
 * Return: zero on success and error code on failure. From the ASoC DPCM
 * framework, this state is linked to the hw_params() operation.
*/
int slim_stream_prepare(struct slim_stream_runtime *rt,
struct slim_stream_config *cfg)
{
struct slim_controller *ctrl = rt->dev->ctrl;
struct slim_port *port;
int num_ports, i, port_id, prrate;
if (rt->ports) {
dev_err(&rt->dev->dev, "Stream already Prepared\n");
return -EINVAL;
}
num_ports = hweight32(cfg->port_mask);
rt->ports = kcalloc(num_ports, sizeof(*port), GFP_KERNEL);
if (!rt->ports)
return -ENOMEM;
rt->num_ports = num_ports;
rt->rate = cfg->rate;
rt->bps = cfg->bps;
rt->direction = cfg->direction;
prrate = slim_get_prate_code(cfg->rate);
if (prrate < 0) {
dev_err(&rt->dev->dev, "Cannot get presence rate for rate %d Hz\n",
cfg->rate);
return prrate;
}
if (cfg->rate % ctrl->a_framer->superfreq) {
/*
* data rate not exactly multiple of super frame,
* use PUSH/PULL protocol
*/
if (cfg->direction == SNDRV_PCM_STREAM_PLAYBACK)
rt->prot = SLIM_PROTO_PUSH;
else
rt->prot = SLIM_PROTO_PULL;
} else {
rt->prot = SLIM_PROTO_ISO;
}
rt->ratem = cfg->rate/ctrl->a_framer->superfreq;
i = 0;
for_each_set_bit(port_id, &cfg->port_mask, SLIM_DEVICE_MAX_PORTS) {
port = &rt->ports[i];
port->state = SLIM_PORT_DISCONNECTED;
port->id = port_id;
port->ch.prrate = prrate;
port->ch.id = cfg->chs[i];
port->ch.data_fmt = SLIM_CH_DATA_FMT_NOT_DEFINED;
port->ch.aux_fmt = SLIM_CH_AUX_FMT_NOT_APPLICABLE;
port->ch.state = SLIM_CH_STATE_ALLOCATED;
if (cfg->direction == SNDRV_PCM_STREAM_PLAYBACK)
port->direction = SLIM_PORT_SINK;
else
port->direction = SLIM_PORT_SOURCE;
slim_connect_port_channel(rt, port);
i++;
}
return 0;
}
EXPORT_SYMBOL_GPL(slim_stream_prepare);
static int slim_define_channel_content(struct slim_stream_runtime *stream,
struct slim_port *port)
{
struct slim_device *sdev = stream->dev;
u8 wbuf[4];
struct slim_val_inf msg = {0, 4, NULL, wbuf, NULL};
u8 mc = SLIM_MSG_MC_NEXT_DEFINE_CONTENT;
DEFINE_SLIM_LDEST_TXN(txn, mc, 8, stream->dev->laddr, &msg);
wbuf[0] = port->ch.id;
wbuf[1] = port->ch.prrate;
/* Frequency Locked for ISO Protocol */
if (stream->prot != SLIM_PROTO_ISO)
wbuf[1] |= SLIM_CHANNEL_CONTENT_FL;
wbuf[2] = port->ch.data_fmt | (port->ch.aux_fmt << 4);
wbuf[3] = stream->bps/SLIM_SLOT_LEN_BITS;
port->ch.state = SLIM_CH_STATE_CONTENT_DEFINED;
return slim_do_transfer(sdev->ctrl, &txn);
}
static int slim_get_segdist_code(int ratem)
{
int i;
for (i = 0; i < ARRAY_SIZE(segdist_codes); i++) {
if (segdist_codes[i].ratem == ratem)
return segdist_codes[i].segdist_code;
}
return -EINVAL;
}
static int slim_define_channel(struct slim_stream_runtime *stream,
struct slim_port *port)
{
struct slim_device *sdev = stream->dev;
u8 wbuf[4];
struct slim_val_inf msg = {0, 4, NULL, wbuf, NULL};
u8 mc = SLIM_MSG_MC_NEXT_DEFINE_CHANNEL;
DEFINE_SLIM_LDEST_TXN(txn, mc, 8, stream->dev->laddr, &msg);
port->ch.seg_dist = slim_get_segdist_code(stream->ratem);
wbuf[0] = port->ch.id;
wbuf[1] = port->ch.seg_dist & 0xFF;
wbuf[2] = (stream->prot << 4) | ((port->ch.seg_dist & 0xF00) >> 8);
if (stream->prot == SLIM_PROTO_ISO)
wbuf[3] = stream->bps/SLIM_SLOT_LEN_BITS;
else
wbuf[3] = stream->bps/SLIM_SLOT_LEN_BITS + 1;
port->ch.state = SLIM_CH_STATE_DEFINED;
return slim_do_transfer(sdev->ctrl, &txn);
}
static int slim_activate_channel(struct slim_stream_runtime *stream,
struct slim_port *port)
{
struct slim_device *sdev = stream->dev;
u8 wbuf[1];
struct slim_val_inf msg = {0, 1, NULL, wbuf, NULL};
u8 mc = SLIM_MSG_MC_NEXT_ACTIVATE_CHANNEL;
DEFINE_SLIM_LDEST_TXN(txn, mc, 5, stream->dev->laddr, &msg);
txn.msg->num_bytes = 1;
txn.msg->wbuf = wbuf;
wbuf[0] = port->ch.id;
port->ch.state = SLIM_CH_STATE_ACTIVE;
return slim_do_transfer(sdev->ctrl, &txn);
}
/**
* slim_stream_enable() - Enable a prepared SLIMbus Stream
*
* @stream: instance of slim stream runtime to enable
*
 * This API will enable all the ports and channels associated with the
 * SLIMbus stream.
*
* Return: zero on success and error code on failure. From ASoC DPCM framework,
* this state is linked to trigger() start operation.
*/
int slim_stream_enable(struct slim_stream_runtime *stream)
{
DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_MC_BEGIN_RECONFIGURATION,
3, SLIM_LA_MANAGER, NULL);
struct slim_controller *ctrl = stream->dev->ctrl;
int ret, i;
if (ctrl->enable_stream) {
ret = ctrl->enable_stream(stream);
if (ret)
return ret;
for (i = 0; i < stream->num_ports; i++)
stream->ports[i].ch.state = SLIM_CH_STATE_ACTIVE;
return ret;
}
ret = slim_do_transfer(ctrl, &txn);
if (ret)
return ret;
/* define channels first before activating them */
for (i = 0; i < stream->num_ports; i++) {
struct slim_port *port = &stream->ports[i];
slim_define_channel(stream, port);
slim_define_channel_content(stream, port);
}
for (i = 0; i < stream->num_ports; i++) {
struct slim_port *port = &stream->ports[i];
slim_activate_channel(stream, port);
port->state = SLIM_PORT_CONFIGURED;
}
txn.mc = SLIM_MSG_MC_RECONFIGURE_NOW;
return slim_do_transfer(ctrl, &txn);
}
EXPORT_SYMBOL_GPL(slim_stream_enable);
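/*
 * Illustrative sketch (not part of the original driver): a DAI driver would
 * typically enable a stream from its trigger() start path once
 * slim_stream_prepare() has succeeded. "rt" and "cfg" below are hypothetical
 * variables assumed to exist in the caller.
 *
 *	ret = slim_stream_prepare(rt, &cfg);
 *	if (!ret)
 *		ret = slim_stream_enable(rt);
 */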
/**
* slim_stream_disable() - Disable a SLIMbus Stream
*
* @stream: instance of slim stream runtime to disable
*
 * This API will disable all the ports and channels associated with the
 * SLIMbus stream.
 *
 * Return: zero on success and error code on failure. In the ASoC DPCM
 * framework, this state is linked to the trigger() pause operation.
*/
int slim_stream_disable(struct slim_stream_runtime *stream)
{
DEFINE_SLIM_BCAST_TXN(txn, SLIM_MSG_MC_BEGIN_RECONFIGURATION,
3, SLIM_LA_MANAGER, NULL);
struct slim_controller *ctrl = stream->dev->ctrl;
int ret, i;
if (!stream->ports || !stream->num_ports)
return -EINVAL;
if (ctrl->disable_stream)
ctrl->disable_stream(stream);
ret = slim_do_transfer(ctrl, &txn);
if (ret)
return ret;
for (i = 0; i < stream->num_ports; i++)
slim_deactivate_remove_channel(stream, &stream->ports[i]);
txn.mc = SLIM_MSG_MC_RECONFIGURE_NOW;
return slim_do_transfer(ctrl, &txn);
}
EXPORT_SYMBOL_GPL(slim_stream_disable);
/**
* slim_stream_unprepare() - Un-prepare a SLIMbus Stream
*
* @stream: instance of slim stream runtime to unprepare
*
 * This API will deallocate all the ports and channels associated with the
 * SLIMbus stream.
 *
 * Return: zero on success and error code on failure. In the ASoC DPCM
 * framework, this state is linked to the trigger() stop operation.
*/
int slim_stream_unprepare(struct slim_stream_runtime *stream)
{
int i;
if (!stream->ports || !stream->num_ports)
return -EINVAL;
for (i = 0; i < stream->num_ports; i++)
slim_disconnect_port(stream, &stream->ports[i]);
kfree(stream->ports);
stream->ports = NULL;
stream->num_ports = 0;
return 0;
}
EXPORT_SYMBOL_GPL(slim_stream_unprepare);
/**
* slim_stream_free() - Free a SLIMbus Stream
*
* @stream: instance of slim stream runtime to free
*
 * This API will free all the memory associated with the slim stream
 * runtime; the caller must not dereference @stream after this call.
 *
 * Return: zero on success and error code on failure. In the ASoC DPCM
 * framework, this state is linked to the shutdown() operation.
*/
int slim_stream_free(struct slim_stream_runtime *stream)
{
struct slim_device *sdev = stream->dev;
spin_lock(&sdev->stream_list_lock);
list_del(&stream->node);
spin_unlock(&sdev->stream_list_lock);
kfree(stream->name);
kfree(stream);
return 0;
}
EXPORT_SYMBOL_GPL(slim_stream_free);
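/*
 * Typical stream lifecycle, as suggested by the kernel-doc above (a sketch
 * only; error handling is omitted, "sdev" is a hypothetical struct
 * slim_device pointer, and slim_stream_allocate() is assumed to be the
 * allocation counterpart defined earlier in this file):
 *
 *	rt = slim_stream_allocate(sdev, "playback");
 *	slim_stream_prepare(rt, &cfg);		- hw_params
 *	slim_stream_enable(rt);			- trigger start
 *	slim_stream_disable(rt);		- trigger pause
 *	slim_stream_unprepare(rt);		- hw_free
 *	slim_stream_free(rt);			- shutdown
 */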
| linux-master | drivers/slimbus/stream.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/mcb.h>
#include "mcb-internal.h"
struct mcb_parse_priv {
phys_addr_t mapbase;
void __iomem *base;
};
#define for_each_chameleon_cell(dtype, p) \
for ((dtype) = get_next_dtype((p)); \
(dtype) != CHAMELEON_DTYPE_END; \
(dtype) = get_next_dtype((p)))
static inline uint32_t get_next_dtype(void __iomem *p)
{
uint32_t dtype;
dtype = readl(p);
return dtype >> 28;
}
static int chameleon_parse_bdd(struct mcb_bus *bus,
struct chameleon_bar *cb,
void __iomem *base)
{
return 0;
}
static int chameleon_parse_gdd(struct mcb_bus *bus,
struct chameleon_bar *cb,
void __iomem *base, int bar_count)
{
struct chameleon_gdd __iomem *gdd =
(struct chameleon_gdd __iomem *) base;
struct mcb_device *mdev;
u32 dev_mapbase;
u32 offset;
u32 size;
int ret;
__le32 reg1;
__le32 reg2;
mdev = mcb_alloc_dev(bus);
if (!mdev)
return -ENOMEM;
reg1 = readl(&gdd->reg1);
reg2 = readl(&gdd->reg2);
offset = readl(&gdd->offset);
size = readl(&gdd->size);
mdev->id = GDD_DEV(reg1);
mdev->rev = GDD_REV(reg1);
mdev->var = GDD_VAR(reg1);
mdev->bar = GDD_BAR(reg2);
mdev->group = GDD_GRP(reg2);
mdev->inst = GDD_INS(reg2);
	/*
	 * If the BAR is missing, dev_mapbase is zero, or the device is
	 * IO mapped, just print a warning and go on with the next device
	 * instead of completely stopping the GDD parser.
	 */
if (mdev->bar > bar_count - 1) {
pr_info("No BAR for 16z%03d\n", mdev->id);
ret = 0;
goto err;
}
dev_mapbase = cb[mdev->bar].addr;
if (!dev_mapbase) {
pr_info("BAR not assigned for 16z%03d\n", mdev->id);
ret = 0;
goto err;
}
if (dev_mapbase & 0x01) {
pr_info("IO mapped Device (16z%03d) not yet supported\n",
mdev->id);
ret = 0;
goto err;
}
pr_debug("Found a 16z%03d\n", mdev->id);
mdev->irq.start = GDD_IRQ(reg1);
mdev->irq.end = GDD_IRQ(reg1);
mdev->irq.flags = IORESOURCE_IRQ;
mdev->mem.start = dev_mapbase + offset;
mdev->mem.end = mdev->mem.start + size - 1;
mdev->mem.flags = IORESOURCE_MEM;
mdev->is_added = false;
ret = mcb_device_register(bus, mdev);
if (ret < 0)
goto err;
return 0;
err:
put_device(&mdev->dev);
return ret;
}
static void chameleon_parse_bar(void __iomem *base,
struct chameleon_bar *cb, int bar_count)
{
char __iomem *p = base;
int i;
/* skip reg1 */
p += sizeof(__le32);
for (i = 0; i < bar_count; i++) {
cb[i].addr = readl(p);
cb[i].size = readl(p + 4);
p += sizeof(struct chameleon_bar);
}
}
static int chameleon_get_bar(void __iomem **base, phys_addr_t mapbase,
struct chameleon_bar **cb)
{
struct chameleon_bar *c;
int bar_count;
__le32 reg;
u32 dtype;
/*
* For those devices which are not connected
* to the PCI Bus (e.g. LPC) there is a bar
* descriptor located directly after the
* chameleon header. This header is comparable
* to a PCI header.
*/
dtype = get_next_dtype(*base);
if (dtype == CHAMELEON_DTYPE_BAR) {
reg = readl(*base);
bar_count = BAR_CNT(reg);
if (bar_count <= 0 || bar_count > CHAMELEON_BAR_MAX)
return -ENODEV;
c = kcalloc(bar_count, sizeof(struct chameleon_bar),
GFP_KERNEL);
if (!c)
return -ENOMEM;
chameleon_parse_bar(*base, c, bar_count);
*base += BAR_DESC_SIZE(bar_count);
} else {
c = kzalloc(sizeof(struct chameleon_bar), GFP_KERNEL);
if (!c)
return -ENOMEM;
bar_count = 1;
c->addr = mapbase;
}
*cb = c;
return bar_count;
}
int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
void __iomem *base)
{
struct chameleon_fpga_header *header;
struct chameleon_bar *cb;
void __iomem *p = base;
int num_cells = 0;
uint32_t dtype;
int bar_count;
int ret;
u32 hsize;
u32 table_size;
hsize = sizeof(struct chameleon_fpga_header);
header = kzalloc(hsize, GFP_KERNEL);
if (!header)
return -ENOMEM;
/* Extract header information */
memcpy_fromio(header, p, hsize);
/* We only support chameleon v2 at the moment */
header->magic = le16_to_cpu(header->magic);
if (header->magic != CHAMELEONV2_MAGIC) {
pr_err("Unsupported chameleon version 0x%x\n",
header->magic);
ret = -ENODEV;
goto free_header;
}
p += hsize;
bus->revision = header->revision;
bus->model = header->model;
bus->minor = header->minor;
snprintf(bus->name, CHAMELEON_FILENAME_LEN + 1, "%s",
header->filename);
bar_count = chameleon_get_bar(&p, mapbase, &cb);
if (bar_count < 0) {
ret = bar_count;
goto free_header;
}
for_each_chameleon_cell(dtype, p) {
switch (dtype) {
case CHAMELEON_DTYPE_GENERAL:
ret = chameleon_parse_gdd(bus, cb, p, bar_count);
if (ret < 0)
goto free_bar;
p += sizeof(struct chameleon_gdd);
break;
case CHAMELEON_DTYPE_BRIDGE:
chameleon_parse_bdd(bus, cb, p);
p += sizeof(struct chameleon_bdd);
break;
case CHAMELEON_DTYPE_END:
break;
default:
pr_err("Invalid chameleon descriptor type 0x%x\n",
dtype);
ret = -EINVAL;
goto free_bar;
}
num_cells++;
}
if (num_cells == 0) {
ret = -EINVAL;
goto free_bar;
}
table_size = p - base;
pr_debug("%d cell(s) found. Chameleon table size: 0x%04x bytes\n", num_cells, table_size);
kfree(cb);
kfree(header);
return table_size;
free_bar:
kfree(cb);
free_header:
kfree(header);
return ret;
}
EXPORT_SYMBOL_NS_GPL(chameleon_parse_cells, MCB);
| linux-master | drivers/mcb/mcb-parse.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* MEN Chameleon Bus.
*
* Copyright (C) 2014 MEN Mikroelektronik GmbH (www.men.de)
* Author: Johannes Thumshirn <[email protected]>
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/mcb.h>
#include "mcb-internal.h"
struct priv {
struct mcb_bus *bus;
phys_addr_t mapbase;
void __iomem *base;
};
static int mcb_pci_get_irq(struct mcb_device *mdev)
{
struct mcb_bus *mbus = mdev->bus;
struct device *dev = mbus->carrier;
struct pci_dev *pdev = to_pci_dev(dev);
return pdev->irq;
}
static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct resource *res;
struct priv *priv;
int ret, table_size;
unsigned long flags;
priv = devm_kzalloc(&pdev->dev, sizeof(struct priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "Failed to enable PCI device\n");
return -ENODEV;
}
pci_set_master(pdev);
priv->mapbase = pci_resource_start(pdev, 0);
if (!priv->mapbase) {
dev_err(&pdev->dev, "No PCI resource\n");
ret = -ENODEV;
goto out_disable;
}
res = devm_request_mem_region(&pdev->dev, priv->mapbase,
CHAM_HEADER_SIZE,
KBUILD_MODNAME);
if (!res) {
dev_err(&pdev->dev, "Failed to request PCI memory\n");
ret = -EBUSY;
goto out_disable;
}
priv->base = devm_ioremap(&pdev->dev, priv->mapbase, CHAM_HEADER_SIZE);
if (!priv->base) {
dev_err(&pdev->dev, "Cannot ioremap\n");
ret = -ENOMEM;
goto out_disable;
}
flags = pci_resource_flags(pdev, 0);
if (flags & IORESOURCE_IO) {
ret = -ENOTSUPP;
dev_err(&pdev->dev,
"IO mapped PCI devices are not supported\n");
goto out_disable;
}
pci_set_drvdata(pdev, priv);
priv->bus = mcb_alloc_bus(&pdev->dev);
if (IS_ERR(priv->bus)) {
ret = PTR_ERR(priv->bus);
goto out_disable;
}
priv->bus->get_irq = mcb_pci_get_irq;
ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base);
if (ret < 0)
goto out_mcb_bus;
table_size = ret;
if (table_size < CHAM_HEADER_SIZE) {
/* Release the previous resources */
devm_iounmap(&pdev->dev, priv->base);
devm_release_mem_region(&pdev->dev, priv->mapbase, CHAM_HEADER_SIZE);
/* Then, allocate it again with the actual chameleon table size */
res = devm_request_mem_region(&pdev->dev, priv->mapbase,
table_size,
KBUILD_MODNAME);
if (!res) {
dev_err(&pdev->dev, "Failed to request PCI memory\n");
ret = -EBUSY;
goto out_mcb_bus;
}
priv->base = devm_ioremap(&pdev->dev, priv->mapbase, table_size);
if (!priv->base) {
dev_err(&pdev->dev, "Cannot ioremap\n");
ret = -ENOMEM;
goto out_mcb_bus;
}
}
mcb_bus_add_devices(priv->bus);
return 0;
out_mcb_bus:
mcb_release_bus(priv->bus);
out_disable:
pci_disable_device(pdev);
return ret;
}
static void mcb_pci_remove(struct pci_dev *pdev)
{
struct priv *priv = pci_get_drvdata(pdev);
mcb_release_bus(priv->bus);
pci_disable_device(pdev);
}
static const struct pci_device_id mcb_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEN, PCI_DEVICE_ID_MEN_CHAMELEON) },
{ PCI_DEVICE(PCI_VENDOR_ID_ALTERA, PCI_DEVICE_ID_MEN_CHAMELEON) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, mcb_pci_tbl);
static struct pci_driver mcb_pci_driver = {
.name = "mcb-pci",
.id_table = mcb_pci_tbl,
.probe = mcb_pci_probe,
.remove = mcb_pci_remove,
};
module_pci_driver(mcb_pci_driver);
MODULE_AUTHOR("Johannes Thumshirn <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MCB over PCI support");
MODULE_IMPORT_NS(MCB);
| linux-master | drivers/mcb/mcb-pci.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* MEN Chameleon Bus.
*
* Copyright (C) 2014 MEN Mikroelektronik GmbH (www.men.de)
* Author: Andreas Werner <[email protected]>
*/
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/dmi.h>
#include <linux/mcb.h>
#include <linux/io.h>
#include "mcb-internal.h"
struct priv {
struct mcb_bus *bus;
struct resource *mem;
void __iomem *base;
};
static int mcb_lpc_probe(struct platform_device *pdev)
{
struct resource *res;
struct priv *priv;
int ret = 0, table_size;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!priv->mem) {
dev_err(&pdev->dev, "No Memory resource\n");
return -ENODEV;
}
res = devm_request_mem_region(&pdev->dev, priv->mem->start,
resource_size(priv->mem),
KBUILD_MODNAME);
if (!res) {
dev_err(&pdev->dev, "Failed to request IO memory\n");
return -EBUSY;
}
priv->base = devm_ioremap(&pdev->dev, priv->mem->start,
resource_size(priv->mem));
if (!priv->base) {
dev_err(&pdev->dev, "Cannot ioremap\n");
return -ENOMEM;
}
platform_set_drvdata(pdev, priv);
priv->bus = mcb_alloc_bus(&pdev->dev);
if (IS_ERR(priv->bus))
return PTR_ERR(priv->bus);
ret = chameleon_parse_cells(priv->bus, priv->mem->start, priv->base);
if (ret < 0) {
goto out_mcb_bus;
}
table_size = ret;
if (table_size < CHAM_HEADER_SIZE) {
/* Release the previous resources */
devm_iounmap(&pdev->dev, priv->base);
devm_release_mem_region(&pdev->dev, priv->mem->start, resource_size(priv->mem));
/* Then, allocate it again with the actual chameleon table size */
res = devm_request_mem_region(&pdev->dev, priv->mem->start,
table_size,
KBUILD_MODNAME);
if (!res) {
dev_err(&pdev->dev, "Failed to request PCI memory\n");
ret = -EBUSY;
goto out_mcb_bus;
}
priv->base = devm_ioremap(&pdev->dev, priv->mem->start, table_size);
if (!priv->base) {
dev_err(&pdev->dev, "Cannot ioremap\n");
ret = -ENOMEM;
goto out_mcb_bus;
}
platform_set_drvdata(pdev, priv);
}
mcb_bus_add_devices(priv->bus);
return 0;
out_mcb_bus:
mcb_release_bus(priv->bus);
return ret;
}
static int mcb_lpc_remove(struct platform_device *pdev)
{
struct priv *priv = platform_get_drvdata(pdev);
mcb_release_bus(priv->bus);
return 0;
}
static struct platform_device *mcb_lpc_pdev;
static int mcb_lpc_create_platform_device(const struct dmi_system_id *id)
{
struct resource *res = id->driver_data;
int ret;
mcb_lpc_pdev = platform_device_alloc("mcb-lpc", -1);
if (!mcb_lpc_pdev)
return -ENOMEM;
ret = platform_device_add_resources(mcb_lpc_pdev, res, 1);
if (ret)
goto out_put;
ret = platform_device_add(mcb_lpc_pdev);
if (ret)
goto out_put;
return 0;
out_put:
platform_device_put(mcb_lpc_pdev);
return ret;
}
static struct resource sc24_fpga_resource = DEFINE_RES_MEM(0xe000e000, CHAM_HEADER_SIZE);
static struct resource sc31_fpga_resource = DEFINE_RES_MEM(0xf000e000, CHAM_HEADER_SIZE);
static struct platform_driver mcb_lpc_driver = {
.driver = {
.name = "mcb-lpc",
},
.probe = mcb_lpc_probe,
.remove = mcb_lpc_remove,
};
static const struct dmi_system_id mcb_lpc_dmi_table[] = {
{
.ident = "SC24",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MEN"),
DMI_MATCH(DMI_PRODUCT_VERSION, "14SC24"),
},
.driver_data = (void *)&sc24_fpga_resource,
.callback = mcb_lpc_create_platform_device,
},
{
.ident = "SC31",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MEN"),
DMI_MATCH(DMI_PRODUCT_VERSION, "14SC31"),
},
.driver_data = (void *)&sc31_fpga_resource,
.callback = mcb_lpc_create_platform_device,
},
{}
};
MODULE_DEVICE_TABLE(dmi, mcb_lpc_dmi_table);
static int __init mcb_lpc_init(void)
{
if (!dmi_check_system(mcb_lpc_dmi_table))
return -ENODEV;
return platform_driver_register(&mcb_lpc_driver);
}
static void __exit mcb_lpc_exit(void)
{
platform_device_unregister(mcb_lpc_pdev);
platform_driver_unregister(&mcb_lpc_driver);
}
module_init(mcb_lpc_init);
module_exit(mcb_lpc_exit);
MODULE_AUTHOR("Andreas Werner <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MCB over LPC support");
MODULE_IMPORT_NS(MCB);
| linux-master | drivers/mcb/mcb-lpc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* MEN Chameleon Bus.
*
* Copyright (C) 2013 MEN Mikroelektronik GmbH (www.men.de)
* Author: Johannes Thumshirn <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/idr.h>
#include <linux/mcb.h>
static DEFINE_IDA(mcb_ida);
static const struct mcb_device_id *mcb_match_id(const struct mcb_device_id *ids,
struct mcb_device *dev)
{
if (ids) {
while (ids->device) {
if (ids->device == dev->id)
return ids;
ids++;
}
}
return NULL;
}
static int mcb_match(struct device *dev, struct device_driver *drv)
{
struct mcb_driver *mdrv = to_mcb_driver(drv);
struct mcb_device *mdev = to_mcb_device(dev);
const struct mcb_device_id *found_id;
found_id = mcb_match_id(mdrv->id_table, mdev);
if (found_id)
return 1;
return 0;
}
static int mcb_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct mcb_device *mdev = to_mcb_device(dev);
int ret;
ret = add_uevent_var(env, "MODALIAS=mcb:16z%03d", mdev->id);
if (ret)
return -ENOMEM;
return 0;
}
static int mcb_probe(struct device *dev)
{
struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
struct mcb_device *mdev = to_mcb_device(dev);
const struct mcb_device_id *found_id;
struct module *carrier_mod;
int ret;
found_id = mcb_match_id(mdrv->id_table, mdev);
if (!found_id)
return -ENODEV;
carrier_mod = mdev->dev.parent->driver->owner;
if (!try_module_get(carrier_mod))
return -EINVAL;
get_device(dev);
ret = mdrv->probe(mdev, found_id);
if (ret) {
module_put(carrier_mod);
put_device(dev);
}
return ret;
}
static void mcb_remove(struct device *dev)
{
struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
struct mcb_device *mdev = to_mcb_device(dev);
struct module *carrier_mod;
mdrv->remove(mdev);
carrier_mod = mdev->dev.parent->driver->owner;
module_put(carrier_mod);
put_device(&mdev->dev);
}
static void mcb_shutdown(struct device *dev)
{
struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
struct mcb_device *mdev = to_mcb_device(dev);
if (mdrv && mdrv->shutdown)
mdrv->shutdown(mdev);
}
static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct mcb_bus *bus = to_mcb_bus(dev);
return scnprintf(buf, PAGE_SIZE, "%d\n", bus->revision);
}
static DEVICE_ATTR_RO(revision);
static ssize_t model_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct mcb_bus *bus = to_mcb_bus(dev);
return scnprintf(buf, PAGE_SIZE, "%c\n", bus->model);
}
static DEVICE_ATTR_RO(model);
static ssize_t minor_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct mcb_bus *bus = to_mcb_bus(dev);
return scnprintf(buf, PAGE_SIZE, "%d\n", bus->minor);
}
static DEVICE_ATTR_RO(minor);
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct mcb_bus *bus = to_mcb_bus(dev);
return scnprintf(buf, PAGE_SIZE, "%s\n", bus->name);
}
static DEVICE_ATTR_RO(name);
static struct attribute *mcb_bus_attrs[] = {
&dev_attr_revision.attr,
&dev_attr_model.attr,
&dev_attr_minor.attr,
&dev_attr_name.attr,
NULL,
};
static const struct attribute_group mcb_carrier_group = {
.attrs = mcb_bus_attrs,
};
static const struct attribute_group *mcb_carrier_groups[] = {
&mcb_carrier_group,
NULL,
};
static struct bus_type mcb_bus_type = {
.name = "mcb",
.match = mcb_match,
.uevent = mcb_uevent,
.probe = mcb_probe,
.remove = mcb_remove,
.shutdown = mcb_shutdown,
};
static struct device_type mcb_carrier_device_type = {
.name = "mcb-carrier",
.groups = mcb_carrier_groups,
};
/**
 * __mcb_register_driver() - Register a @mcb_driver with the system
* @drv: The @mcb_driver
* @owner: The @mcb_driver's module
* @mod_name: The name of the @mcb_driver's module
*
 * Register a @mcb_driver with the system. Performs a sanity check that
 * the driver provides both .probe and .remove methods.
*/
int __mcb_register_driver(struct mcb_driver *drv, struct module *owner,
const char *mod_name)
{
if (!drv->probe || !drv->remove)
return -EINVAL;
drv->driver.owner = owner;
drv->driver.bus = &mcb_bus_type;
drv->driver.mod_name = mod_name;
return driver_register(&drv->driver);
}
EXPORT_SYMBOL_NS_GPL(__mcb_register_driver, MCB);
/**
* mcb_unregister_driver() - Unregister a @mcb_driver from the system
* @drv: The @mcb_driver
*
* Unregister a @mcb_driver from the system.
*/
void mcb_unregister_driver(struct mcb_driver *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_NS_GPL(mcb_unregister_driver, MCB);
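/*
 * Illustrative client-driver registration (not from this file; the device
 * ID and the "foo" names are made up, and the module_mcb_driver() helper is
 * assumed to be provided by <linux/mcb.h>):
 *
 *	static const struct mcb_device_id foo_ids[] = {
 *		{ .device = 0x7b },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(mcb, foo_ids);
 *
 *	static struct mcb_driver foo_driver = {
 *		.driver.name	= "foo",
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *		.id_table	= foo_ids,
 *	};
 *	module_mcb_driver(foo_driver);
 */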
static void mcb_release_dev(struct device *dev)
{
struct mcb_device *mdev = to_mcb_device(dev);
mcb_bus_put(mdev->bus);
kfree(mdev);
}
/**
* mcb_device_register() - Register a mcb_device
* @bus: The @mcb_bus of the device
* @dev: The @mcb_device
*
* Register a specific @mcb_device at a @mcb_bus and the system itself.
*/
int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev)
{
int ret;
int device_id;
device_initialize(&dev->dev);
mcb_bus_get(bus);
dev->dev.bus = &mcb_bus_type;
dev->dev.parent = bus->dev.parent;
dev->dev.release = mcb_release_dev;
dev->dma_dev = bus->carrier;
device_id = dev->id;
dev_set_name(&dev->dev, "mcb%d-16z%03d-%d:%d:%d",
bus->bus_nr, device_id, dev->inst, dev->group, dev->var);
ret = device_add(&dev->dev);
if (ret < 0) {
pr_err("Failed registering device 16z%03d on bus mcb%d (%d)\n",
device_id, bus->bus_nr, ret);
goto out;
}
return 0;
out:
return ret;
}
EXPORT_SYMBOL_NS_GPL(mcb_device_register, MCB);
static void mcb_free_bus(struct device *dev)
{
struct mcb_bus *bus = to_mcb_bus(dev);
put_device(bus->carrier);
ida_free(&mcb_ida, bus->bus_nr);
kfree(bus);
}
/**
 * mcb_alloc_bus() - Allocate a new @mcb_bus
 * @carrier: the carrier device backing the new bus
 *
 * Allocate a new @mcb_bus.
*/
struct mcb_bus *mcb_alloc_bus(struct device *carrier)
{
struct mcb_bus *bus;
int bus_nr;
int rc;
bus = kzalloc(sizeof(struct mcb_bus), GFP_KERNEL);
if (!bus)
return ERR_PTR(-ENOMEM);
bus_nr = ida_alloc(&mcb_ida, GFP_KERNEL);
if (bus_nr < 0) {
kfree(bus);
return ERR_PTR(bus_nr);
}
bus->bus_nr = bus_nr;
bus->carrier = get_device(carrier);
device_initialize(&bus->dev);
bus->dev.parent = carrier;
bus->dev.bus = &mcb_bus_type;
bus->dev.type = &mcb_carrier_device_type;
bus->dev.release = &mcb_free_bus;
dev_set_name(&bus->dev, "mcb:%d", bus_nr);
rc = device_add(&bus->dev);
if (rc)
goto err_put;
return bus;
err_put:
put_device(&bus->dev);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(mcb_alloc_bus, MCB);
static int __mcb_devices_unregister(struct device *dev, void *data)
{
device_unregister(dev);
return 0;
}
static void mcb_devices_unregister(struct mcb_bus *bus)
{
bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_devices_unregister);
}
/**
* mcb_release_bus() - Free a @mcb_bus
* @bus: The @mcb_bus to release
*
* Release an allocated @mcb_bus from the system.
*/
void mcb_release_bus(struct mcb_bus *bus)
{
mcb_devices_unregister(bus);
}
EXPORT_SYMBOL_NS_GPL(mcb_release_bus, MCB);
/**
 * mcb_bus_get() - Increment refcnt
* @bus: The @mcb_bus
*
* Get a @mcb_bus' ref
*/
struct mcb_bus *mcb_bus_get(struct mcb_bus *bus)
{
if (bus)
get_device(&bus->dev);
return bus;
}
EXPORT_SYMBOL_NS_GPL(mcb_bus_get, MCB);
/**
* mcb_bus_put() - Decrement refcnt
* @bus: The @mcb_bus
*
* Release a @mcb_bus' ref
*/
void mcb_bus_put(struct mcb_bus *bus)
{
if (bus)
put_device(&bus->dev);
}
EXPORT_SYMBOL_NS_GPL(mcb_bus_put, MCB);
/**
* mcb_alloc_dev() - Allocate a device
* @bus: The @mcb_bus the device is part of
*
 * Allocate a @mcb_device and assign @bus to it.
*/
struct mcb_device *mcb_alloc_dev(struct mcb_bus *bus)
{
struct mcb_device *dev;
dev = kzalloc(sizeof(struct mcb_device), GFP_KERNEL);
if (!dev)
return NULL;
dev->bus = bus;
return dev;
}
EXPORT_SYMBOL_NS_GPL(mcb_alloc_dev, MCB);
/**
* mcb_free_dev() - Free @mcb_device
* @dev: The device to free
*
* Free a @mcb_device
*/
void mcb_free_dev(struct mcb_device *dev)
{
kfree(dev);
}
EXPORT_SYMBOL_NS_GPL(mcb_free_dev, MCB);
static int __mcb_bus_add_devices(struct device *dev, void *data)
{
struct mcb_device *mdev = to_mcb_device(dev);
int retval;
if (mdev->is_added)
return 0;
retval = device_attach(dev);
if (retval < 0)
dev_err(dev, "Error adding device (%d)\n", retval);
mdev->is_added = true;
return 0;
}
/**
* mcb_bus_add_devices() - Add devices in the bus' internal device list
 * @bus: The @mcb_bus whose devices should be added
*
* Add devices in the bus' internal device list to the system.
*/
void mcb_bus_add_devices(const struct mcb_bus *bus)
{
bus_for_each_dev(&mcb_bus_type, NULL, NULL, __mcb_bus_add_devices);
}
EXPORT_SYMBOL_NS_GPL(mcb_bus_add_devices, MCB);
/**
* mcb_get_resource() - get a resource for a mcb device
* @dev: the mcb device
* @type: the type of resource
*/
struct resource *mcb_get_resource(struct mcb_device *dev, unsigned int type)
{
if (type == IORESOURCE_MEM)
return &dev->mem;
else if (type == IORESOURCE_IRQ)
return &dev->irq;
else
return NULL;
}
EXPORT_SYMBOL_NS_GPL(mcb_get_resource, MCB);
/**
* mcb_request_mem() - Request memory
* @dev: The @mcb_device the memory is for
* @name: The name for the memory reference.
*
* Request memory for a @mcb_device. If @name is NULL the driver name will
* be used.
*/
struct resource *mcb_request_mem(struct mcb_device *dev, const char *name)
{
struct resource *mem;
u32 size;
if (!name)
name = dev->dev.driver->name;
size = resource_size(&dev->mem);
mem = request_mem_region(dev->mem.start, size, name);
if (!mem)
return ERR_PTR(-EBUSY);
return mem;
}
EXPORT_SYMBOL_NS_GPL(mcb_request_mem, MCB);
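/*
 * A client driver's probe() would typically pair mcb_request_mem() with an
 * ioremap of the returned region and mcb_get_irq() (a sketch only; "mdev",
 * "mem", "base" and "irq" are hypothetical locals):
 *
 *	mem = mcb_request_mem(mdev, NULL);
 *	if (IS_ERR(mem))
 *		return PTR_ERR(mem);
 *	base = ioremap(mem->start, resource_size(mem));
 *	irq = mcb_get_irq(mdev);
 */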
/**
* mcb_release_mem() - Release memory requested by device
 * @mem: The memory region to release
 *
 * Release memory that was previously requested via mcb_request_mem().
*/
void mcb_release_mem(struct resource *mem)
{
u32 size;
size = resource_size(mem);
release_mem_region(mem->start, size);
}
EXPORT_SYMBOL_NS_GPL(mcb_release_mem, MCB);
static int __mcb_get_irq(struct mcb_device *dev)
{
struct resource *irq;
irq = mcb_get_resource(dev, IORESOURCE_IRQ);
return irq->start;
}
/**
* mcb_get_irq() - Get device's IRQ number
* @dev: The @mcb_device the IRQ is for
*
* Get the IRQ number of a given @mcb_device.
*/
int mcb_get_irq(struct mcb_device *dev)
{
struct mcb_bus *bus = dev->bus;
if (bus->get_irq)
return bus->get_irq(dev);
return __mcb_get_irq(dev);
}
EXPORT_SYMBOL_NS_GPL(mcb_get_irq, MCB);
static int mcb_init(void)
{
return bus_register(&mcb_bus_type);
}
static void mcb_exit(void)
{
ida_destroy(&mcb_ida);
bus_unregister(&mcb_bus_type);
}
/* mcb must be initialized after PCI but before the chameleon drivers.
* That means we must use some initcall between subsys_initcall and
* device_initcall.
*/
fs_initcall(mcb_init);
module_exit(mcb_exit);
MODULE_DESCRIPTION("MEN Chameleon Bus Driver");
MODULE_AUTHOR("Johannes Thumshirn <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/mcb/mcb-core.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2023, Linaro Ltd.
*/
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect-clk.h>
#include <linux/interconnect-provider.h>
struct icc_clk_node {
struct clk *clk;
bool enabled;
};
struct icc_clk_provider {
struct icc_provider provider;
int num_clocks;
struct icc_clk_node clocks[] __counted_by(num_clocks);
};
#define to_icc_clk_provider(_provider) \
container_of(_provider, struct icc_clk_provider, provider)
static int icc_clk_set(struct icc_node *src, struct icc_node *dst)
{
struct icc_clk_node *qn = src->data;
int ret;
if (!qn || !qn->clk)
return 0;
if (!src->peak_bw) {
if (qn->enabled)
clk_disable_unprepare(qn->clk);
qn->enabled = false;
return 0;
}
if (!qn->enabled) {
ret = clk_prepare_enable(qn->clk);
if (ret)
return ret;
qn->enabled = true;
}
return clk_set_rate(qn->clk, icc_units_to_bps(src->peak_bw));
}
static int icc_clk_get_bw(struct icc_node *node, u32 *avg, u32 *peak)
{
struct icc_clk_node *qn = node->data;
if (!qn || !qn->clk)
*peak = INT_MAX;
else
*peak = Bps_to_icc(clk_get_rate(qn->clk));
return 0;
}
/**
* icc_clk_register() - register a new clk-based interconnect provider
* @dev: device supporting this provider
 * @first_id: the ID of the provider's first node
 * @num_clocks: number of instances of struct icc_clk_data
 * @data: data for the provider
 *
 * Registers and returns a clk-based interconnect provider. It is a simple
 * wrapper around the COMMON_CLK framework, allowing other devices to vote on
 * the clock rate.
 *
 * Return: a valid provider pointer on success, or ERR_PTR() otherwise
*/
struct icc_provider *icc_clk_register(struct device *dev,
unsigned int first_id,
unsigned int num_clocks,
const struct icc_clk_data *data)
{
struct icc_clk_provider *qp;
struct icc_provider *provider;
struct icc_onecell_data *onecell;
struct icc_node *node;
int ret, i, j;
onecell = devm_kzalloc(dev, struct_size(onecell, nodes, 2 * num_clocks), GFP_KERNEL);
if (!onecell)
return ERR_PTR(-ENOMEM);
qp = devm_kzalloc(dev, struct_size(qp, clocks, num_clocks), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
qp->num_clocks = num_clocks;
provider = &qp->provider;
provider->dev = dev;
provider->get_bw = icc_clk_get_bw;
provider->set = icc_clk_set;
provider->aggregate = icc_std_aggregate;
provider->xlate = of_icc_xlate_onecell;
INIT_LIST_HEAD(&provider->nodes);
provider->data = onecell;
icc_provider_init(provider);
for (i = 0, j = 0; i < num_clocks; i++) {
qp->clocks[i].clk = data[i].clk;
node = icc_node_create(first_id + j);
if (IS_ERR(node)) {
ret = PTR_ERR(node);
goto err;
}
node->name = devm_kasprintf(dev, GFP_KERNEL, "%s_master", data[i].name);
node->data = &qp->clocks[i];
icc_node_add(node, provider);
/* link to the next node, slave */
icc_link_create(node, first_id + j + 1);
onecell->nodes[j++] = node;
node = icc_node_create(first_id + j);
if (IS_ERR(node)) {
ret = PTR_ERR(node);
goto err;
}
node->name = devm_kasprintf(dev, GFP_KERNEL, "%s_slave", data[i].name);
/* no data for slave node */
icc_node_add(node, provider);
onecell->nodes[j++] = node;
}
onecell->num_nodes = j;
ret = icc_provider_register(provider);
if (ret)
goto err;
return provider;
err:
icc_nodes_remove(provider);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(icc_clk_register);
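/*
 * Example registration (a sketch under assumptions: "bus_clk" is a clock
 * handle the caller already owns and "FIRST_NODE_ID" is a placeholder node
 * ID invented for illustration):
 *
 *	struct icc_clk_data clk_data[] = {
 *		{ .clk = bus_clk, .name = "bus" },
 *	};
 *
 *	provider = icc_clk_register(dev, FIRST_NODE_ID,
 *				    ARRAY_SIZE(clk_data), clk_data);
 *	if (IS_ERR(provider))
 *		return PTR_ERR(provider);
 */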
/**
* icc_clk_unregister() - unregister a previously registered clk interconnect provider
* @provider: provider returned by icc_clk_register()
*/
void icc_clk_unregister(struct icc_provider *provider)
{
struct icc_clk_provider *qp = container_of(provider, struct icc_clk_provider, provider);
int i;
icc_provider_deregister(&qp->provider);
icc_nodes_remove(&qp->provider);
for (i = 0; i < qp->num_clocks; i++) {
struct icc_clk_node *qn = &qp->clocks[i];
if (qn->enabled)
clk_disable_unprepare(qn->clk);
}
}
EXPORT_SYMBOL_GPL(icc_clk_unregister);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Interconnect wrapper for clocks");
MODULE_AUTHOR("Dmitry Baryshkov <[email protected]>");
| linux-master | drivers/interconnect/icc-clk.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/debugfs.h>
#include <linux/interconnect.h>
#include <linux/platform_device.h>
#include "internal.h"
/*
* This can be dangerous, therefore don't provide any real compile time
* configuration option for this feature.
* People who want to use this will need to modify the source code directly.
*/
#undef INTERCONNECT_ALLOW_WRITE_DEBUGFS
#if defined(INTERCONNECT_ALLOW_WRITE_DEBUGFS) && defined(CONFIG_DEBUG_FS)
static LIST_HEAD(debugfs_paths);
static DEFINE_MUTEX(debugfs_lock);
static struct platform_device *pdev;
static struct icc_path *cur_path;
static char *src_node;
static char *dst_node;
static u32 avg_bw;
static u32 peak_bw;
static u32 tag;
struct debugfs_path {
const char *src;
const char *dst;
struct icc_path *path;
struct list_head list;
};
static struct icc_path *get_path(const char *src, const char *dst)
{
struct debugfs_path *path;
list_for_each_entry(path, &debugfs_paths, list) {
if (!strcmp(path->src, src) && !strcmp(path->dst, dst))
return path->path;
}
return NULL;
}
static int icc_get_set(void *data, u64 val)
{
struct debugfs_path *debugfs_path;
char *src, *dst;
int ret = 0;
mutex_lock(&debugfs_lock);
rcu_read_lock();
src = rcu_dereference(src_node);
dst = rcu_dereference(dst_node);
/*
* If we've already looked up a path, then use the existing one instead
* of calling icc_get() again. This allows for updating previous BW
* votes when "get" is written to multiple times for multiple paths.
*/
cur_path = get_path(src, dst);
if (cur_path) {
rcu_read_unlock();
goto out;
}
src = kstrdup(src, GFP_ATOMIC);
dst = kstrdup(dst, GFP_ATOMIC);
rcu_read_unlock();
if (!src || !dst) {
ret = -ENOMEM;
goto err_free;
}
cur_path = icc_get(&pdev->dev, src, dst);
if (IS_ERR(cur_path)) {
ret = PTR_ERR(cur_path);
goto err_free;
}
debugfs_path = kzalloc(sizeof(*debugfs_path), GFP_KERNEL);
if (!debugfs_path) {
ret = -ENOMEM;
goto err_put;
}
debugfs_path->path = cur_path;
debugfs_path->src = src;
debugfs_path->dst = dst;
list_add_tail(&debugfs_path->list, &debugfs_paths);
goto out;
err_put:
icc_put(cur_path);
err_free:
kfree(src);
kfree(dst);
out:
mutex_unlock(&debugfs_lock);
return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(icc_get_fops, NULL, icc_get_set, "%llu\n");
static int icc_commit_set(void *data, u64 val)
{
int ret;
mutex_lock(&debugfs_lock);
if (IS_ERR_OR_NULL(cur_path)) {
		/* no path has been acquired via "get" yet */
		ret = cur_path ? PTR_ERR(cur_path) : -EINVAL;
goto out;
}
icc_set_tag(cur_path, tag);
ret = icc_set_bw(cur_path, avg_bw, peak_bw);
out:
mutex_unlock(&debugfs_lock);
return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(icc_commit_fops, NULL, icc_commit_set, "%llu\n");
int icc_debugfs_client_init(struct dentry *icc_dir)
{
struct dentry *client_dir;
int ret;
	pdev = platform_device_alloc("icc-debugfs-client", PLATFORM_DEVID_NONE);
	if (!pdev)
		return -ENOMEM;
	ret = platform_device_add(pdev);
if (ret) {
pr_err("%s: failed to add platform device: %d\n", __func__, ret);
platform_device_put(pdev);
return ret;
}
client_dir = debugfs_create_dir("test_client", icc_dir);
debugfs_create_str("src_node", 0600, client_dir, &src_node);
debugfs_create_str("dst_node", 0600, client_dir, &dst_node);
debugfs_create_file("get", 0200, client_dir, NULL, &icc_get_fops);
debugfs_create_u32("avg_bw", 0600, client_dir, &avg_bw);
debugfs_create_u32("peak_bw", 0600, client_dir, &peak_bw);
debugfs_create_u32("tag", 0600, client_dir, &tag);
debugfs_create_file("commit", 0200, client_dir, NULL, &icc_commit_fops);
return 0;
}
#else
int icc_debugfs_client_init(struct dentry *icc_dir)
{
return 0;
}
#endif
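/*
 * Expected debugfs usage, matching the files created above (for
 * illustration only; paths assume debugfs is mounted at /sys/kernel/debug
 * and that INTERCONNECT_ALLOW_WRITE_DEBUGFS was enabled in the source):
 *
 *	cd /sys/kernel/debug/interconnect/test_client
 *	echo <src node name> > src_node
 *	echo <dst node name> > dst_node
 *	echo 1 > get			- look up and cache the path
 *	echo 1000 > avg_bw
 *	echo 2000 > peak_bw
 *	echo 1 > commit			- apply the bandwidth vote
 */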
| linux-master | drivers/interconnect/debugfs-client.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Interconnect framework core driver
*
* Copyright (c) 2017-2019, Linaro Ltd.
* Author: Georgi Djakov <[email protected]>
*/
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/overflow.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
static DEFINE_IDR(icc_idr);
static LIST_HEAD(icc_providers);
static int providers_count;
static bool synced_state;
static DEFINE_MUTEX(icc_lock);
static DEFINE_MUTEX(icc_bw_lock);
static struct dentry *icc_debugfs_dir;
static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
{
if (!n)
return;
seq_printf(s, "%-42s %12u %12u\n",
n->name, n->avg_bw, n->peak_bw);
}
static int icc_summary_show(struct seq_file *s, void *data)
{
struct icc_provider *provider;
seq_puts(s, " node tag avg peak\n");
seq_puts(s, "--------------------------------------------------------------------\n");
mutex_lock(&icc_lock);
list_for_each_entry(provider, &icc_providers, provider_list) {
struct icc_node *n;
list_for_each_entry(n, &provider->nodes, node_list) {
struct icc_req *r;
icc_summary_show_one(s, n);
hlist_for_each_entry(r, &n->req_list, req_node) {
u32 avg_bw = 0, peak_bw = 0;
if (!r->dev)
continue;
if (r->enabled) {
avg_bw = r->avg_bw;
peak_bw = r->peak_bw;
}
seq_printf(s, " %-27s %12u %12u %12u\n",
dev_name(r->dev), r->tag, avg_bw, peak_bw);
}
}
}
mutex_unlock(&icc_lock);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(icc_summary);
static void icc_graph_show_link(struct seq_file *s, int level,
struct icc_node *n, struct icc_node *m)
{
seq_printf(s, "%s\"%d:%s\" -> \"%d:%s\"\n",
level == 2 ? "\t\t" : "\t",
n->id, n->name, m->id, m->name);
}
static void icc_graph_show_node(struct seq_file *s, struct icc_node *n)
{
seq_printf(s, "\t\t\"%d:%s\" [label=\"%d:%s",
n->id, n->name, n->id, n->name);
seq_printf(s, "\n\t\t\t|avg_bw=%ukBps", n->avg_bw);
seq_printf(s, "\n\t\t\t|peak_bw=%ukBps", n->peak_bw);
seq_puts(s, "\"]\n");
}
static int icc_graph_show(struct seq_file *s, void *data)
{
struct icc_provider *provider;
struct icc_node *n;
int cluster_index = 0;
int i;
seq_puts(s, "digraph {\n\trankdir = LR\n\tnode [shape = record]\n");
mutex_lock(&icc_lock);
/* draw providers as cluster subgraphs */
cluster_index = 0;
list_for_each_entry(provider, &icc_providers, provider_list) {
seq_printf(s, "\tsubgraph cluster_%d {\n", ++cluster_index);
if (provider->dev)
seq_printf(s, "\t\tlabel = \"%s\"\n",
dev_name(provider->dev));
/* draw nodes */
list_for_each_entry(n, &provider->nodes, node_list)
icc_graph_show_node(s, n);
/* draw internal links */
list_for_each_entry(n, &provider->nodes, node_list)
for (i = 0; i < n->num_links; ++i)
if (n->provider == n->links[i]->provider)
icc_graph_show_link(s, 2, n,
n->links[i]);
seq_puts(s, "\t}\n");
}
/* draw external links */
list_for_each_entry(provider, &icc_providers, provider_list)
list_for_each_entry(n, &provider->nodes, node_list)
for (i = 0; i < n->num_links; ++i)
if (n->provider != n->links[i]->provider)
icc_graph_show_link(s, 1, n,
n->links[i]);
mutex_unlock(&icc_lock);
seq_puts(s, "}");
return 0;
}
DEFINE_SHOW_ATTRIBUTE(icc_graph);
static struct icc_node *node_find(const int id)
{
return idr_find(&icc_idr, id);
}
static struct icc_node *node_find_by_name(const char *name)
{
struct icc_provider *provider;
struct icc_node *n;
list_for_each_entry(provider, &icc_providers, provider_list) {
list_for_each_entry(n, &provider->nodes, node_list) {
if (!strcmp(n->name, name))
return n;
}
}
return NULL;
}
static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
ssize_t num_nodes)
{
struct icc_node *node = dst;
struct icc_path *path;
int i;
path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL);
if (!path)
return ERR_PTR(-ENOMEM);
path->num_nodes = num_nodes;
for (i = num_nodes - 1; i >= 0; i--) {
node->provider->users++;
hlist_add_head(&path->reqs[i].req_node, &node->req_list);
path->reqs[i].node = node;
path->reqs[i].dev = dev;
path->reqs[i].enabled = true;
/* reference to previous node was saved during path traversal */
node = node->reverse;
}
return path;
}
static struct icc_path *path_find(struct device *dev, struct icc_node *src,
struct icc_node *dst)
{
struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
struct icc_node *n, *node = NULL;
struct list_head traverse_list;
struct list_head edge_list;
struct list_head visited_list;
size_t i, depth = 1;
bool found = false;
INIT_LIST_HEAD(&traverse_list);
INIT_LIST_HEAD(&edge_list);
INIT_LIST_HEAD(&visited_list);
list_add(&src->search_list, &traverse_list);
src->reverse = NULL;
do {
list_for_each_entry_safe(node, n, &traverse_list, search_list) {
if (node == dst) {
found = true;
list_splice_init(&edge_list, &visited_list);
list_splice_init(&traverse_list, &visited_list);
break;
}
for (i = 0; i < node->num_links; i++) {
struct icc_node *tmp = node->links[i];
if (!tmp) {
path = ERR_PTR(-ENOENT);
goto out;
}
if (tmp->is_traversed)
continue;
tmp->is_traversed = true;
tmp->reverse = node;
list_add_tail(&tmp->search_list, &edge_list);
}
}
if (found)
break;
list_splice_init(&traverse_list, &visited_list);
list_splice_init(&edge_list, &traverse_list);
/* count the hops including the source */
depth++;
} while (!list_empty(&traverse_list));
out:
/* reset the traversed state */
list_for_each_entry_reverse(n, &visited_list, search_list)
n->is_traversed = false;
if (found)
path = path_init(dev, dst, depth);
return path;
}
/*
* We want the path to honor all bandwidth requests, so the average and peak
* bandwidth requirements from each consumer are aggregated at each node.
* The aggregation is platform specific, so each platform can customize it by
* implementing its own aggregate() function.
*/
static int aggregate_requests(struct icc_node *node)
{
struct icc_provider *p = node->provider;
struct icc_req *r;
u32 avg_bw, peak_bw;
node->avg_bw = 0;
node->peak_bw = 0;
if (p->pre_aggregate)
p->pre_aggregate(node);
hlist_for_each_entry(r, &node->req_list, req_node) {
if (r->enabled) {
avg_bw = r->avg_bw;
peak_bw = r->peak_bw;
} else {
avg_bw = 0;
peak_bw = 0;
}
p->aggregate(node, r->tag, avg_bw, peak_bw,
&node->avg_bw, &node->peak_bw);
/* during boot use the initial bandwidth as a floor value */
if (!synced_state) {
node->avg_bw = max(node->avg_bw, node->init_avg);
node->peak_bw = max(node->peak_bw, node->init_peak);
}
}
return 0;
}
static int apply_constraints(struct icc_path *path)
{
struct icc_node *next, *prev = NULL;
struct icc_provider *p;
int ret = -EINVAL;
int i;
for (i = 0; i < path->num_nodes; i++) {
next = path->reqs[i].node;
p = next->provider;
/* both endpoints should be valid master-slave pairs */
if (!prev || (p != prev->provider && !p->inter_set)) {
prev = next;
continue;
}
/* set the constraints */
ret = p->set(prev, next);
if (ret)
goto out;
prev = next;
}
out:
return ret;
}
int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
*agg_avg += avg_bw;
*agg_peak = max(*agg_peak, peak_bw);
return 0;
}
EXPORT_SYMBOL_GPL(icc_std_aggregate);
/**
 * of_icc_xlate_onecell() - Translate function using a single index.
* @spec: OF phandle args to map into an interconnect node.
* @data: private data (pointer to struct icc_onecell_data)
*
* This is a generic translate function that can be used to model simple
* interconnect providers that have one device tree node and provide
* multiple interconnect nodes. A single cell is used as an index into
* an array of icc nodes specified in the icc_onecell_data struct when
* registering the provider.
*/
struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
void *data)
{
struct icc_onecell_data *icc_data = data;
unsigned int idx = spec->args[0];
if (idx >= icc_data->num_nodes) {
pr_err("%s: invalid index %u\n", __func__, idx);
return ERR_PTR(-EINVAL);
}
return icc_data->nodes[idx];
}
EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
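/*
 * Provider-side sketch of wiring up this xlate (mirrors the pattern used by
 * icc_clk_register() above; "num" and the contents of the node array are
 * hypothetical):
 *
 *	onecell = devm_kzalloc(dev, struct_size(onecell, nodes, num),
 *			       GFP_KERNEL);
 *	onecell->num_nodes = num;
 *	provider->xlate = of_icc_xlate_onecell;
 *	provider->data = onecell;
 */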
/**
* of_icc_get_from_provider() - Look-up interconnect node
* @spec: OF phandle args to use for look-up
*
* Looks for interconnect provider under the node specified by @spec and if
* found, uses xlate function of the provider to map phandle args to node.
*
* Returns a valid pointer to struct icc_node_data on success or ERR_PTR()
* on failure.
*/
struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
{
struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
struct icc_node_data *data = NULL;
struct icc_provider *provider;
if (!spec)
return ERR_PTR(-EINVAL);
mutex_lock(&icc_lock);
list_for_each_entry(provider, &icc_providers, provider_list) {
if (provider->dev->of_node == spec->np) {
if (provider->xlate_extended) {
data = provider->xlate_extended(spec, provider->data);
if (!IS_ERR(data)) {
node = data->node;
break;
}
} else {
node = provider->xlate(spec, provider->data);
if (!IS_ERR(node))
break;
}
}
}
mutex_unlock(&icc_lock);
if (IS_ERR(node))
return ERR_CAST(node);
if (!data) {
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return ERR_PTR(-ENOMEM);
data->node = node;
}
return data;
}
EXPORT_SYMBOL_GPL(of_icc_get_from_provider);
static void devm_icc_release(struct device *dev, void *res)
{
icc_put(*(struct icc_path **)res);
}
struct icc_path *devm_of_icc_get(struct device *dev, const char *name)
{
struct icc_path **ptr, *path;
ptr = devres_alloc(devm_icc_release, sizeof(*ptr), GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
path = of_icc_get(dev, name);
if (!IS_ERR(path)) {
*ptr = path;
devres_add(dev, ptr);
} else {
devres_free(ptr);
}
return path;
}
EXPORT_SYMBOL_GPL(devm_of_icc_get);
/**
* of_icc_get_by_index() - get a path handle from a DT node based on index
* @dev: device pointer for the consumer device
* @idx: interconnect path index
*
* This function will search for a path between two endpoints and return an
* icc_path handle on success. Use icc_put() to release constraints when they
* are not needed anymore.
* If the interconnect API is disabled, NULL is returned and the consumer
* drivers will still build. Drivers are free to handle this specifically,
* but they don't have to.
*
* Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
* when the API is disabled or the "interconnects" DT property is missing.
*/
struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
{
struct icc_path *path;
struct icc_node_data *src_data, *dst_data;
struct device_node *np;
struct of_phandle_args src_args, dst_args;
int ret;
if (!dev || !dev->of_node)
return ERR_PTR(-ENODEV);
np = dev->of_node;
/*
 * When the consumer DT node does not have an "interconnects" property,
 * return a NULL path to skip setting constraints.
*/
if (!of_property_present(np, "interconnects"))
return NULL;
/*
 * We use a combination of phandle and specifier for each endpoint. For now,
 * let's support only global ids and extend this in the future if needed
 * without breaking DT compatibility.
*/
ret = of_parse_phandle_with_args(np, "interconnects",
"#interconnect-cells", idx * 2,
&src_args);
if (ret)
return ERR_PTR(ret);
of_node_put(src_args.np);
ret = of_parse_phandle_with_args(np, "interconnects",
"#interconnect-cells", idx * 2 + 1,
&dst_args);
if (ret)
return ERR_PTR(ret);
of_node_put(dst_args.np);
src_data = of_icc_get_from_provider(&src_args);
if (IS_ERR(src_data)) {
dev_err_probe(dev, PTR_ERR(src_data), "error finding src node\n");
return ERR_CAST(src_data);
}
dst_data = of_icc_get_from_provider(&dst_args);
if (IS_ERR(dst_data)) {
dev_err_probe(dev, PTR_ERR(dst_data), "error finding dst node\n");
kfree(src_data);
return ERR_CAST(dst_data);
}
mutex_lock(&icc_lock);
path = path_find(dev, src_data->node, dst_data->node);
mutex_unlock(&icc_lock);
if (IS_ERR(path)) {
dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
goto free_icc_data;
}
if (src_data->tag && src_data->tag == dst_data->tag)
icc_set_tag(path, src_data->tag);
path->name = kasprintf(GFP_KERNEL, "%s-%s",
src_data->node->name, dst_data->node->name);
if (!path->name) {
kfree(path);
path = ERR_PTR(-ENOMEM);
}
free_icc_data:
kfree(src_data);
kfree(dst_data);
return path;
}
EXPORT_SYMBOL_GPL(of_icc_get_by_index);
/**
* of_icc_get() - get a path handle from a DT node based on name
* @dev: device pointer for the consumer device
* @name: interconnect path name
*
* This function will search for a path between two endpoints and return an
* icc_path handle on success. Use icc_put() to release constraints when they
* are not needed anymore.
* If the interconnect API is disabled, NULL is returned and the consumer
* drivers will still build. Drivers are free to handle this specifically,
* but they don't have to.
*
* Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
* when the API is disabled or the "interconnects" DT property is missing.
*/
struct icc_path *of_icc_get(struct device *dev, const char *name)
{
struct device_node *np;
int idx = 0;
if (!dev || !dev->of_node)
return ERR_PTR(-ENODEV);
np = dev->of_node;
/*
 * When the consumer DT node does not have an "interconnects" property,
 * return a NULL path to skip setting constraints.
*/
if (!of_property_present(np, "interconnects"))
return NULL;
/*
 * We use a combination of phandle and specifier for each endpoint. For now,
 * let's support only global ids and extend this in the future if needed
 * without breaking DT compatibility.
*/
if (name) {
idx = of_property_match_string(np, "interconnect-names", name);
if (idx < 0)
return ERR_PTR(idx);
}
return of_icc_get_by_index(dev, idx);
}
EXPORT_SYMBOL_GPL(of_icc_get);
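/*
 * The DT layout this expects, per the parsing above (an illustrative
 * snippet; "&noc", the endpoint IDs and the path name are made up):
 *
 *	device {
 *		interconnects = <&noc MASTER_ID &noc SLAVE_ID>;
 *		interconnect-names = "memory";
 *	};
 *
 * A consumer would then call of_icc_get(dev, "memory") to obtain the path.
 */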
/**
* icc_get() - get a path handle between two endpoints
* @dev: device pointer for the consumer device
* @src: source node name
* @dst: destination node name
*
* This function will search for a path between two endpoints and return an
* icc_path handle on success. Use icc_put() to release constraints when they
* are not needed anymore.
*
* Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
* when the API is disabled.
*/
struct icc_path *icc_get(struct device *dev, const char *src, const char *dst)
{
struct icc_node *src_node, *dst_node;
struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
mutex_lock(&icc_lock);
src_node = node_find_by_name(src);
if (!src_node) {
dev_err(dev, "%s: invalid src=%s\n", __func__, src);
goto out;
}
dst_node = node_find_by_name(dst);
if (!dst_node) {
dev_err(dev, "%s: invalid dst=%s\n", __func__, dst);
goto out;
}
path = path_find(dev, src_node, dst_node);
if (IS_ERR(path)) {
dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
goto out;
}
path->name = kasprintf(GFP_KERNEL, "%s-%s", src_node->name, dst_node->name);
if (!path->name) {
kfree(path);
path = ERR_PTR(-ENOMEM);
}
out:
mutex_unlock(&icc_lock);
return path;
}
/**
* icc_set_tag() - set an optional tag on a path
* @path: the path we want to tag
* @tag: the tag value
*
* This function allows consumers to append a tag to the requests associated
* with a path, so that a different aggregation could be done based on this tag.
*/
void icc_set_tag(struct icc_path *path, u32 tag)
{
int i;
if (!path)
return;
mutex_lock(&icc_lock);
for (i = 0; i < path->num_nodes; i++)
path->reqs[i].tag = tag;
mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_set_tag);
/**
* icc_get_name() - Get name of the icc path
* @path: interconnect path
*
* This function is used by an interconnect consumer to get the name of the icc
* path.
*
* Returns a valid pointer on success, or NULL otherwise.
*/
const char *icc_get_name(struct icc_path *path)
{
if (!path)
return NULL;
return path->name;
}
EXPORT_SYMBOL_GPL(icc_get_name);
/**
* icc_set_bw() - set bandwidth constraints on an interconnect path
* @path: interconnect path
* @avg_bw: average bandwidth in kilobytes per second
* @peak_bw: peak bandwidth in kilobytes per second
*
* This function is used by an interconnect consumer to express its own needs
* in terms of bandwidth for a previously requested path between two endpoints.
* The requests are aggregated and each node is updated accordingly. The entire
* path is locked by a mutex to ensure that the set() is completed.
 * The @path can be NULL when the "interconnects" DT property is missing,
 * in which case no constraints will be set.
*
* Returns 0 on success, or an appropriate error code otherwise.
*/
int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
{
struct icc_node *node;
u32 old_avg, old_peak;
size_t i;
int ret;
if (!path)
return 0;
if (WARN_ON(IS_ERR(path) || !path->num_nodes))
return -EINVAL;
mutex_lock(&icc_bw_lock);
old_avg = path->reqs[0].avg_bw;
old_peak = path->reqs[0].peak_bw;
for (i = 0; i < path->num_nodes; i++) {
node = path->reqs[i].node;
/* update the consumer request for this path */
path->reqs[i].avg_bw = avg_bw;
path->reqs[i].peak_bw = peak_bw;
/* aggregate requests for this node */
aggregate_requests(node);
trace_icc_set_bw(path, node, i, avg_bw, peak_bw);
}
ret = apply_constraints(path);
if (ret) {
pr_debug("interconnect: error applying constraints (%d)\n",
ret);
for (i = 0; i < path->num_nodes; i++) {
node = path->reqs[i].node;
path->reqs[i].avg_bw = old_avg;
path->reqs[i].peak_bw = old_peak;
aggregate_requests(node);
}
apply_constraints(path);
}
mutex_unlock(&icc_bw_lock);
trace_icc_set_bw_end(path, ret);
return ret;
}
EXPORT_SYMBOL_GPL(icc_set_bw);
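/*
 * Consumer-side sketch combining path lookup and a bandwidth vote (the
 * path name and the numbers are illustrative; the MBps_to_icc() conversion
 * helper is assumed from <linux/interconnect.h>):
 *
 *	path = devm_of_icc_get(dev, "memory");
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	ret = icc_set_bw(path, MBps_to_icc(100), MBps_to_icc(200));
 */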
static int __icc_enable(struct icc_path *path, bool enable)
{
int i;
if (!path)
return 0;
if (WARN_ON(IS_ERR(path) || !path->num_nodes))
return -EINVAL;
mutex_lock(&icc_lock);
for (i = 0; i < path->num_nodes; i++)
path->reqs[i].enabled = enable;
mutex_unlock(&icc_lock);
return icc_set_bw(path, path->reqs[0].avg_bw,
path->reqs[0].peak_bw);
}
int icc_enable(struct icc_path *path)
{
return __icc_enable(path, true);
}
EXPORT_SYMBOL_GPL(icc_enable);
int icc_disable(struct icc_path *path)
{
return __icc_enable(path, false);
}
EXPORT_SYMBOL_GPL(icc_disable);
/**
* icc_put() - release the reference to the icc_path
* @path: interconnect path
*
* Use this function to release the constraints on a path when the path is
* no longer needed. The constraints will be re-aggregated.
*/
void icc_put(struct icc_path *path)
{
struct icc_node *node;
size_t i;
int ret;
if (!path || WARN_ON(IS_ERR(path)))
return;
ret = icc_set_bw(path, 0, 0);
if (ret)
pr_err("%s: error (%d)\n", __func__, ret);
mutex_lock(&icc_lock);
for (i = 0; i < path->num_nodes; i++) {
node = path->reqs[i].node;
hlist_del(&path->reqs[i].req_node);
if (!WARN_ON(!node->provider->users))
node->provider->users--;
}
mutex_unlock(&icc_lock);
kfree_const(path->name);
kfree(path);
}
EXPORT_SYMBOL_GPL(icc_put);
static struct icc_node *icc_node_create_nolock(int id)
{
struct icc_node *node;
/* check if node already exists */
node = node_find(id);
if (node)
return node;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return ERR_PTR(-ENOMEM);
id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL);
if (id < 0) {
WARN(1, "%s: couldn't get idr\n", __func__);
kfree(node);
return ERR_PTR(id);
}
node->id = id;
return node;
}
/**
* icc_node_create() - create a node
* @id: node id
*
* Return: icc_node pointer on success, or ERR_PTR() on error
*/
struct icc_node *icc_node_create(int id)
{
struct icc_node *node;
mutex_lock(&icc_lock);
node = icc_node_create_nolock(id);
mutex_unlock(&icc_lock);
return node;
}
EXPORT_SYMBOL_GPL(icc_node_create);
/**
* icc_node_destroy() - destroy a node
* @id: node id
*/
void icc_node_destroy(int id)
{
struct icc_node *node;
mutex_lock(&icc_lock);
node = node_find(id);
if (node) {
idr_remove(&icc_idr, node->id);
WARN_ON(!hlist_empty(&node->req_list));
}
mutex_unlock(&icc_lock);
if (!node)
return;
kfree(node->links);
kfree(node);
}
EXPORT_SYMBOL_GPL(icc_node_destroy);
/**
* icc_link_create() - create a link between two nodes
* @node: source node id
* @dst_id: destination node id
*
 * Create a link between two nodes. The nodes might belong to different
 * interconnect providers, and the @dst_id node might not exist yet (if the
 * provider driver has not probed). In that case just create the @dst_id
 * node; once the actual provider driver probes, the rest of the node data
 * is filled in.
*
* Return: 0 on success, or an error code otherwise
*/
int icc_link_create(struct icc_node *node, const int dst_id)
{
struct icc_node *dst;
struct icc_node **new;
int ret = 0;
if (!node->provider)
return -EINVAL;
mutex_lock(&icc_lock);
dst = node_find(dst_id);
if (!dst) {
dst = icc_node_create_nolock(dst_id);
if (IS_ERR(dst)) {
ret = PTR_ERR(dst);
goto out;
}
}
new = krealloc(node->links,
(node->num_links + 1) * sizeof(*node->links),
GFP_KERNEL);
if (!new) {
ret = -ENOMEM;
goto out;
}
node->links = new;
node->links[node->num_links++] = dst;
out:
mutex_unlock(&icc_lock);
return ret;
}
EXPORT_SYMBOL_GPL(icc_link_create);
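/*
 * Provider-side topology sketch (mirrors the usage in icc_clk_register()
 * above; the IDs and the name are placeholders):
 *
 *	node = icc_node_create(MASTER_ID);
 *	node->name = "master";
 *	icc_node_add(node, provider);
 *	icc_link_create(node, SLAVE_ID);
 */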
/**
* icc_node_add() - add interconnect node to interconnect provider
* @node: pointer to the interconnect node
* @provider: pointer to the interconnect provider
*/
void icc_node_add(struct icc_node *node, struct icc_provider *provider)
{
if (WARN_ON(node->provider))
return;
mutex_lock(&icc_lock);
mutex_lock(&icc_bw_lock);
node->provider = provider;
list_add_tail(&node->node_list, &provider->nodes);
/* get the initial bandwidth values and sync them with hardware */
if (provider->get_bw) {
provider->get_bw(node, &node->init_avg, &node->init_peak);
} else {
node->init_avg = INT_MAX;
node->init_peak = INT_MAX;
}
node->avg_bw = node->init_avg;
node->peak_bw = node->init_peak;
if (node->avg_bw || node->peak_bw) {
if (provider->pre_aggregate)
provider->pre_aggregate(node);
if (provider->aggregate)
provider->aggregate(node, 0, node->init_avg, node->init_peak,
&node->avg_bw, &node->peak_bw);
if (provider->set)
provider->set(node, node);
}
node->avg_bw = 0;
node->peak_bw = 0;
mutex_unlock(&icc_bw_lock);
mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_add);
/**
* icc_node_del() - delete interconnect node from interconnect provider
* @node: pointer to the interconnect node
*/
void icc_node_del(struct icc_node *node)
{
mutex_lock(&icc_lock);
list_del(&node->node_list);
mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_del);
/**
* icc_nodes_remove() - remove all previously added nodes from provider
* @provider: the interconnect provider we are removing nodes from
*
* Return: 0 on success, or an error code otherwise
*/
int icc_nodes_remove(struct icc_provider *provider)
{
struct icc_node *n, *tmp;
if (WARN_ON(IS_ERR_OR_NULL(provider)))
return -EINVAL;
list_for_each_entry_safe_reverse(n, tmp, &provider->nodes, node_list) {
icc_node_del(n);
icc_node_destroy(n->id);
}
return 0;
}
EXPORT_SYMBOL_GPL(icc_nodes_remove);
/**
* icc_provider_init() - initialize a new interconnect provider
* @provider: the interconnect provider to initialize
*
* Must be called before adding nodes to the provider.
*/
void icc_provider_init(struct icc_provider *provider)
{
WARN_ON(!provider->set);
INIT_LIST_HEAD(&provider->nodes);
}
EXPORT_SYMBOL_GPL(icc_provider_init);
/**
* icc_provider_register() - register a new interconnect provider
* @provider: the interconnect provider to register
*
* Return: 0 on success, or an error code otherwise
*/
int icc_provider_register(struct icc_provider *provider)
{
if (WARN_ON(!provider->xlate && !provider->xlate_extended))
return -EINVAL;
mutex_lock(&icc_lock);
list_add_tail(&provider->provider_list, &icc_providers);
mutex_unlock(&icc_lock);
dev_dbg(provider->dev, "interconnect provider registered\n");
return 0;
}
EXPORT_SYMBOL_GPL(icc_provider_register);
/**
* icc_provider_deregister() - deregister an interconnect provider
* @provider: the interconnect provider to deregister
*/
void icc_provider_deregister(struct icc_provider *provider)
{
mutex_lock(&icc_lock);
WARN_ON(provider->users);
list_del(&provider->provider_list);
mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_provider_deregister);
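/*
 * Editor's note: the expected provider lifecycle is icc_provider_init(),
 * then topology construction with icc_node_create()/icc_node_add()/
 * icc_link_create(), and finally icc_provider_register() once the
 * topology is complete. Teardown runs in reverse order:
 * icc_provider_deregister() first, then icc_nodes_remove(). The
 * imx_icc_register()/imx_icc_unregister() helpers in
 * drivers/interconnect/imx/imx.c follow exactly this pattern.
 */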
static const struct of_device_id __maybe_unused ignore_list[] = {
{ .compatible = "qcom,sc7180-ipa-virt" },
{ .compatible = "qcom,sc8180x-ipa-virt" },
{ .compatible = "qcom,sdx55-ipa-virt" },
{ .compatible = "qcom,sm8150-ipa-virt" },
{ .compatible = "qcom,sm8250-ipa-virt" },
{}
};
static int of_count_icc_providers(struct device_node *np)
{
struct device_node *child;
int count = 0;
for_each_available_child_of_node(np, child) {
if (of_property_read_bool(child, "#interconnect-cells") &&
likely(!of_match_node(ignore_list, child)))
count++;
count += of_count_icc_providers(child);
}
return count;
}
void icc_sync_state(struct device *dev)
{
struct icc_provider *p;
struct icc_node *n;
static int count;
count++;
if (count < providers_count)
return;
mutex_lock(&icc_lock);
mutex_lock(&icc_bw_lock);
synced_state = true;
list_for_each_entry(p, &icc_providers, provider_list) {
dev_dbg(p->dev, "interconnect provider is in synced state\n");
list_for_each_entry(n, &p->nodes, node_list) {
if (n->init_avg || n->init_peak) {
n->init_avg = 0;
n->init_peak = 0;
aggregate_requests(n);
p->set(n, n);
}
}
}
mutex_unlock(&icc_bw_lock);
mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_sync_state);
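/*
 * Example (editor's note): providers opt in to this mechanism by pointing
 * their platform driver's sync_state callback at icc_sync_state(), e.g.:
 *
 *	static struct platform_driver foo_icc_driver = {
 *		...
 *		.driver = {
 *			.name = "foo-interconnect",
 *			.sync_state = icc_sync_state,
 *		},
 *	};
 *
 * Once the last counted provider has synced, the boot-time floor votes
 * (init_avg/init_peak) are dropped and only consumer requests remain.
 * See drivers/interconnect/imx/imx8mq.c later in this collection for a
 * real user.
 */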
static int __init icc_init(void)
{
struct device_node *root;
/* Teach lockdep about lock ordering wrt. shrinker: */
fs_reclaim_acquire(GFP_KERNEL);
might_lock(&icc_bw_lock);
fs_reclaim_release(GFP_KERNEL);
root = of_find_node_by_path("/");
providers_count = of_count_icc_providers(root);
of_node_put(root);
icc_debugfs_dir = debugfs_create_dir("interconnect", NULL);
debugfs_create_file("interconnect_summary", 0444,
icc_debugfs_dir, NULL, &icc_summary_fops);
debugfs_create_file("interconnect_graph", 0444,
icc_debugfs_dir, NULL, &icc_graph_fops);
icc_debugfs_client_init(icc_debugfs_dir);
return 0;
}
device_initcall(icc_init);
| linux-master | drivers/interconnect/core.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/interconnect-provider.h>
#include <linux/device.h>
#include <linux/export.h>
/**
* of_icc_bulk_get() - get interconnect paths
* @dev: the device requesting the path
* @num_paths: the number of icc_bulk_data
* @paths: the table with the paths we want to get
*
* Returns 0 on success or negative errno otherwise.
*/
int __must_check of_icc_bulk_get(struct device *dev, int num_paths,
struct icc_bulk_data *paths)
{
int ret, i;
for (i = 0; i < num_paths; i++) {
paths[i].path = of_icc_get(dev, paths[i].name);
if (IS_ERR(paths[i].path)) {
ret = PTR_ERR(paths[i].path);
if (ret != -EPROBE_DEFER)
dev_err(dev, "of_icc_get() failed on path %s (%d)\n",
paths[i].name, ret);
paths[i].path = NULL;
goto err;
}
}
return 0;
err:
icc_bulk_put(i, paths);
return ret;
}
EXPORT_SYMBOL_GPL(of_icc_bulk_get);
/**
* icc_bulk_put() - put a list of interconnect paths
* @num_paths: the number of icc_bulk_data
* @paths: the icc_bulk_data table with the paths being put
*/
void icc_bulk_put(int num_paths, struct icc_bulk_data *paths)
{
while (--num_paths >= 0) {
icc_put(paths[num_paths].path);
paths[num_paths].path = NULL;
}
}
EXPORT_SYMBOL_GPL(icc_bulk_put);
/**
* icc_bulk_set_bw() - set bandwidth to a set of paths
* @num_paths: the number of icc_bulk_data
* @paths: the icc_bulk_data table containing the paths and bandwidth
*
* Returns 0 on success or negative errno otherwise.
*/
int icc_bulk_set_bw(int num_paths, const struct icc_bulk_data *paths)
{
int ret = 0;
int i;
for (i = 0; i < num_paths; i++) {
ret = icc_set_bw(paths[i].path, paths[i].avg_bw, paths[i].peak_bw);
if (ret) {
pr_err("icc_set_bw() failed on path %s (%d)\n", paths[i].name, ret);
return ret;
}
}
return ret;
}
EXPORT_SYMBOL_GPL(icc_bulk_set_bw);
/**
* icc_bulk_enable() - enable a previously disabled set of paths
* @num_paths: the number of icc_bulk_data
* @paths: the icc_bulk_data table containing the paths and bandwidth
*
* Returns 0 on success or negative errno otherwise.
*/
int icc_bulk_enable(int num_paths, const struct icc_bulk_data *paths)
{
int ret, i;
for (i = 0; i < num_paths; i++) {
ret = icc_enable(paths[i].path);
if (ret) {
pr_err("icc_enable() failed on path %s (%d)\n", paths[i].name, ret);
goto err;
}
}
return 0;
err:
icc_bulk_disable(i, paths);
return ret;
}
EXPORT_SYMBOL_GPL(icc_bulk_enable);
/**
* icc_bulk_disable() - disable a set of interconnect paths
* @num_paths: the number of icc_bulk_data
* @paths: the icc_bulk_data table containing the paths and bandwidth
*/
void icc_bulk_disable(int num_paths, const struct icc_bulk_data *paths)
{
while (--num_paths >= 0)
icc_disable(paths[num_paths].path);
}
EXPORT_SYMBOL_GPL(icc_bulk_disable);
struct icc_bulk_devres {
struct icc_bulk_data *paths;
int num_paths;
};
static void devm_icc_bulk_release(struct device *dev, void *res)
{
struct icc_bulk_devres *devres = res;
icc_bulk_put(devres->num_paths, devres->paths);
}
/**
* devm_of_icc_bulk_get() - resource managed of_icc_bulk_get
* @dev: the device requesting the path
* @num_paths: the number of icc_bulk_data
* @paths: the table with the paths we want to get
*
* Returns 0 on success or negative errno otherwise.
*/
int devm_of_icc_bulk_get(struct device *dev, int num_paths, struct icc_bulk_data *paths)
{
struct icc_bulk_devres *devres;
int ret;
devres = devres_alloc(devm_icc_bulk_release, sizeof(*devres), GFP_KERNEL);
if (!devres)
return -ENOMEM;
ret = of_icc_bulk_get(dev, num_paths, paths);
if (!ret) {
devres->paths = paths;
devres->num_paths = num_paths;
devres_add(dev, devres);
} else {
devres_free(devres);
}
return ret;
}
EXPORT_SYMBOL_GPL(devm_of_icc_bulk_get);
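/*
 * Example consumer usage (editor's sketch): the path names below are
 * hypothetical and must match the consumer's "interconnect-names" DT
 * property; bandwidth values use the MBps_to_icc() helper from
 * <linux/interconnect.h>.
 */
#if 0	/* illustrative only */
static struct icc_bulk_data my_paths[] = {
	{ .name = "cpu-mem", .avg_bw = MBps_to_icc(100), .peak_bw = MBps_to_icc(200) },
	{ .name = "gpu-mem", .avg_bw = MBps_to_icc(50), .peak_bw = MBps_to_icc(150) },
};

static int my_consumer_probe(struct platform_device *pdev)
{
	int ret;

	ret = devm_of_icc_bulk_get(&pdev->dev, ARRAY_SIZE(my_paths), my_paths);
	if (ret)
		return ret;

	ret = icc_bulk_set_bw(ARRAY_SIZE(my_paths), my_paths);
	if (ret)
		return ret;

	/* Paths are released automatically via devres on driver detach. */
	return icc_bulk_enable(ARRAY_SIZE(my_paths), my_paths);
}
#endif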
| linux-master | drivers/interconnect/bulk.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Interconnect framework driver for i.MX8MP SoC
*
* Copyright 2022 NXP
* Peng Fan <[email protected]>
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/interconnect/fsl,imx8mp.h>
#include "imx.h"
static const struct imx_icc_node_adj_desc imx8mp_noc_adj = {
.bw_mul = 1,
.bw_div = 16,
.main_noc = true,
};
static struct imx_icc_noc_setting noc_setting_nodes[] = {
[IMX8MP_ICM_MLMIX] = {
.reg = 0x180,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICM_DSP] = {
.reg = 0x200,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICM_SDMA2PER] = {
.reg = 0x280,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 4,
},
[IMX8MP_ICM_SDMA2BURST] = {
.reg = 0x300,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 4,
},
[IMX8MP_ICM_SDMA3PER] = {
.reg = 0x380,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 4,
},
[IMX8MP_ICM_SDMA3BURST] = {
.reg = 0x400,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 4,
},
[IMX8MP_ICM_EDMA] = {
.reg = 0x480,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 4,
},
[IMX8MP_ICM_GPU3D] = {
.reg = 0x500,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICM_GPU2D] = {
.reg = 0x580,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICM_HRV] = {
.reg = 0x600,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 2,
.ext_control = 1,
},
[IMX8MP_ICM_LCDIF_HDMI] = {
.reg = 0x680,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 2,
.ext_control = 1,
},
[IMX8MP_ICM_HDCP] = {
.reg = 0x700,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 5,
},
[IMX8MP_ICM_NOC_PCIE] = {
.reg = 0x780,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICM_USB1] = {
.reg = 0x800,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICM_USB2] = {
.reg = 0x880,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICM_PCIE] = {
.reg = 0x900,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICM_LCDIF_RD] = {
.reg = 0x980,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 2,
.ext_control = 1,
},
[IMX8MP_ICM_LCDIF_WR] = {
.reg = 0xa00,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 2,
.ext_control = 1,
},
[IMX8MP_ICM_ISI0] = {
.reg = 0xa80,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 2,
.ext_control = 1,
},
[IMX8MP_ICM_ISI1] = {
.reg = 0xb00,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 2,
.ext_control = 1,
},
[IMX8MP_ICM_ISI2] = {
.reg = 0xb80,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 2,
.ext_control = 1,
},
[IMX8MP_ICM_ISP0] = {
.reg = 0xc00,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 7,
},
[IMX8MP_ICM_ISP1] = {
.reg = 0xc80,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 7,
},
[IMX8MP_ICM_DWE] = {
.reg = 0xd00,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 7,
},
[IMX8MP_ICM_VPU_G1] = {
.reg = 0xd80,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICM_VPU_G2] = {
.reg = 0xe00,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICM_VPU_H1] = {
.reg = 0xe80,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICN_MEDIA] = {
.mode = IMX_NOC_MODE_UNCONFIGURED,
},
[IMX8MP_ICN_VIDEO] = {
.mode = IMX_NOC_MODE_UNCONFIGURED,
},
[IMX8MP_ICN_AUDIO] = {
.mode = IMX_NOC_MODE_UNCONFIGURED,
},
[IMX8MP_ICN_HDMI] = {
.mode = IMX_NOC_MODE_UNCONFIGURED,
},
[IMX8MP_ICN_GPU] = {
.mode = IMX_NOC_MODE_UNCONFIGURED,
},
[IMX8MP_ICN_HSIO] = {
.mode = IMX_NOC_MODE_UNCONFIGURED,
},
};
/* Describe bus masters, slaves and connections between them */
static struct imx_icc_node_desc nodes[] = {
DEFINE_BUS_INTERCONNECT("NOC", IMX8MP_ICN_NOC, &imx8mp_noc_adj,
IMX8MP_ICS_DRAM, IMX8MP_ICN_MAIN),
DEFINE_BUS_SLAVE("OCRAM", IMX8MP_ICS_OCRAM, NULL),
DEFINE_BUS_SLAVE("DRAM", IMX8MP_ICS_DRAM, NULL),
DEFINE_BUS_MASTER("A53", IMX8MP_ICM_A53, IMX8MP_ICN_NOC),
DEFINE_BUS_MASTER("SUPERMIX", IMX8MP_ICM_SUPERMIX, IMX8MP_ICN_NOC),
DEFINE_BUS_MASTER("GIC", IMX8MP_ICM_GIC, IMX8MP_ICN_NOC),
DEFINE_BUS_MASTER("MLMIX", IMX8MP_ICM_MLMIX, IMX8MP_ICN_NOC),
DEFINE_BUS_INTERCONNECT("NOC_AUDIO", IMX8MP_ICN_AUDIO, NULL, IMX8MP_ICN_NOC),
DEFINE_BUS_MASTER("DSP", IMX8MP_ICM_DSP, IMX8MP_ICN_AUDIO),
DEFINE_BUS_MASTER("SDMA2PER", IMX8MP_ICM_SDMA2PER, IMX8MP_ICN_AUDIO),
DEFINE_BUS_MASTER("SDMA2BURST", IMX8MP_ICM_SDMA2BURST, IMX8MP_ICN_AUDIO),
DEFINE_BUS_MASTER("SDMA3PER", IMX8MP_ICM_SDMA3PER, IMX8MP_ICN_AUDIO),
DEFINE_BUS_MASTER("SDMA3BURST", IMX8MP_ICM_SDMA3BURST, IMX8MP_ICN_AUDIO),
DEFINE_BUS_MASTER("EDMA", IMX8MP_ICM_EDMA, IMX8MP_ICN_AUDIO),
DEFINE_BUS_INTERCONNECT("NOC_GPU", IMX8MP_ICN_GPU, NULL, IMX8MP_ICN_NOC),
DEFINE_BUS_MASTER("GPU 2D", IMX8MP_ICM_GPU2D, IMX8MP_ICN_GPU),
DEFINE_BUS_MASTER("GPU 3D", IMX8MP_ICM_GPU3D, IMX8MP_ICN_GPU),
DEFINE_BUS_INTERCONNECT("NOC_HDMI", IMX8MP_ICN_HDMI, NULL, IMX8MP_ICN_NOC),
DEFINE_BUS_MASTER("HRV", IMX8MP_ICM_HRV, IMX8MP_ICN_HDMI),
DEFINE_BUS_MASTER("LCDIF_HDMI", IMX8MP_ICM_LCDIF_HDMI, IMX8MP_ICN_HDMI),
DEFINE_BUS_MASTER("HDCP", IMX8MP_ICM_HDCP, IMX8MP_ICN_HDMI),
DEFINE_BUS_INTERCONNECT("NOC_HSIO", IMX8MP_ICN_HSIO, NULL, IMX8MP_ICN_NOC),
DEFINE_BUS_MASTER("NOC_PCIE", IMX8MP_ICM_NOC_PCIE, IMX8MP_ICN_HSIO),
DEFINE_BUS_MASTER("USB1", IMX8MP_ICM_USB1, IMX8MP_ICN_HSIO),
DEFINE_BUS_MASTER("USB2", IMX8MP_ICM_USB2, IMX8MP_ICN_HSIO),
DEFINE_BUS_MASTER("PCIE", IMX8MP_ICM_PCIE, IMX8MP_ICN_HSIO),
DEFINE_BUS_INTERCONNECT("NOC_MEDIA", IMX8MP_ICN_MEDIA, NULL, IMX8MP_ICN_NOC),
DEFINE_BUS_MASTER("LCDIF_RD", IMX8MP_ICM_LCDIF_RD, IMX8MP_ICN_MEDIA),
DEFINE_BUS_MASTER("LCDIF_WR", IMX8MP_ICM_LCDIF_WR, IMX8MP_ICN_MEDIA),
DEFINE_BUS_MASTER("ISI0", IMX8MP_ICM_ISI0, IMX8MP_ICN_MEDIA),
DEFINE_BUS_MASTER("ISI1", IMX8MP_ICM_ISI1, IMX8MP_ICN_MEDIA),
DEFINE_BUS_MASTER("ISI2", IMX8MP_ICM_ISI2, IMX8MP_ICN_MEDIA),
DEFINE_BUS_MASTER("ISP0", IMX8MP_ICM_ISP0, IMX8MP_ICN_MEDIA),
DEFINE_BUS_MASTER("ISP1", IMX8MP_ICM_ISP1, IMX8MP_ICN_MEDIA),
DEFINE_BUS_MASTER("DWE", IMX8MP_ICM_DWE, IMX8MP_ICN_MEDIA),
DEFINE_BUS_INTERCONNECT("NOC_VIDEO", IMX8MP_ICN_VIDEO, NULL, IMX8MP_ICN_NOC),
DEFINE_BUS_MASTER("VPU G1", IMX8MP_ICM_VPU_G1, IMX8MP_ICN_VIDEO),
DEFINE_BUS_MASTER("VPU G2", IMX8MP_ICM_VPU_G2, IMX8MP_ICN_VIDEO),
DEFINE_BUS_MASTER("VPU H1", IMX8MP_ICM_VPU_H1, IMX8MP_ICN_VIDEO),
DEFINE_BUS_INTERCONNECT("PL301_MAIN", IMX8MP_ICN_MAIN, NULL,
IMX8MP_ICN_NOC, IMX8MP_ICS_OCRAM),
};
static int imx8mp_icc_probe(struct platform_device *pdev)
{
return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), noc_setting_nodes);
}
static int imx8mp_icc_remove(struct platform_device *pdev)
{
imx_icc_unregister(pdev);
return 0;
}
static struct platform_driver imx8mp_icc_driver = {
.probe = imx8mp_icc_probe,
.remove = imx8mp_icc_remove,
.driver = {
.name = "imx8mp-interconnect",
},
};
module_platform_driver(imx8mp_icc_driver);
MODULE_AUTHOR("Peng Fan <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx8mp-interconnect");
| linux-master | drivers/interconnect/imx/imx8mp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Interconnect framework driver for i.MX8MN SoC
*
* Copyright (c) 2019-2020, NXP
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/interconnect/imx8mn.h>
#include "imx.h"
static const struct imx_icc_node_adj_desc imx8mn_dram_adj = {
.bw_mul = 1,
.bw_div = 4,
.phandle_name = "fsl,ddrc",
};
static const struct imx_icc_node_adj_desc imx8mn_noc_adj = {
.bw_mul = 1,
.bw_div = 4,
.main_noc = true,
};
/*
* Describe bus masters, slaves and connections between them
*
* This is a simplified subset of the bus diagram; several other
* PL301 NICs are skipped/merged into PL301_MAIN.
*/
static struct imx_icc_node_desc nodes[] = {
DEFINE_BUS_INTERCONNECT("NOC", IMX8MN_ICN_NOC, &imx8mn_noc_adj,
IMX8MN_ICS_DRAM, IMX8MN_ICN_MAIN),
DEFINE_BUS_SLAVE("DRAM", IMX8MN_ICS_DRAM, &imx8mn_dram_adj),
DEFINE_BUS_SLAVE("OCRAM", IMX8MN_ICS_OCRAM, NULL),
DEFINE_BUS_MASTER("A53", IMX8MN_ICM_A53, IMX8MN_ICN_NOC),
/* GPUMIX */
DEFINE_BUS_MASTER("GPU", IMX8MN_ICM_GPU, IMX8MN_ICN_GPU),
DEFINE_BUS_INTERCONNECT("PL301_GPU", IMX8MN_ICN_GPU, NULL, IMX8MN_ICN_NOC),
/* DISPLAYMIX */
DEFINE_BUS_MASTER("CSI1", IMX8MN_ICM_CSI1, IMX8MN_ICN_MIPI),
DEFINE_BUS_MASTER("CSI2", IMX8MN_ICM_CSI2, IMX8MN_ICN_MIPI),
DEFINE_BUS_MASTER("ISI", IMX8MN_ICM_ISI, IMX8MN_ICN_MIPI),
DEFINE_BUS_MASTER("LCDIF", IMX8MN_ICM_LCDIF, IMX8MN_ICN_MIPI),
DEFINE_BUS_INTERCONNECT("PL301_MIPI", IMX8MN_ICN_MIPI, NULL, IMX8MN_ICN_NOC),
/* USB goes straight to NOC */
DEFINE_BUS_MASTER("USB", IMX8MN_ICM_USB, IMX8MN_ICN_NOC),
/* Audio */
DEFINE_BUS_MASTER("SDMA2", IMX8MN_ICM_SDMA2, IMX8MN_ICN_AUDIO),
DEFINE_BUS_MASTER("SDMA3", IMX8MN_ICM_SDMA3, IMX8MN_ICN_AUDIO),
DEFINE_BUS_INTERCONNECT("PL301_AUDIO", IMX8MN_ICN_AUDIO, NULL, IMX8MN_ICN_MAIN),
/* Ethernet */
DEFINE_BUS_MASTER("ENET", IMX8MN_ICM_ENET, IMX8MN_ICN_ENET),
DEFINE_BUS_INTERCONNECT("PL301_ENET", IMX8MN_ICN_ENET, NULL, IMX8MN_ICN_MAIN),
/* Other */
DEFINE_BUS_MASTER("SDMA1", IMX8MN_ICM_SDMA1, IMX8MN_ICN_MAIN),
DEFINE_BUS_MASTER("NAND", IMX8MN_ICM_NAND, IMX8MN_ICN_MAIN),
DEFINE_BUS_MASTER("USDHC1", IMX8MN_ICM_USDHC1, IMX8MN_ICN_MAIN),
DEFINE_BUS_MASTER("USDHC2", IMX8MN_ICM_USDHC2, IMX8MN_ICN_MAIN),
DEFINE_BUS_MASTER("USDHC3", IMX8MN_ICM_USDHC3, IMX8MN_ICN_MAIN),
DEFINE_BUS_INTERCONNECT("PL301_MAIN", IMX8MN_ICN_MAIN, NULL,
IMX8MN_ICN_NOC, IMX8MN_ICS_OCRAM),
};
static int imx8mn_icc_probe(struct platform_device *pdev)
{
return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), NULL);
}
static int imx8mn_icc_remove(struct platform_device *pdev)
{
imx_icc_unregister(pdev);
return 0;
}
static struct platform_driver imx8mn_icc_driver = {
.probe = imx8mn_icc_probe,
.remove = imx8mn_icc_remove,
.driver = {
.name = "imx8mn-interconnect",
},
};
module_platform_driver(imx8mn_icc_driver);
MODULE_ALIAS("platform:imx8mn-interconnect");
MODULE_AUTHOR("Leonard Crestez <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/interconnect/imx/imx8mn.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Interconnect framework driver for i.MX SoC
*
* Copyright (c) 2019, BayLibre
* Copyright (c) 2019-2020, NXP
* Author: Alexandre Bailon <[email protected]>
* Author: Leonard Crestez <[email protected]>
*/
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include "imx.h"
/* private icc_node data */
struct imx_icc_node {
const struct imx_icc_node_desc *desc;
const struct imx_icc_noc_setting *setting;
struct device *qos_dev;
struct dev_pm_qos_request qos_req;
struct imx_icc_provider *imx_provider;
};
static int imx_icc_get_bw(struct icc_node *node, u32 *avg, u32 *peak)
{
*avg = 0;
*peak = 0;
return 0;
}
static int imx_icc_node_set(struct icc_node *node)
{
struct device *dev = node->provider->dev;
struct imx_icc_node *node_data = node->data;
void __iomem *base;
u32 prio;
u64 freq;
if (node_data->setting && node->peak_bw) {
base = node_data->setting->reg + node_data->imx_provider->noc_base;
if (node_data->setting->mode == IMX_NOC_MODE_FIXED) {
prio = node_data->setting->prio_level;
prio = PRIORITY_COMP_MARK | (prio << 8) | prio;
writel(prio, base + IMX_NOC_PRIO_REG);
writel(node_data->setting->mode, base + IMX_NOC_MODE_REG);
writel(node_data->setting->ext_control, base + IMX_NOC_EXT_CTL_REG);
dev_dbg(dev, "%s: mode: 0x%x, prio: 0x%x, ext_control: 0x%x\n",
node_data->desc->name, node_data->setting->mode, prio,
node_data->setting->ext_control);
} else if (node_data->setting->mode == IMX_NOC_MODE_UNCONFIGURED) {
dev_dbg(dev, "%s: mode not unconfigured\n", node_data->desc->name);
} else {
dev_info(dev, "%s: mode: %d not supported\n",
node_data->desc->name, node_data->setting->mode);
return -EOPNOTSUPP;
}
}
if (!node_data->qos_dev)
return 0;
freq = (node->avg_bw + node->peak_bw) * node_data->desc->adj->bw_mul;
do_div(freq, node_data->desc->adj->bw_div);
dev_dbg(dev, "node %s device %s avg_bw %ukBps peak_bw %ukBps min_freq %llukHz\n",
node->name, dev_name(node_data->qos_dev),
node->avg_bw, node->peak_bw, freq);
if (freq > S32_MAX) {
dev_err(dev, "%s can't request more than S32_MAX freq\n",
node->name);
return -ERANGE;
}
dev_pm_qos_update_request(&node_data->qos_req, freq);
return 0;
}
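/*
 * Worked example (editor's note): with the i.MX8MM DRAM adjustment of
 * bw_mul = 1 and bw_div = 16 (see imx8mm.c later in this collection), a
 * vote of avg_bw = 1000000 kBps and peak_bw = 2000000 kBps yields
 * (1000000 + 2000000) * 1 / 16 = 187500 kHz as the
 * DEV_PM_QOS_MIN_FREQUENCY request on the DDR controller.
 */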
static int imx_icc_set(struct icc_node *src, struct icc_node *dst)
{
int ret;
ret = imx_icc_node_set(src);
if (ret)
return ret;
return imx_icc_node_set(dst);
}
/* imx_icc_node_destroy() - Destroy an imx icc_node, including private data */
static void imx_icc_node_destroy(struct icc_node *node)
{
struct imx_icc_node *node_data = node->data;
int ret;
if (dev_pm_qos_request_active(&node_data->qos_req)) {
ret = dev_pm_qos_remove_request(&node_data->qos_req);
if (ret)
dev_warn(node->provider->dev,
"failed to remove qos request for %s\n",
dev_name(node_data->qos_dev));
}
put_device(node_data->qos_dev);
icc_node_del(node);
icc_node_destroy(node->id);
}
static int imx_icc_node_init_qos(struct icc_provider *provider,
struct icc_node *node)
{
struct imx_icc_node *node_data = node->data;
const struct imx_icc_node_adj_desc *adj = node_data->desc->adj;
struct device *dev = provider->dev;
struct device_node *dn = NULL;
struct platform_device *pdev;
if (adj->main_noc) {
node_data->qos_dev = dev;
dev_dbg(dev, "icc node %s[%d] is main noc itself\n",
node->name, node->id);
} else {
dn = of_parse_phandle(dev->of_node, adj->phandle_name, 0);
if (!dn) {
dev_warn(dev, "Failed to parse %s\n",
adj->phandle_name);
return -ENODEV;
}
/* Allow scaling to be disabled on a per-node basis */
if (!of_device_is_available(dn)) {
dev_warn(dev, "Missing property %s, skip scaling %s\n",
adj->phandle_name, node->name);
of_node_put(dn);
return 0;
}
pdev = of_find_device_by_node(dn);
if (!pdev) {
dev_warn(dev, "node %s[%d] missing device for %pOF\n",
node->name, node->id, dn);
of_node_put(dn);
return -EPROBE_DEFER;
}
node_data->qos_dev = &pdev->dev;
dev_dbg(dev, "node %s[%d] has device node %pOF\n",
node->name, node->id, dn);
/* Drop the reference only after the last use of dn above. */
of_node_put(dn);
}
return dev_pm_qos_add_request(node_data->qos_dev,
&node_data->qos_req,
DEV_PM_QOS_MIN_FREQUENCY, 0);
}
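/*
 * Example DT wiring (editor's sketch, based on the fsl,imx8m-noc binding):
 * the adjustment's phandle_name (e.g. "fsl,ddrc") is resolved against the
 * provider's node, so the NoC node is expected to look roughly like:
 *
 *	noc: interconnect@32700000 {
 *		compatible = "fsl,imx8mm-noc", "fsl,imx8m-noc";
 *		fsl,ddrc = <&ddrc>;
 *		#interconnect-cells = <1>;
 *	};
 */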
static struct icc_node *imx_icc_node_add(struct imx_icc_provider *imx_provider,
const struct imx_icc_node_desc *node_desc,
const struct imx_icc_noc_setting *setting)
{
struct icc_provider *provider = &imx_provider->provider;
struct device *dev = provider->dev;
struct imx_icc_node *node_data;
struct icc_node *node;
int ret;
node = icc_node_create(node_desc->id);
if (IS_ERR(node)) {
dev_err(dev, "failed to create node %d\n", node_desc->id);
return node;
}
if (node->data) {
dev_err(dev, "already created node %s id=%d\n",
node_desc->name, node_desc->id);
return ERR_PTR(-EEXIST);
}
node_data = devm_kzalloc(dev, sizeof(*node_data), GFP_KERNEL);
if (!node_data) {
icc_node_destroy(node->id);
return ERR_PTR(-ENOMEM);
}
node->name = node_desc->name;
node->data = node_data;
node_data->desc = node_desc;
node_data->setting = setting;
node_data->imx_provider = imx_provider;
icc_node_add(node, provider);
if (node_desc->adj) {
ret = imx_icc_node_init_qos(provider, node);
if (ret < 0) {
imx_icc_node_destroy(node);
return ERR_PTR(ret);
}
}
return node;
}
static void imx_icc_unregister_nodes(struct icc_provider *provider)
{
struct icc_node *node, *tmp;
list_for_each_entry_safe(node, tmp, &provider->nodes, node_list)
imx_icc_node_destroy(node);
}
static int imx_icc_register_nodes(struct imx_icc_provider *imx_provider,
const struct imx_icc_node_desc *descs,
int count,
const struct imx_icc_noc_setting *settings)
{
struct icc_provider *provider = &imx_provider->provider;
struct icc_onecell_data *provider_data = provider->data;
int ret;
int i;
for (i = 0; i < count; i++) {
struct icc_node *node;
const struct imx_icc_node_desc *node_desc = &descs[i];
size_t j;
node = imx_icc_node_add(imx_provider, node_desc,
settings ? &settings[node_desc->id] : NULL);
if (IS_ERR(node)) {
ret = dev_err_probe(provider->dev, PTR_ERR(node),
"failed to add %s\n", node_desc->name);
goto err;
}
provider_data->nodes[node->id] = node;
for (j = 0; j < node_desc->num_links; j++) {
ret = icc_link_create(node, node_desc->links[j]);
if (ret) {
dev_err(provider->dev, "failed to link node %d to %d: %d\n",
node->id, node_desc->links[j], ret);
goto err;
}
}
}
return 0;
err:
imx_icc_unregister_nodes(provider);
return ret;
}
static int get_max_node_id(struct imx_icc_node_desc *nodes, int nodes_count)
{
int i, ret = 0;
for (i = 0; i < nodes_count; ++i)
if (nodes[i].id > ret)
ret = nodes[i].id;
return ret;
}
int imx_icc_register(struct platform_device *pdev,
struct imx_icc_node_desc *nodes, int nodes_count,
struct imx_icc_noc_setting *settings)
{
struct device *dev = &pdev->dev;
struct icc_onecell_data *data;
struct imx_icc_provider *imx_provider;
struct icc_provider *provider;
int num_nodes;
int ret;
/* icc_onecell_data is indexed by node_id, unlike nodes param */
num_nodes = get_max_node_id(nodes, nodes_count) + 1;
data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
GFP_KERNEL);
if (!data)
return -ENOMEM;
data->num_nodes = num_nodes;
imx_provider = devm_kzalloc(dev, sizeof(*imx_provider), GFP_KERNEL);
if (!imx_provider)
return -ENOMEM;
provider = &imx_provider->provider;
provider->set = imx_icc_set;
provider->get_bw = imx_icc_get_bw;
provider->aggregate = icc_std_aggregate;
provider->xlate = of_icc_xlate_onecell;
provider->data = data;
provider->dev = dev->parent;
icc_provider_init(provider);
platform_set_drvdata(pdev, imx_provider);
if (settings) {
imx_provider->noc_base = devm_of_iomap(dev, provider->dev->of_node, 0, NULL);
if (IS_ERR(imx_provider->noc_base)) {
ret = PTR_ERR(imx_provider->noc_base);
dev_err(dev, "Error mapping NoC: %d\n", ret);
return ret;
}
}
ret = imx_icc_register_nodes(imx_provider, nodes, nodes_count, settings);
if (ret)
return ret;
ret = icc_provider_register(provider);
if (ret)
goto err_unregister_nodes;
return 0;
err_unregister_nodes:
imx_icc_unregister_nodes(&imx_provider->provider);
return ret;
}
EXPORT_SYMBOL_GPL(imx_icc_register);
void imx_icc_unregister(struct platform_device *pdev)
{
struct imx_icc_provider *imx_provider = platform_get_drvdata(pdev);
icc_provider_deregister(&imx_provider->provider);
imx_icc_unregister_nodes(&imx_provider->provider);
}
EXPORT_SYMBOL_GPL(imx_icc_unregister);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/interconnect/imx/imx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Interconnect framework driver for i.MX8MM SoC
*
* Copyright (c) 2019, BayLibre
* Copyright (c) 2019-2020, NXP
* Author: Alexandre Bailon <[email protected]>
* Author: Leonard Crestez <[email protected]>
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/interconnect/imx8mm.h>
#include "imx.h"
static const struct imx_icc_node_adj_desc imx8mm_dram_adj = {
.bw_mul = 1,
.bw_div = 16,
.phandle_name = "fsl,ddrc",
};
static const struct imx_icc_node_adj_desc imx8mm_noc_adj = {
.bw_mul = 1,
.bw_div = 16,
.main_noc = true,
};
/*
* Describe bus masters, slaves and connections between them
*
* This is a simplified subset of the bus diagram; several other
* PL301 NICs are skipped/merged into PL301_MAIN.
*/
static struct imx_icc_node_desc nodes[] = {
DEFINE_BUS_INTERCONNECT("NOC", IMX8MM_ICN_NOC, &imx8mm_noc_adj,
IMX8MM_ICS_DRAM, IMX8MM_ICN_MAIN),
DEFINE_BUS_SLAVE("DRAM", IMX8MM_ICS_DRAM, &imx8mm_dram_adj),
DEFINE_BUS_SLAVE("OCRAM", IMX8MM_ICS_OCRAM, NULL),
DEFINE_BUS_MASTER("A53", IMX8MM_ICM_A53, IMX8MM_ICN_NOC),
/* VPUMIX */
DEFINE_BUS_MASTER("VPU H1", IMX8MM_ICM_VPU_H1, IMX8MM_ICN_VIDEO),
DEFINE_BUS_MASTER("VPU G1", IMX8MM_ICM_VPU_G1, IMX8MM_ICN_VIDEO),
DEFINE_BUS_MASTER("VPU G2", IMX8MM_ICM_VPU_G2, IMX8MM_ICN_VIDEO),
DEFINE_BUS_INTERCONNECT("PL301_VIDEO", IMX8MM_ICN_VIDEO, NULL, IMX8MM_ICN_NOC),
/* GPUMIX */
DEFINE_BUS_MASTER("GPU 2D", IMX8MM_ICM_GPU2D, IMX8MM_ICN_GPU),
DEFINE_BUS_MASTER("GPU 3D", IMX8MM_ICM_GPU3D, IMX8MM_ICN_GPU),
DEFINE_BUS_INTERCONNECT("PL301_GPU", IMX8MM_ICN_GPU, NULL, IMX8MM_ICN_NOC),
/* DISPLAYMIX */
DEFINE_BUS_MASTER("CSI", IMX8MM_ICM_CSI, IMX8MM_ICN_MIPI),
DEFINE_BUS_MASTER("LCDIF", IMX8MM_ICM_LCDIF, IMX8MM_ICN_MIPI),
DEFINE_BUS_INTERCONNECT("PL301_MIPI", IMX8MM_ICN_MIPI, NULL, IMX8MM_ICN_NOC),
/* HSIO */
DEFINE_BUS_MASTER("USB1", IMX8MM_ICM_USB1, IMX8MM_ICN_HSIO),
DEFINE_BUS_MASTER("USB2", IMX8MM_ICM_USB2, IMX8MM_ICN_HSIO),
DEFINE_BUS_MASTER("PCIE", IMX8MM_ICM_PCIE, IMX8MM_ICN_HSIO),
DEFINE_BUS_INTERCONNECT("PL301_HSIO", IMX8MM_ICN_HSIO, NULL, IMX8MM_ICN_NOC),
/* Audio */
DEFINE_BUS_MASTER("SDMA2", IMX8MM_ICM_SDMA2, IMX8MM_ICN_AUDIO),
DEFINE_BUS_MASTER("SDMA3", IMX8MM_ICM_SDMA3, IMX8MM_ICN_AUDIO),
DEFINE_BUS_INTERCONNECT("PL301_AUDIO", IMX8MM_ICN_AUDIO, NULL, IMX8MM_ICN_MAIN),
/* Ethernet */
DEFINE_BUS_MASTER("ENET", IMX8MM_ICM_ENET, IMX8MM_ICN_ENET),
DEFINE_BUS_INTERCONNECT("PL301_ENET", IMX8MM_ICN_ENET, NULL, IMX8MM_ICN_MAIN),
/* Other */
DEFINE_BUS_MASTER("SDMA1", IMX8MM_ICM_SDMA1, IMX8MM_ICN_MAIN),
DEFINE_BUS_MASTER("NAND", IMX8MM_ICM_NAND, IMX8MM_ICN_MAIN),
DEFINE_BUS_MASTER("USDHC1", IMX8MM_ICM_USDHC1, IMX8MM_ICN_MAIN),
DEFINE_BUS_MASTER("USDHC2", IMX8MM_ICM_USDHC2, IMX8MM_ICN_MAIN),
DEFINE_BUS_MASTER("USDHC3", IMX8MM_ICM_USDHC3, IMX8MM_ICN_MAIN),
DEFINE_BUS_INTERCONNECT("PL301_MAIN", IMX8MM_ICN_MAIN, NULL,
IMX8MM_ICN_NOC, IMX8MM_ICS_OCRAM),
};
static int imx8mm_icc_probe(struct platform_device *pdev)
{
return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), NULL);
}
static int imx8mm_icc_remove(struct platform_device *pdev)
{
imx_icc_unregister(pdev);
return 0;
}
static struct platform_driver imx8mm_icc_driver = {
.probe = imx8mm_icc_probe,
.remove = imx8mm_icc_remove,
.driver = {
.name = "imx8mm-interconnect",
},
};
module_platform_driver(imx8mm_icc_driver);
MODULE_AUTHOR("Alexandre Bailon <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:imx8mm-interconnect");
| linux-master | drivers/interconnect/imx/imx8mm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Interconnect framework driver for i.MX8MQ SoC
*
* Copyright (c) 2019-2020, NXP
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interconnect-provider.h>
#include <dt-bindings/interconnect/imx8mq.h>
#include "imx.h"
static const struct imx_icc_node_adj_desc imx8mq_dram_adj = {
.bw_mul = 1,
.bw_div = 4,
.phandle_name = "fsl,ddrc",
};
static const struct imx_icc_node_adj_desc imx8mq_noc_adj = {
.bw_mul = 1,
.bw_div = 4,
.main_noc = true,
};
/*
* Describe bus masters, slaves and connections between them
*
* This is a simplified subset of the bus diagram; several other
* PL301 NICs are skipped/merged into PL301_MAIN.
*/
static struct imx_icc_node_desc nodes[] = {
DEFINE_BUS_INTERCONNECT("NOC", IMX8MQ_ICN_NOC, &imx8mq_noc_adj,
IMX8MQ_ICS_DRAM, IMX8MQ_ICN_MAIN),
DEFINE_BUS_SLAVE("DRAM", IMX8MQ_ICS_DRAM, &imx8mq_dram_adj),
DEFINE_BUS_SLAVE("OCRAM", IMX8MQ_ICS_OCRAM, NULL),
DEFINE_BUS_MASTER("A53", IMX8MQ_ICM_A53, IMX8MQ_ICN_NOC),
/* VPUMIX */
DEFINE_BUS_MASTER("VPU", IMX8MQ_ICM_VPU, IMX8MQ_ICN_VIDEO),
DEFINE_BUS_INTERCONNECT("PL301_VIDEO", IMX8MQ_ICN_VIDEO, NULL, IMX8MQ_ICN_NOC),
/* GPUMIX */
DEFINE_BUS_MASTER("GPU", IMX8MQ_ICM_GPU, IMX8MQ_ICN_GPU),
DEFINE_BUS_INTERCONNECT("PL301_GPU", IMX8MQ_ICN_GPU, NULL, IMX8MQ_ICN_NOC),
/* DISPMIX (only for DCSS) */
DEFINE_BUS_MASTER("DC", IMX8MQ_ICM_DCSS, IMX8MQ_ICN_DCSS),
DEFINE_BUS_INTERCONNECT("PL301_DC", IMX8MQ_ICN_DCSS, NULL, IMX8MQ_ICN_NOC),
/* USBMIX */
DEFINE_BUS_MASTER("USB1", IMX8MQ_ICM_USB1, IMX8MQ_ICN_USB),
DEFINE_BUS_MASTER("USB2", IMX8MQ_ICM_USB2, IMX8MQ_ICN_USB),
DEFINE_BUS_INTERCONNECT("PL301_USB", IMX8MQ_ICN_USB, NULL, IMX8MQ_ICN_NOC),
/* PL301_DISPLAY (IPs other than DCSS, inside SUPERMIX) */
DEFINE_BUS_MASTER("CSI1", IMX8MQ_ICM_CSI1, IMX8MQ_ICN_DISPLAY),
DEFINE_BUS_MASTER("CSI2", IMX8MQ_ICM_CSI2, IMX8MQ_ICN_DISPLAY),
DEFINE_BUS_MASTER("LCDIF", IMX8MQ_ICM_LCDIF, IMX8MQ_ICN_DISPLAY),
DEFINE_BUS_INTERCONNECT("PL301_DISPLAY", IMX8MQ_ICN_DISPLAY, NULL, IMX8MQ_ICN_MAIN),
/* AUDIO */
DEFINE_BUS_MASTER("SDMA2", IMX8MQ_ICM_SDMA2, IMX8MQ_ICN_AUDIO),
DEFINE_BUS_INTERCONNECT("PL301_AUDIO", IMX8MQ_ICN_AUDIO, NULL, IMX8MQ_ICN_DISPLAY),
/* ENET */
DEFINE_BUS_MASTER("ENET", IMX8MQ_ICM_ENET, IMX8MQ_ICN_ENET),
DEFINE_BUS_INTERCONNECT("PL301_ENET", IMX8MQ_ICN_ENET, NULL, IMX8MQ_ICN_MAIN),
/* OTHER */
DEFINE_BUS_MASTER("SDMA1", IMX8MQ_ICM_SDMA1, IMX8MQ_ICN_MAIN),
DEFINE_BUS_MASTER("NAND", IMX8MQ_ICM_NAND, IMX8MQ_ICN_MAIN),
DEFINE_BUS_MASTER("USDHC1", IMX8MQ_ICM_USDHC1, IMX8MQ_ICN_MAIN),
DEFINE_BUS_MASTER("USDHC2", IMX8MQ_ICM_USDHC2, IMX8MQ_ICN_MAIN),
DEFINE_BUS_MASTER("PCIE1", IMX8MQ_ICM_PCIE1, IMX8MQ_ICN_MAIN),
DEFINE_BUS_MASTER("PCIE2", IMX8MQ_ICM_PCIE2, IMX8MQ_ICN_MAIN),
DEFINE_BUS_INTERCONNECT("PL301_MAIN", IMX8MQ_ICN_MAIN, NULL,
IMX8MQ_ICN_NOC, IMX8MQ_ICS_OCRAM),
};
static int imx8mq_icc_probe(struct platform_device *pdev)
{
return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), NULL);
}
static int imx8mq_icc_remove(struct platform_device *pdev)
{
imx_icc_unregister(pdev);
return 0;
}
static struct platform_driver imx8mq_icc_driver = {
.probe = imx8mq_icc_probe,
.remove = imx8mq_icc_remove,
.driver = {
.name = "imx8mq-interconnect",
.sync_state = icc_sync_state,
},
};
module_platform_driver(imx8mq_icc_driver);
MODULE_ALIAS("platform:imx8mq-interconnect");
MODULE_AUTHOR("Leonard Crestez <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/interconnect/imx/imx8mq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Luca Weiss <[email protected]>
*/
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sm6350.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sm6350.h"
static struct qcom_icc_node qhm_a1noc_cfg = {
.name = "qhm_a1noc_cfg",
.id = SM6350_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM6350_SLAVE_SERVICE_A1NOC },
};
static struct qcom_icc_node qhm_qup_0 = {
.name = "qhm_qup_0",
.id = SM6350_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM6350_A1NOC_SNOC_SLV },
};
static struct qcom_icc_node xm_emmc = {
.name = "xm_emmc",
.id = SM6350_MASTER_EMMC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM6350_A1NOC_SNOC_SLV },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
.id = SM6350_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM6350_A1NOC_SNOC_SLV },
};
static struct qcom_icc_node qhm_a2noc_cfg = {
.name = "qhm_a2noc_cfg",
.id = SM6350_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM6350_SLAVE_SERVICE_A2NOC },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
.id = SM6350_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM6350_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node qhm_qup_1 = {
.name = "qhm_qup_1",
.id = SM6350_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM6350_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
.id = SM6350_MASTER_CRYPTO_CORE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM6350_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
.id = SM6350_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM6350_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
.id = SM6350_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM6350_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
.id = SM6350_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM6350_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
.id = SM6350_MASTER_USB3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM6350_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node qxm_camnoc_hf0_uncomp = {
.name = "qxm_camnoc_hf0_uncomp",
.id = SM6350_MASTER_CAMNOC_HF0_UNCOMP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM6350_SLAVE_CAMNOC_UNCOMP },
};
static struct qcom_icc_node qxm_camnoc_icp_uncomp = {
.name = "qxm_camnoc_icp_uncomp",
.id = SM6350_MASTER_CAMNOC_ICP_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM6350_SLAVE_CAMNOC_UNCOMP },
};
static struct qcom_icc_node qxm_camnoc_sf_uncomp = {
.name = "qxm_camnoc_sf_uncomp",
.id = SM6350_MASTER_CAMNOC_SF_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM6350_SLAVE_CAMNOC_UNCOMP },
};
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
.id = SM6350_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM6350_SLAVE_QUP_CORE_0 },
};
static struct qcom_icc_node qup1_core_master = {
.name = "qup1_core_master",
.id = SM6350_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM6350_SLAVE_QUP_CORE_1 },
};
static struct qcom_icc_node qnm_npu = {
.name = "qnm_npu",
.id = SM6350_MASTER_NPU,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM6350_SLAVE_CDSP_GEM_NOC },
};
static struct qcom_icc_node qxm_npu_dsp = {
.name = "qxm_npu_dsp",
.id = SM6350_MASTER_NPU_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM6350_SLAVE_CDSP_GEM_NOC },
};
static struct qcom_icc_node qnm_snoc = {
.name = "qnm_snoc",
.id = SM6350_SNOC_CNOC_MAS,
.channels = 1,
.buswidth = 8,
.num_links = 42,
.links = { SM6350_SLAVE_CAMERA_CFG,
SM6350_SLAVE_SDCC_2,
SM6350_SLAVE_CNOC_MNOC_CFG,
SM6350_SLAVE_UFS_MEM_CFG,
SM6350_SLAVE_QM_CFG,
SM6350_SLAVE_SNOC_CFG,
SM6350_SLAVE_QM_MPU_CFG,
SM6350_SLAVE_GLM,
SM6350_SLAVE_PDM,
SM6350_SLAVE_CAMERA_NRT_THROTTLE_CFG,
SM6350_SLAVE_A2NOC_CFG,
SM6350_SLAVE_QDSS_CFG,
SM6350_SLAVE_VSENSE_CTRL_CFG,
SM6350_SLAVE_CAMERA_RT_THROTTLE_CFG,
SM6350_SLAVE_DISPLAY_CFG,
SM6350_SLAVE_TCSR,
SM6350_SLAVE_DCC_CFG,
SM6350_SLAVE_CNOC_DDRSS,
SM6350_SLAVE_DISPLAY_THROTTLE_CFG,
SM6350_SLAVE_NPU_CFG,
SM6350_SLAVE_AHB2PHY,
SM6350_SLAVE_GRAPHICS_3D_CFG,
SM6350_SLAVE_BOOT_ROM,
SM6350_SLAVE_VENUS_CFG,
SM6350_SLAVE_IPA_CFG,
SM6350_SLAVE_SECURITY,
SM6350_SLAVE_IMEM_CFG,
SM6350_SLAVE_CNOC_MSS,
SM6350_SLAVE_SERVICE_CNOC,
SM6350_SLAVE_USB3,
SM6350_SLAVE_VENUS_THROTTLE_CFG,
SM6350_SLAVE_RBCPR_CX_CFG,
SM6350_SLAVE_A1NOC_CFG,
SM6350_SLAVE_AOSS,
SM6350_SLAVE_PRNG,
SM6350_SLAVE_EMMC_CFG,
SM6350_SLAVE_CRYPTO_0_CFG,
SM6350_SLAVE_PIMEM_CFG,
SM6350_SLAVE_RBCPR_MX_CFG,
SM6350_SLAVE_QUP_0,
SM6350_SLAVE_QUP_1,
SM6350_SLAVE_CLK_CTL
},
};
static struct qcom_icc_node xm_qdss_dap = {
.name = "xm_qdss_dap",
.id = SM6350_MASTER_QDSS_DAP,
.channels = 1,
.buswidth = 8,
.num_links = 42,
.links = { SM6350_SLAVE_CAMERA_CFG,
SM6350_SLAVE_SDCC_2,
SM6350_SLAVE_CNOC_MNOC_CFG,
SM6350_SLAVE_UFS_MEM_CFG,
SM6350_SLAVE_QM_CFG,
SM6350_SLAVE_SNOC_CFG,
SM6350_SLAVE_QM_MPU_CFG,
SM6350_SLAVE_GLM,
SM6350_SLAVE_PDM,
SM6350_SLAVE_CAMERA_NRT_THROTTLE_CFG,
SM6350_SLAVE_A2NOC_CFG,
SM6350_SLAVE_QDSS_CFG,
SM6350_SLAVE_VSENSE_CTRL_CFG,
SM6350_SLAVE_CAMERA_RT_THROTTLE_CFG,
SM6350_SLAVE_DISPLAY_CFG,
SM6350_SLAVE_TCSR,
SM6350_SLAVE_DCC_CFG,
SM6350_SLAVE_CNOC_DDRSS,
SM6350_SLAVE_DISPLAY_THROTTLE_CFG,
SM6350_SLAVE_NPU_CFG,
SM6350_SLAVE_AHB2PHY,
SM6350_SLAVE_GRAPHICS_3D_CFG,
SM6350_SLAVE_BOOT_ROM,
SM6350_SLAVE_VENUS_CFG,
SM6350_SLAVE_IPA_CFG,
SM6350_SLAVE_SECURITY,
SM6350_SLAVE_IMEM_CFG,
SM6350_SLAVE_CNOC_MSS,
SM6350_SLAVE_SERVICE_CNOC,
SM6350_SLAVE_USB3,
SM6350_SLAVE_VENUS_THROTTLE_CFG,
SM6350_SLAVE_RBCPR_CX_CFG,
SM6350_SLAVE_A1NOC_CFG,
SM6350_SLAVE_AOSS,
SM6350_SLAVE_PRNG,
SM6350_SLAVE_EMMC_CFG,
SM6350_SLAVE_CRYPTO_0_CFG,
SM6350_SLAVE_PIMEM_CFG,
SM6350_SLAVE_RBCPR_MX_CFG,
SM6350_SLAVE_QUP_0,
SM6350_SLAVE_QUP_1,
SM6350_SLAVE_CLK_CTL
},
};
static struct qcom_icc_node qhm_cnoc_dc_noc = {
.name = "qhm_cnoc_dc_noc",
.id = SM6350_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
.links = { SM6350_SLAVE_LLCC_CFG,
SM6350_SLAVE_GEM_NOC_CFG
},
};
static struct qcom_icc_node acm_apps = {
.name = "acm_apps",
.id = SM6350_MASTER_AMPSS_M0,
.channels = 1,
.buswidth = 16,
.num_links = 2,
.links = { SM6350_SLAVE_LLCC,
SM6350_SLAVE_GEM_NOC_SNOC
},
};
static struct qcom_icc_node acm_sys_tcu = {
.name = "acm_sys_tcu",
.id = SM6350_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SM6350_SLAVE_LLCC,
SM6350_SLAVE_GEM_NOC_SNOC
},
};
static struct qcom_icc_node qhm_gemnoc_cfg = {
.name = "qhm_gemnoc_cfg",
.id = SM6350_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 3,
.links = { SM6350_SLAVE_MCDMA_MS_MPU_CFG,
SM6350_SLAVE_SERVICE_GEM_NOC,
SM6350_SLAVE_MSS_PROC_MS_MPU_CFG
},
};
static struct qcom_icc_node qnm_cmpnoc = {
.name = "qnm_cmpnoc",
.id = SM6350_MASTER_COMPUTE_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
.links = { SM6350_SLAVE_LLCC,
SM6350_SLAVE_GEM_NOC_SNOC
},
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
.id = SM6350_MASTER_MNOC_HF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
.links = { SM6350_SLAVE_LLCC,
SM6350_SLAVE_GEM_NOC_SNOC
},
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
.id = SM6350_MASTER_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
.links = { SM6350_SLAVE_LLCC,
SM6350_SLAVE_GEM_NOC_SNOC
},
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
.id = SM6350_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM6350_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
.id = SM6350_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM6350_SLAVE_LLCC },
};
static struct qcom_icc_node qxm_gpu = {
.name = "qxm_gpu",
.id = SM6350_MASTER_GRAPHICS_3D,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SM6350_SLAVE_LLCC,
SM6350_SLAVE_GEM_NOC_SNOC
},
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
.id = SM6350_MASTER_LLCC,
.channels = 2,
.buswidth = 4,
.num_links = 1,
.links = { SM6350_SLAVE_EBI_CH0 },
};
static struct qcom_icc_node qhm_mnoc_cfg = {
.name = "qhm_mnoc_cfg",
.id = SM6350_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM6350_SLAVE_SERVICE_MNOC },
};
static struct qcom_icc_node qnm_video0 = {
.name = "qnm_video0",
.id = SM6350_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM6350_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_video_cvp = {
.name = "qnm_video_cvp",
.id = SM6350_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM6350_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_camnoc_hf = {
.name = "qxm_camnoc_hf",
.id = SM6350_MASTER_CAMNOC_HF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM6350_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qxm_camnoc_icp = {
.name = "qxm_camnoc_icp",
.id = SM6350_MASTER_CAMNOC_ICP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM6350_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_camnoc_sf = {
.name = "qxm_camnoc_sf",
.id = SM6350_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM6350_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_mdp0 = {
.name = "qxm_mdp0",
.id = SM6350_MASTER_MDP_PORT0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM6350_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node amm_npu_sys = {
.name = "amm_npu_sys",
.id = SM6350_MASTER_NPU_SYS,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM6350_SLAVE_NPU_COMPUTE_NOC },
};
static struct qcom_icc_node qhm_npu_cfg = {
.name = "qhm_npu_cfg",
.id = SM6350_MASTER_NPU_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 8,
.links = { SM6350_SLAVE_SERVICE_NPU_NOC,
SM6350_SLAVE_ISENSE_CFG,
SM6350_SLAVE_NPU_LLM_CFG,
SM6350_SLAVE_NPU_INT_DMA_BWMON_CFG,
SM6350_SLAVE_NPU_CP,
SM6350_SLAVE_NPU_TCM,
SM6350_SLAVE_NPU_CAL_DP0,
SM6350_SLAVE_NPU_DPM
},
};
static struct qcom_icc_node qhm_snoc_cfg = {
.name = "qhm_snoc_cfg",
.id = SM6350_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM6350_SLAVE_SERVICE_SNOC },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
.id = SM6350_A1NOC_SNOC_MAS,
.channels = 1,
.buswidth = 16,
.num_links = 6,
.links = { SM6350_SLAVE_SNOC_GEM_NOC_SF,
SM6350_SLAVE_PIMEM,
SM6350_SLAVE_OCIMEM,
SM6350_SLAVE_APPSS,
SM6350_SNOC_CNOC_SLV,
SM6350_SLAVE_QDSS_STM
},
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
.id = SM6350_A2NOC_SNOC_MAS,
.channels = 1,
.buswidth = 16,
.num_links = 7,
.links = { SM6350_SLAVE_SNOC_GEM_NOC_SF,
SM6350_SLAVE_PIMEM,
SM6350_SLAVE_OCIMEM,
SM6350_SLAVE_APPSS,
SM6350_SNOC_CNOC_SLV,
SM6350_SLAVE_TCU,
SM6350_SLAVE_QDSS_STM
},
};
static struct qcom_icc_node qnm_gemnoc = {
.name = "qnm_gemnoc",
.id = SM6350_MASTER_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 6,
.links = { SM6350_SLAVE_PIMEM,
SM6350_SLAVE_OCIMEM,
SM6350_SLAVE_APPSS,
SM6350_SNOC_CNOC_SLV,
SM6350_SLAVE_TCU,
SM6350_SLAVE_QDSS_STM
},
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
.id = SM6350_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SM6350_SLAVE_SNOC_GEM_NOC_GC,
SM6350_SLAVE_OCIMEM
},
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
.id = SM6350_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM6350_SLAVE_SNOC_GEM_NOC_GC },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
.id = SM6350_A1NOC_SNOC_SLV,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM6350_A1NOC_SNOC_MAS },
};
static struct qcom_icc_node srvc_aggre1_noc = {
.name = "srvc_aggre1_noc",
.id = SM6350_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
.id = SM6350_A2NOC_SNOC_SLV,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM6350_A2NOC_SNOC_MAS },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
.id = SM6350_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_camnoc_uncomp = {
.name = "qns_camnoc_uncomp",
.id = SM6350_SLAVE_CAMNOC_UNCOMP,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
.id = SM6350_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup1_core_slave = {
.name = "qup1_core_slave",
.id = SM6350_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_cdsp_gemnoc = {
.name = "qns_cdsp_gemnoc",
.id = SM6350_SLAVE_CDSP_GEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM6350_MASTER_COMPUTE_NOC },
};
static struct qcom_icc_node qhs_a1_noc_cfg = {
.name = "qhs_a1_noc_cfg",
.id = SM6350_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM6350_MASTER_A1NOC_CFG },
};
static struct qcom_icc_node qhs_a2_noc_cfg = {
.name = "qhs_a2_noc_cfg",
.id = SM6350_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM6350_MASTER_A2NOC_CFG },
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
.id = SM6350_SLAVE_AHB2PHY,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy2 = {
.name = "qhs_ahb2phy2",
.id = SM6350_SLAVE_AHB2PHY_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
.id = SM6350_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_boot_rom = {
.name = "qhs_boot_rom",
.id = SM6350_SLAVE_BOOT_ROM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
.id = SM6350_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_nrt_thrott_cfg = {
.name = "qhs_camera_nrt_thrott_cfg",
.id = SM6350_SLAVE_CAMERA_NRT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_rt_throttle_cfg = {
.name = "qhs_camera_rt_throttle_cfg",
.id = SM6350_SLAVE_CAMERA_RT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
.id = SM6350_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
.id = SM6350_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mx = {
.name = "qhs_cpr_mx",
.id = SM6350_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
.id = SM6350_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dcc_cfg = {
.name = "qhs_dcc_cfg",
.id = SM6350_SLAVE_DCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ddrss_cfg = {
.name = "qhs_ddrss_cfg",
.id = SM6350_SLAVE_CNOC_DDRSS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM6350_MASTER_CNOC_DC_NOC },
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
.id = SM6350_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display_throttle_cfg = {
.name = "qhs_display_throttle_cfg",
.id = SM6350_SLAVE_DISPLAY_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_emmc_cfg = {
.name = "qhs_emmc_cfg",
.id = SM6350_SLAVE_EMMC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_glm = {
.name = "qhs_glm",
.id = SM6350_SLAVE_GLM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
.id = SM6350_SLAVE_GRAPHICS_3D_CFG,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
.id = SM6350_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
.id = SM6350_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mnoc_cfg = {
.name = "qhs_mnoc_cfg",
.id = SM6350_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM6350_MASTER_CNOC_MNOC_CFG },
};
static struct qcom_icc_node qhs_mss_cfg = {
.name = "qhs_mss_cfg",
.id = SM6350_SLAVE_CNOC_MSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_npu_cfg = {
.name = "qhs_npu_cfg",
.id = SM6350_SLAVE_NPU_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM6350_MASTER_NPU_NOC_CFG },
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
.id = SM6350_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
.id = SM6350_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
.id = SM6350_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
.id = SM6350_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qm_cfg = {
.name = "qhs_qm_cfg",
.id = SM6350_SLAVE_QM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qm_mpu_cfg = {
.name = "qhs_qm_mpu_cfg",
.id = SM6350_SLAVE_QM_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
.id = SM6350_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
.id = SM6350_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
.id = SM6350_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_security = {
.name = "qhs_security",
.id = SM6350_SLAVE_SECURITY,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_snoc_cfg = {
.name = "qhs_snoc_cfg",
.id = SM6350_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM6350_MASTER_SNOC_CFG },
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
.id = SM6350_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
.id = SM6350_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
.id = SM6350_SLAVE_USB3,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
.id = SM6350_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_throttle_cfg = {
.name = "qhs_venus_throttle_cfg",
.id = SM6350_SLAVE_VENUS_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
.id = SM6350_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_cnoc = {
.name = "srvc_cnoc",
.id = SM6350_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gemnoc = {
.name = "qhs_gemnoc",
.id = SM6350_SLAVE_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM6350_MASTER_GEM_NOC_CFG },
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
.id = SM6350_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mcdma_ms_mpu_cfg = {
.name = "qhs_mcdma_ms_mpu_cfg",
.id = SM6350_SLAVE_MCDMA_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
.name = "qhs_mdsp_ms_mpu_cfg",
.id = SM6350_SLAVE_MSS_PROC_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_gem_noc_snoc = {
.name = "qns_gem_noc_snoc",
.id = SM6350_SLAVE_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM6350_MASTER_GEM_NOC_SNOC },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
.id = SM6350_SLAVE_LLCC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM6350_MASTER_LLCC },
};
static struct qcom_icc_node srvc_gemnoc = {
.name = "srvc_gemnoc",
.id = SM6350_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
.id = SM6350_SLAVE_EBI_CH0,
.channels = 2,
.buswidth = 4,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
.id = SM6350_SLAVE_MNOC_HF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM6350_MASTER_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
.id = SM6350_SLAVE_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM6350_MASTER_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
.id = SM6350_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cal_dp0 = {
.name = "qhs_cal_dp0",
.id = SM6350_SLAVE_NPU_CAL_DP0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cp = {
.name = "qhs_cp",
.id = SM6350_SLAVE_NPU_CP,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dma_bwmon = {
.name = "qhs_dma_bwmon",
.id = SM6350_SLAVE_NPU_INT_DMA_BWMON_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dpm = {
.name = "qhs_dpm",
.id = SM6350_SLAVE_NPU_DPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_isense = {
.name = "qhs_isense",
.id = SM6350_SLAVE_ISENSE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_llm = {
.name = "qhs_llm",
.id = SM6350_SLAVE_NPU_LLM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcm = {
.name = "qhs_tcm",
.id = SM6350_SLAVE_NPU_TCM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_npu_sys = {
.name = "qns_npu_sys",
.id = SM6350_SLAVE_NPU_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
};
static struct qcom_icc_node srvc_noc = {
.name = "srvc_noc",
.id = SM6350_SLAVE_SERVICE_NPU_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
.id = SM6350_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qns_cnoc = {
.name = "qns_cnoc",
.id = SM6350_SNOC_CNOC_SLV,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM6350_SNOC_CNOC_MAS },
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
.id = SM6350_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM6350_MASTER_SNOC_GC_MEM_NOC },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
.id = SM6350_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM6350_MASTER_SNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
.id = SM6350_SLAVE_OCIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
.id = SM6350_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
.id = SM6350_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
.id = SM6350_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
.id = SM6350_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
.keepalive = false,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxm_crypto },
};
static struct qcom_icc_bcm bcm_cn0 = {
.name = "CN0",
.keepalive = true,
.num_nodes = 41,
.nodes = { &qnm_snoc,
&xm_qdss_dap,
&qhs_a1_noc_cfg,
&qhs_a2_noc_cfg,
&qhs_ahb2phy0,
&qhs_aoss,
&qhs_boot_rom,
&qhs_camera_cfg,
&qhs_camera_nrt_thrott_cfg,
&qhs_camera_rt_throttle_cfg,
&qhs_clk_ctl,
&qhs_cpr_cx,
&qhs_cpr_mx,
&qhs_crypto0_cfg,
&qhs_dcc_cfg,
&qhs_ddrss_cfg,
&qhs_display_cfg,
&qhs_display_throttle_cfg,
&qhs_glm,
&qhs_gpuss_cfg,
&qhs_imem_cfg,
&qhs_ipa,
&qhs_mnoc_cfg,
&qhs_mss_cfg,
&qhs_npu_cfg,
&qhs_pimem_cfg,
&qhs_prng,
&qhs_qdss_cfg,
&qhs_qm_cfg,
&qhs_qm_mpu_cfg,
&qhs_qup0,
&qhs_qup1,
&qhs_security,
&qhs_snoc_cfg,
&qhs_tcsr,
&qhs_ufs_mem_cfg,
&qhs_usb3_0,
&qhs_venus_cfg,
&qhs_venus_throttle_cfg,
&qhs_vsense_ctrl_cfg,
&srvc_cnoc
},
};
static struct qcom_icc_bcm bcm_cn1 = {
.name = "CN1",
.keepalive = false,
.num_nodes = 6,
.nodes = { &xm_emmc,
&xm_sdc2,
&qhs_ahb2phy2,
&qhs_emmc_cfg,
&qhs_pdm,
&qhs_sdc2
},
};
static struct qcom_icc_bcm bcm_co0 = {
.name = "CO0",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_cdsp_gemnoc },
};
static struct qcom_icc_bcm bcm_co2 = {
.name = "CO2",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_npu },
};
static struct qcom_icc_bcm bcm_co3 = {
.name = "CO3",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxm_npu_dsp },
};
static struct qcom_icc_bcm bcm_mc0 = {
.name = "MC0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_mm0 = {
.name = "MM0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_bcm bcm_mm1 = {
.name = "MM1",
.keepalive = true,
.num_nodes = 5,
.nodes = { &qxm_camnoc_hf0_uncomp,
&qxm_camnoc_icp_uncomp,
&qxm_camnoc_sf_uncomp,
&qxm_camnoc_hf,
&qxm_mdp0
},
};
static struct qcom_icc_bcm bcm_mm2 = {
.name = "MM2",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_bcm bcm_mm3 = {
.name = "MM3",
.keepalive = false,
.num_nodes = 4,
.nodes = { &qhm_mnoc_cfg, &qnm_video0, &qnm_video_cvp, &qxm_camnoc_sf },
};
static struct qcom_icc_bcm bcm_qup0 = {
.name = "QUP0",
.keepalive = false,
.num_nodes = 4,
.nodes = { &qup0_core_master, &qup1_core_master, &qup0_core_slave, &qup1_core_slave },
};
static struct qcom_icc_bcm bcm_sh0 = {
.name = "SH0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_llcc },
};
static struct qcom_icc_bcm bcm_sh2 = {
.name = "SH2",
.keepalive = false,
.num_nodes = 1,
.nodes = { &acm_sys_tcu },
};
static struct qcom_icc_bcm bcm_sh3 = {
.name = "SH3",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_cmpnoc },
};
static struct qcom_icc_bcm bcm_sh4 = {
.name = "SH4",
.keepalive = false,
.num_nodes = 1,
.nodes = { &acm_apps },
};
static struct qcom_icc_bcm bcm_sn0 = {
.name = "SN0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_bcm bcm_sn1 = {
.name = "SN1",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxs_imem },
};
static struct qcom_icc_bcm bcm_sn2 = {
.name = "SN2",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_bcm bcm_sn3 = {
.name = "SN3",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxs_pimem },
};
static struct qcom_icc_bcm bcm_sn4 = {
.name = "SN4",
.keepalive = false,
.num_nodes = 1,
.nodes = { &xs_qdss_stm },
};
static struct qcom_icc_bcm bcm_sn5 = {
.name = "SN5",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_bcm bcm_sn6 = {
.name = "SN6",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_bcm bcm_sn10 = {
.name = "SN10",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_gemnoc },
};
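/*
 * Per-NoC provider tables: a qcom_icc_desc ties together the nodes
 * that sit on one NoC and the BCMs that need updated votes whenever
 * bandwidth is requested through any of those nodes.
 */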
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_cn1,
};
static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
[MASTER_QUP_0] = &qhm_qup_0,
[MASTER_EMMC] = &xm_emmc,
[MASTER_UFS_MEM] = &xm_ufs_mem,
[A1NOC_SNOC_SLV] = &qns_a1noc_snoc,
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
static const struct qcom_icc_desc sm6350_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
&bcm_cn1,
};
static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QUP_1] = &qhm_qup_1,
[MASTER_CRYPTO_CORE_0] = &qxm_crypto,
[MASTER_IPA] = &qxm_ipa,
[MASTER_QDSS_ETR] = &xm_qdss_etr,
[MASTER_SDCC_2] = &xm_sdc2,
[MASTER_USB3] = &xm_usb3_0,
[A2NOC_SNOC_SLV] = &qns_a2noc_snoc,
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
static const struct qcom_icc_desc sm6350_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
static struct qcom_icc_bcm * const clk_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
&bcm_mm1,
&bcm_qup0,
};
static struct qcom_icc_node * const clk_virt_nodes[] = {
[MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
[MASTER_CAMNOC_ICP_UNCOMP] = &qxm_camnoc_icp_uncomp,
[MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[MASTER_LLCC] = &llcc_mc,
[SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
[SLAVE_QUP_CORE_0] = &qup0_core_slave,
[SLAVE_QUP_CORE_1] = &qup1_core_slave,
[SLAVE_EBI_CH0] = &ebi,
};
static const struct qcom_icc_desc sm6350_clk_virt = {
.nodes = clk_virt_nodes,
.num_nodes = ARRAY_SIZE(clk_virt_nodes),
.bcms = clk_virt_bcms,
.num_bcms = ARRAY_SIZE(clk_virt_bcms),
};
static struct qcom_icc_bcm * const compute_noc_bcms[] = {
&bcm_co0,
&bcm_co2,
&bcm_co3,
};
static struct qcom_icc_node * const compute_noc_nodes[] = {
[MASTER_NPU] = &qnm_npu,
[MASTER_NPU_PROC] = &qxm_npu_dsp,
[SLAVE_CDSP_GEM_NOC] = &qns_cdsp_gemnoc,
};
static const struct qcom_icc_desc sm6350_compute_noc = {
.nodes = compute_noc_nodes,
.num_nodes = ARRAY_SIZE(compute_noc_nodes),
.bcms = compute_noc_bcms,
.num_bcms = ARRAY_SIZE(compute_noc_bcms),
};
static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
&bcm_cn1,
};
static struct qcom_icc_node * const config_noc_nodes[] = {
[SNOC_CNOC_MAS] = &qnm_snoc,
[MASTER_QDSS_DAP] = &xm_qdss_dap,
[SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
[SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
[SLAVE_AHB2PHY] = &qhs_ahb2phy0,
[SLAVE_AHB2PHY_2] = &qhs_ahb2phy2,
[SLAVE_AOSS] = &qhs_aoss,
[SLAVE_BOOT_ROM] = &qhs_boot_rom,
[SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
[SLAVE_CAMERA_NRT_THROTTLE_CFG] = &qhs_camera_nrt_thrott_cfg,
[SLAVE_CAMERA_RT_THROTTLE_CFG] = &qhs_camera_rt_throttle_cfg,
[SLAVE_CLK_CTL] = &qhs_clk_ctl,
[SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
[SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
[SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
[SLAVE_DCC_CFG] = &qhs_dcc_cfg,
[SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
[SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
[SLAVE_DISPLAY_THROTTLE_CFG] = &qhs_display_throttle_cfg,
[SLAVE_EMMC_CFG] = &qhs_emmc_cfg,
[SLAVE_GLM] = &qhs_glm,
[SLAVE_GRAPHICS_3D_CFG] = &qhs_gpuss_cfg,
[SLAVE_IMEM_CFG] = &qhs_imem_cfg,
[SLAVE_IPA_CFG] = &qhs_ipa,
[SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
[SLAVE_CNOC_MSS] = &qhs_mss_cfg,
[SLAVE_NPU_CFG] = &qhs_npu_cfg,
[SLAVE_PDM] = &qhs_pdm,
[SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
[SLAVE_PRNG] = &qhs_prng,
[SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
[SLAVE_QM_CFG] = &qhs_qm_cfg,
[SLAVE_QM_MPU_CFG] = &qhs_qm_mpu_cfg,
[SLAVE_QUP_0] = &qhs_qup0,
[SLAVE_QUP_1] = &qhs_qup1,
[SLAVE_SDCC_2] = &qhs_sdc2,
[SLAVE_SECURITY] = &qhs_security,
[SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
[SLAVE_TCSR] = &qhs_tcsr,
[SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
[SLAVE_USB3] = &qhs_usb3_0,
[SLAVE_VENUS_CFG] = &qhs_venus_cfg,
[SLAVE_VENUS_THROTTLE_CFG] = &qhs_venus_throttle_cfg,
[SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
[SLAVE_SERVICE_CNOC] = &srvc_cnoc,
};
static const struct qcom_icc_desc sm6350_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
static struct qcom_icc_bcm * const dc_noc_bcms[] = {
};
static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
[SLAVE_GEM_NOC_CFG] = &qhs_gemnoc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
};
static const struct qcom_icc_desc sm6350_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
.bcms = dc_noc_bcms,
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
&bcm_sh3,
&bcm_sh4,
};
static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_AMPSS_M0] = &acm_apps,
[MASTER_SYS_TCU] = &acm_sys_tcu,
[MASTER_GEM_NOC_CFG] = &qhm_gemnoc_cfg,
[MASTER_COMPUTE_NOC] = &qnm_cmpnoc,
[MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
[MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
[MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
[MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
[MASTER_GRAPHICS_3D] = &qxm_gpu,
[SLAVE_MCDMA_MS_MPU_CFG] = &qhs_mcdma_ms_mpu_cfg,
[SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
[SLAVE_GEM_NOC_SNOC] = &qns_gem_noc_snoc,
[SLAVE_LLCC] = &qns_llcc,
[SLAVE_SERVICE_GEM_NOC] = &srvc_gemnoc,
};
static const struct qcom_icc_desc sm6350_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm2,
&bcm_mm3,
};
static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
[MASTER_VIDEO_P0] = &qnm_video0,
[MASTER_VIDEO_PROC] = &qnm_video_cvp,
[MASTER_CAMNOC_HF] = &qxm_camnoc_hf,
[MASTER_CAMNOC_ICP] = &qxm_camnoc_icp,
[MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
[MASTER_MDP_PORT0] = &qxm_mdp0,
[SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
[SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
};
static const struct qcom_icc_desc sm6350_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
static struct qcom_icc_bcm * const npu_noc_bcms[] = {
};
static struct qcom_icc_node * const npu_noc_nodes[] = {
[MASTER_NPU_SYS] = &amm_npu_sys,
[MASTER_NPU_NOC_CFG] = &qhm_npu_cfg,
[SLAVE_NPU_CAL_DP0] = &qhs_cal_dp0,
[SLAVE_NPU_CP] = &qhs_cp,
[SLAVE_NPU_INT_DMA_BWMON_CFG] = &qhs_dma_bwmon,
[SLAVE_NPU_DPM] = &qhs_dpm,
[SLAVE_ISENSE_CFG] = &qhs_isense,
[SLAVE_NPU_LLM_CFG] = &qhs_llm,
[SLAVE_NPU_TCM] = &qhs_tcm,
[SLAVE_NPU_COMPUTE_NOC] = &qns_npu_sys,
[SLAVE_SERVICE_NPU_NOC] = &srvc_noc,
};
static const struct qcom_icc_desc sm6350_npu_noc = {
.nodes = npu_noc_nodes,
.num_nodes = ARRAY_SIZE(npu_noc_nodes),
.bcms = npu_noc_bcms,
.num_bcms = ARRAY_SIZE(npu_noc_bcms),
};
static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn10,
&bcm_sn2,
&bcm_sn3,
&bcm_sn4,
&bcm_sn5,
&bcm_sn6,
};
static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_SNOC_CFG] = &qhm_snoc_cfg,
[A1NOC_SNOC_MAS] = &qnm_aggre1_noc,
[A2NOC_SNOC_MAS] = &qnm_aggre2_noc,
[MASTER_GEM_NOC_SNOC] = &qnm_gemnoc,
[MASTER_PIMEM] = &qxm_pimem,
[MASTER_GIC] = &xm_gic,
[SLAVE_APPSS] = &qhs_apss,
[SNOC_CNOC_SLV] = &qns_cnoc,
[SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
[SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
[SLAVE_OCIMEM] = &qxs_imem,
[SLAVE_PIMEM] = &qxs_pimem,
[SLAVE_SERVICE_SNOC] = &srvc_snoc,
[SLAVE_QDSS_STM] = &xs_qdss_stm,
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
static const struct qcom_icc_desc sm6350_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
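/* Map each device-tree NoC instance onto its provider description. */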
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sm6350-aggre1-noc",
.data = &sm6350_aggre1_noc},
{ .compatible = "qcom,sm6350-aggre2-noc",
.data = &sm6350_aggre2_noc},
{ .compatible = "qcom,sm6350-clk-virt",
.data = &sm6350_clk_virt},
{ .compatible = "qcom,sm6350-compute-noc",
.data = &sm6350_compute_noc},
{ .compatible = "qcom,sm6350-config-noc",
.data = &sm6350_config_noc},
{ .compatible = "qcom,sm6350-dc-noc",
.data = &sm6350_dc_noc},
{ .compatible = "qcom,sm6350-gem-noc",
.data = &sm6350_gem_noc},
{ .compatible = "qcom,sm6350-mmss-noc",
.data = &sm6350_mmss_noc},
{ .compatible = "qcom,sm6350-npu-noc",
.data = &sm6350_npu_noc},
{ .compatible = "qcom,sm6350-system-noc",
.data = &sm6350_system_noc},
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
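/*
 * icc_sync_state lets the framework drop the maximised boot-time
 * bandwidth votes once every interconnect consumer has probed.
 */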
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
.remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sm6350",
.of_match_table = qnoc_of_match,
.sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
MODULE_DESCRIPTION("Qualcomm SM6350 NoC driver");
MODULE_LICENSE("GPL v2");
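/*
 * Consumer-side sketch (illustrative, not part of this provider): a
 * client driver with an "interconnects" DT property would request a
 * path by name and vote bandwidth on it through the generic
 * interconnect API. The path name and the bandwidth figures below are
 * made-up examples.
 *
 *	struct icc_path *path;
 *	int ret;
 *
 *	path = of_icc_get(dev, "memory");	// named path from DT
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	ret = icc_set_bw(path, kBps_to_icc(1000), kBps_to_icc(2000));
 *	if (ret)
 *		dev_warn(dev, "bandwidth vote failed: %d\n", ret);
 *	icc_put(path);
 */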
| linux-master | drivers/interconnect/qcom/sm6350.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Qualcomm MSM8996 Network-on-Chip (NoC) QoS driver
*
* Copyright (c) 2021 Yassine Oudjana <[email protected]>
*/
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <dt-bindings/interconnect/qcom,msm8996.h>
#include "icc-rpm.h"
#include "msm8996.h"
static const char * const mm_intf_clocks[] = {
"iface"
};
static const char * const a0noc_intf_clocks[] = {
"aggre0_snoc_axi",
"aggre0_cnoc_ahb",
"aggre0_noc_mpu_cfg"
};
static const char * const a2noc_intf_clocks[] = {
"aggre2_ufs_axi",
"ufs_axi"
};
static const u16 mas_a0noc_common_links[] = {
MSM8996_SLAVE_A0NOC_SNOC
};
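/*
 * Master nodes. .mas_rpm_id names the endpoint towards the RPM
 * firmware (-1 where the RPM takes no vote for it); the .qos fields
 * describe how AP-owned ports are programmed into the NoC QoS
 * registers.
 */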
static struct qcom_icc_node mas_pcie_0 = {
.name = "mas_pcie_0",
.id = MSM8996_MASTER_PCIE_0,
.buswidth = 8,
.mas_rpm_id = 65,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 1,
.qos.prio_level = 1,
.qos.qos_port = 0,
.num_links = ARRAY_SIZE(mas_a0noc_common_links),
.links = mas_a0noc_common_links
};
static struct qcom_icc_node mas_pcie_1 = {
.name = "mas_pcie_1",
.id = MSM8996_MASTER_PCIE_1,
.buswidth = 8,
.mas_rpm_id = 66,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 1,
.qos.prio_level = 1,
.qos.qos_port = 1,
.num_links = ARRAY_SIZE(mas_a0noc_common_links),
.links = mas_a0noc_common_links
};
static struct qcom_icc_node mas_pcie_2 = {
.name = "mas_pcie_2",
.id = MSM8996_MASTER_PCIE_2,
.buswidth = 8,
.mas_rpm_id = 119,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 1,
.qos.prio_level = 1,
.qos.qos_port = 2,
.num_links = ARRAY_SIZE(mas_a0noc_common_links),
.links = mas_a0noc_common_links
};
static const u16 mas_a1noc_common_links[] = {
MSM8996_SLAVE_A1NOC_SNOC
};
static struct qcom_icc_node mas_cnoc_a1noc = {
.name = "mas_cnoc_a1noc",
.id = MSM8996_MASTER_CNOC_A1NOC,
.buswidth = 8,
.mas_rpm_id = 116,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mas_a1noc_common_links),
.links = mas_a1noc_common_links
};
static struct qcom_icc_node mas_crypto_c0 = {
.name = "mas_crypto_c0",
.id = MSM8996_MASTER_CRYPTO_CORE0,
.buswidth = 8,
.mas_rpm_id = 23,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 1,
.qos.prio_level = 1,
.qos.qos_port = 0,
.num_links = ARRAY_SIZE(mas_a1noc_common_links),
.links = mas_a1noc_common_links
};
static struct qcom_icc_node mas_pnoc_a1noc = {
.name = "mas_pnoc_a1noc",
.id = MSM8996_MASTER_PNOC_A1NOC,
.buswidth = 8,
.mas_rpm_id = 117,
.slv_rpm_id = -1,
.qos.ap_owned = false,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 1,
.num_links = ARRAY_SIZE(mas_a1noc_common_links),
.links = mas_a1noc_common_links
};
static const u16 mas_a2noc_common_links[] = {
MSM8996_SLAVE_A2NOC_SNOC
};
static struct qcom_icc_node mas_usb3 = {
.name = "mas_usb3",
.id = MSM8996_MASTER_USB3,
.buswidth = 8,
.mas_rpm_id = 32,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 1,
.qos.prio_level = 1,
.qos.qos_port = 3,
.num_links = ARRAY_SIZE(mas_a2noc_common_links),
.links = mas_a2noc_common_links
};
static struct qcom_icc_node mas_ipa = {
.name = "mas_ipa",
.id = MSM8996_MASTER_IPA,
.buswidth = 8,
.mas_rpm_id = 59,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = -1,
.num_links = ARRAY_SIZE(mas_a2noc_common_links),
.links = mas_a2noc_common_links
};
static struct qcom_icc_node mas_ufs = {
.name = "mas_ufs",
.id = MSM8996_MASTER_UFS,
.buswidth = 8,
.mas_rpm_id = 68,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 1,
.qos.prio_level = 1,
.qos.qos_port = 2,
.num_links = ARRAY_SIZE(mas_a2noc_common_links),
.links = mas_a2noc_common_links
};
static const u16 mas_apps_proc_links[] = {
MSM8996_SLAVE_BIMC_SNOC_1,
MSM8996_SLAVE_EBI_CH0,
MSM8996_SLAVE_BIMC_SNOC_0
};
static struct qcom_icc_node mas_apps_proc = {
.name = "mas_apps_proc",
.id = MSM8996_MASTER_AMPSS_M0,
.buswidth = 8,
.mas_rpm_id = 0,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 0,
.num_links = ARRAY_SIZE(mas_apps_proc_links),
.links = mas_apps_proc_links
};
static const u16 mas_oxili_common_links[] = {
MSM8996_SLAVE_BIMC_SNOC_1,
MSM8996_SLAVE_HMSS_L3,
MSM8996_SLAVE_EBI_CH0,
MSM8996_SLAVE_BIMC_SNOC_0
};
static struct qcom_icc_node mas_oxili = {
.name = "mas_oxili",
.id = MSM8996_MASTER_GRAPHICS_3D,
.buswidth = 8,
.mas_rpm_id = 6,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 1,
.num_links = ARRAY_SIZE(mas_oxili_common_links),
.links = mas_oxili_common_links
};
static struct qcom_icc_node mas_mnoc_bimc = {
.name = "mas_mnoc_bimc",
.id = MSM8996_MASTER_MNOC_BIMC,
.buswidth = 8,
.mas_rpm_id = 2,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 2,
.num_links = ARRAY_SIZE(mas_oxili_common_links),
.links = mas_oxili_common_links
};
static const u16 mas_snoc_bimc_links[] = {
MSM8996_SLAVE_HMSS_L3,
MSM8996_SLAVE_EBI_CH0
};
static struct qcom_icc_node mas_snoc_bimc = {
.name = "mas_snoc_bimc",
.id = MSM8996_MASTER_SNOC_BIMC,
.buswidth = 8,
.mas_rpm_id = 3,
.slv_rpm_id = -1,
.qos.ap_owned = false,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = -1,
.num_links = ARRAY_SIZE(mas_snoc_bimc_links),
.links = mas_snoc_bimc_links
};
static const u16 mas_snoc_cnoc_links[] = {
MSM8996_SLAVE_CLK_CTL,
MSM8996_SLAVE_RBCPR_CX,
MSM8996_SLAVE_A2NOC_SMMU_CFG,
MSM8996_SLAVE_A0NOC_MPU_CFG,
MSM8996_SLAVE_MESSAGE_RAM,
MSM8996_SLAVE_CNOC_MNOC_MMSS_CFG,
MSM8996_SLAVE_PCIE_0_CFG,
MSM8996_SLAVE_TLMM,
MSM8996_SLAVE_MPM,
MSM8996_SLAVE_A0NOC_SMMU_CFG,
MSM8996_SLAVE_EBI1_PHY_CFG,
MSM8996_SLAVE_BIMC_CFG,
MSM8996_SLAVE_PIMEM_CFG,
MSM8996_SLAVE_RBCPR_MX,
MSM8996_SLAVE_PRNG,
MSM8996_SLAVE_PCIE20_AHB2PHY,
MSM8996_SLAVE_A2NOC_MPU_CFG,
MSM8996_SLAVE_QDSS_CFG,
MSM8996_SLAVE_A2NOC_CFG,
MSM8996_SLAVE_A0NOC_CFG,
MSM8996_SLAVE_UFS_CFG,
MSM8996_SLAVE_CRYPTO_0_CFG,
MSM8996_SLAVE_PCIE_1_CFG,
MSM8996_SLAVE_SNOC_CFG,
MSM8996_SLAVE_SNOC_MPU_CFG,
MSM8996_SLAVE_A1NOC_MPU_CFG,
MSM8996_SLAVE_A1NOC_SMMU_CFG,
MSM8996_SLAVE_PCIE_2_CFG,
MSM8996_SLAVE_CNOC_MNOC_CFG,
MSM8996_SLAVE_QDSS_RBCPR_APU_CFG,
MSM8996_SLAVE_PMIC_ARB,
MSM8996_SLAVE_IMEM_CFG,
MSM8996_SLAVE_A1NOC_CFG,
MSM8996_SLAVE_SSC_CFG,
MSM8996_SLAVE_TCSR,
MSM8996_SLAVE_LPASS_SMMU_CFG,
MSM8996_SLAVE_DCC_CFG
};
static struct qcom_icc_node mas_snoc_cnoc = {
.name = "mas_snoc_cnoc",
.id = MSM8996_MASTER_SNOC_CNOC,
.buswidth = 8,
.mas_rpm_id = 52,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_snoc_cnoc_links),
.links = mas_snoc_cnoc_links
};
static const u16 mas_qdss_dap_links[] = {
MSM8996_SLAVE_QDSS_RBCPR_APU_CFG,
MSM8996_SLAVE_RBCPR_CX,
MSM8996_SLAVE_A2NOC_SMMU_CFG,
MSM8996_SLAVE_A0NOC_MPU_CFG,
MSM8996_SLAVE_MESSAGE_RAM,
MSM8996_SLAVE_PCIE_0_CFG,
MSM8996_SLAVE_TLMM,
MSM8996_SLAVE_MPM,
MSM8996_SLAVE_A0NOC_SMMU_CFG,
MSM8996_SLAVE_EBI1_PHY_CFG,
MSM8996_SLAVE_BIMC_CFG,
MSM8996_SLAVE_PIMEM_CFG,
MSM8996_SLAVE_RBCPR_MX,
MSM8996_SLAVE_CLK_CTL,
MSM8996_SLAVE_PRNG,
MSM8996_SLAVE_PCIE20_AHB2PHY,
MSM8996_SLAVE_A2NOC_MPU_CFG,
MSM8996_SLAVE_QDSS_CFG,
MSM8996_SLAVE_A2NOC_CFG,
MSM8996_SLAVE_A0NOC_CFG,
MSM8996_SLAVE_UFS_CFG,
MSM8996_SLAVE_CRYPTO_0_CFG,
MSM8996_SLAVE_CNOC_A1NOC,
MSM8996_SLAVE_PCIE_1_CFG,
MSM8996_SLAVE_SNOC_CFG,
MSM8996_SLAVE_SNOC_MPU_CFG,
MSM8996_SLAVE_A1NOC_MPU_CFG,
MSM8996_SLAVE_A1NOC_SMMU_CFG,
MSM8996_SLAVE_PCIE_2_CFG,
MSM8996_SLAVE_CNOC_MNOC_CFG,
MSM8996_SLAVE_CNOC_MNOC_MMSS_CFG,
MSM8996_SLAVE_PMIC_ARB,
MSM8996_SLAVE_IMEM_CFG,
MSM8996_SLAVE_A1NOC_CFG,
MSM8996_SLAVE_SSC_CFG,
MSM8996_SLAVE_TCSR,
MSM8996_SLAVE_LPASS_SMMU_CFG,
MSM8996_SLAVE_DCC_CFG
};
static struct qcom_icc_node mas_qdss_dap = {
.name = "mas_qdss_dap",
.id = MSM8996_MASTER_QDSS_DAP,
.buswidth = 8,
.mas_rpm_id = 49,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mas_qdss_dap_links),
.links = mas_qdss_dap_links
};
static const u16 mas_cnoc_mnoc_mmss_cfg_links[] = {
MSM8996_SLAVE_MMAGIC_CFG,
MSM8996_SLAVE_DSA_MPU_CFG,
MSM8996_SLAVE_MMSS_CLK_CFG,
MSM8996_SLAVE_CAMERA_THROTTLE_CFG,
MSM8996_SLAVE_VENUS_CFG,
MSM8996_SLAVE_SMMU_VFE_CFG,
MSM8996_SLAVE_MISC_CFG,
MSM8996_SLAVE_SMMU_CPP_CFG,
MSM8996_SLAVE_GRAPHICS_3D_CFG,
MSM8996_SLAVE_DISPLAY_THROTTLE_CFG,
MSM8996_SLAVE_VENUS_THROTTLE_CFG,
MSM8996_SLAVE_CAMERA_CFG,
MSM8996_SLAVE_DISPLAY_CFG,
MSM8996_SLAVE_CPR_CFG,
MSM8996_SLAVE_SMMU_ROTATOR_CFG,
MSM8996_SLAVE_DSA_CFG,
MSM8996_SLAVE_SMMU_VENUS_CFG,
MSM8996_SLAVE_VMEM_CFG,
MSM8996_SLAVE_SMMU_JPEG_CFG,
MSM8996_SLAVE_SMMU_MDP_CFG,
MSM8996_SLAVE_MNOC_MPU_CFG
};
static struct qcom_icc_node mas_cnoc_mnoc_mmss_cfg = {
.name = "mas_cnoc_mnoc_mmss_cfg",
.id = MSM8996_MASTER_CNOC_MNOC_MMSS_CFG,
.buswidth = 8,
.mas_rpm_id = 4,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mas_cnoc_mnoc_mmss_cfg_links),
.links = mas_cnoc_mnoc_mmss_cfg_links
};
static const u16 mas_cnoc_mnoc_cfg_links[] = {
MSM8996_SLAVE_SERVICE_MNOC
};
static struct qcom_icc_node mas_cnoc_mnoc_cfg = {
.name = "mas_cnoc_mnoc_cfg",
.id = MSM8996_MASTER_CNOC_MNOC_CFG,
.buswidth = 8,
.mas_rpm_id = 5,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mas_cnoc_mnoc_cfg_links),
.links = mas_cnoc_mnoc_cfg_links
};
static const u16 mas_mnoc_bimc_common_links[] = {
MSM8996_SLAVE_MNOC_BIMC
};
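/*
 * Multimedia masters share a single route into BIMC; they run with
 * QoS in bypass mode, relying on bandwidth votes rather than fixed
 * port priorities.
 */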
static struct qcom_icc_node mas_cpp = {
.name = "mas_cpp",
.id = MSM8996_MASTER_CPP,
.buswidth = 32,
.mas_rpm_id = 115,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 5,
.num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
.links = mas_mnoc_bimc_common_links
};
static struct qcom_icc_node mas_jpeg = {
.name = "mas_jpeg",
.id = MSM8996_MASTER_JPEG,
.buswidth = 32,
.mas_rpm_id = 7,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 7,
.num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
.links = mas_mnoc_bimc_common_links
};
static struct qcom_icc_node mas_mdp_p0 = {
.name = "mas_mdp_p0",
.id = MSM8996_MASTER_MDP_PORT0,
.buswidth = 32,
.mas_rpm_id = 8,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 1,
.num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
.links = mas_mnoc_bimc_common_links
};
static struct qcom_icc_node mas_mdp_p1 = {
.name = "mas_mdp_p1",
.id = MSM8996_MASTER_MDP_PORT1,
.buswidth = 32,
.mas_rpm_id = 61,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 2,
.num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
.links = mas_mnoc_bimc_common_links
};
static struct qcom_icc_node mas_rotator = {
.name = "mas_rotator",
.id = MSM8996_MASTER_ROTATOR,
.buswidth = 32,
.mas_rpm_id = 120,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 0,
.num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
.links = mas_mnoc_bimc_common_links
};
static struct qcom_icc_node mas_venus = {
.name = "mas_venus",
.id = MSM8996_MASTER_VIDEO_P0,
.buswidth = 32,
.mas_rpm_id = 9,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 3,
.num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
.links = mas_mnoc_bimc_common_links
};
static struct qcom_icc_node mas_vfe = {
.name = "mas_vfe",
.id = MSM8996_MASTER_VFE,
.buswidth = 32,
.mas_rpm_id = 11,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 6,
.num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
.links = mas_mnoc_bimc_common_links
};
static const u16 mas_vmem_common_links[] = {
MSM8996_SLAVE_VMEM
};
static struct qcom_icc_node mas_snoc_vmem = {
.name = "mas_snoc_vmem",
.id = MSM8996_MASTER_SNOC_VMEM,
.buswidth = 32,
.mas_rpm_id = 114,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mas_vmem_common_links),
.links = mas_vmem_common_links
};
static struct qcom_icc_node mas_venus_vmem = {
.name = "mas_venus_vmem",
.id = MSM8996_MASTER_VIDEO_P0_OCMEM,
.buswidth = 32,
.mas_rpm_id = 121,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mas_vmem_common_links),
.links = mas_vmem_common_links
};
static const u16 mas_snoc_pnoc_links[] = {
MSM8996_SLAVE_BLSP_1,
MSM8996_SLAVE_BLSP_2,
MSM8996_SLAVE_SDCC_1,
MSM8996_SLAVE_SDCC_2,
MSM8996_SLAVE_SDCC_4,
MSM8996_SLAVE_TSIF,
MSM8996_SLAVE_PDM,
MSM8996_SLAVE_AHB2PHY
};
static struct qcom_icc_node mas_snoc_pnoc = {
.name = "mas_snoc_pnoc",
.id = MSM8996_MASTER_SNOC_PNOC,
.buswidth = 8,
.mas_rpm_id = 44,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_snoc_pnoc_links),
.links = mas_snoc_pnoc_links
};
static const u16 mas_pnoc_a1noc_common_links[] = {
MSM8996_SLAVE_PNOC_A1NOC
};
static struct qcom_icc_node mas_sdcc_1 = {
.name = "mas_sdcc_1",
.id = MSM8996_MASTER_SDCC_1,
.buswidth = 8,
.mas_rpm_id = 33,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
.links = mas_pnoc_a1noc_common_links
};
static struct qcom_icc_node mas_sdcc_2 = {
.name = "mas_sdcc_2",
.id = MSM8996_MASTER_SDCC_2,
.buswidth = 8,
.mas_rpm_id = 35,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
.links = mas_pnoc_a1noc_common_links
};
static struct qcom_icc_node mas_sdcc_4 = {
.name = "mas_sdcc_4",
.id = MSM8996_MASTER_SDCC_4,
.buswidth = 8,
.mas_rpm_id = 36,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
.links = mas_pnoc_a1noc_common_links
};
static struct qcom_icc_node mas_usb_hs = {
.name = "mas_usb_hs",
.id = MSM8996_MASTER_USB_HS,
.buswidth = 8,
.mas_rpm_id = 42,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
.links = mas_pnoc_a1noc_common_links
};
static struct qcom_icc_node mas_blsp_1 = {
.name = "mas_blsp_1",
.id = MSM8996_MASTER_BLSP_1,
.buswidth = 4,
.mas_rpm_id = 41,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
.links = mas_pnoc_a1noc_common_links
};
static struct qcom_icc_node mas_blsp_2 = {
.name = "mas_blsp_2",
.id = MSM8996_MASTER_BLSP_2,
.buswidth = 4,
.mas_rpm_id = 39,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
.links = mas_pnoc_a1noc_common_links
};
static struct qcom_icc_node mas_tsif = {
.name = "mas_tsif",
.id = MSM8996_MASTER_TSIF,
.buswidth = 4,
.mas_rpm_id = 37,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
.links = mas_pnoc_a1noc_common_links
};
static const u16 mas_hmss_links[] = {
MSM8996_SLAVE_PIMEM,
MSM8996_SLAVE_OCIMEM,
MSM8996_SLAVE_SNOC_BIMC
};
static struct qcom_icc_node mas_hmss = {
.name = "mas_hmss",
.id = MSM8996_MASTER_HMSS,
.buswidth = 8,
.mas_rpm_id = 118,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 1,
.qos.prio_level = 1,
.qos.qos_port = 4,
.num_links = ARRAY_SIZE(mas_hmss_links),
.links = mas_hmss_links
};
static const u16 mas_qdss_common_links[] = {
MSM8996_SLAVE_PIMEM,
MSM8996_SLAVE_USB3,
MSM8996_SLAVE_OCIMEM,
MSM8996_SLAVE_SNOC_BIMC,
MSM8996_SLAVE_SNOC_PNOC
};
static struct qcom_icc_node mas_qdss_bam = {
.name = "mas_qdss_bam",
.id = MSM8996_MASTER_QDSS_BAM,
.buswidth = 16,
.mas_rpm_id = 19,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 1,
.qos.prio_level = 1,
.qos.qos_port = 2,
.num_links = ARRAY_SIZE(mas_qdss_common_links),
.links = mas_qdss_common_links
};
static const u16 mas_snoc_cfg_links[] = {
MSM8996_SLAVE_SERVICE_SNOC
};
static struct qcom_icc_node mas_snoc_cfg = {
.name = "mas_snoc_cfg",
.id = MSM8996_MASTER_SNOC_CFG,
.buswidth = 16,
.mas_rpm_id = 20,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mas_snoc_cfg_links),
.links = mas_snoc_cfg_links
};
static const u16 mas_bimc_snoc_0_links[] = {
MSM8996_SLAVE_SNOC_VMEM,
MSM8996_SLAVE_USB3,
MSM8996_SLAVE_PIMEM,
MSM8996_SLAVE_LPASS,
MSM8996_SLAVE_APPSS,
MSM8996_SLAVE_SNOC_CNOC,
MSM8996_SLAVE_SNOC_PNOC,
MSM8996_SLAVE_OCIMEM,
MSM8996_SLAVE_QDSS_STM
};
static struct qcom_icc_node mas_bimc_snoc_0 = {
.name = "mas_bimc_snoc_0",
.id = MSM8996_MASTER_BIMC_SNOC_0,
.buswidth = 16,
.mas_rpm_id = 21,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mas_bimc_snoc_0_links),
.links = mas_bimc_snoc_0_links
};
static const u16 mas_bimc_snoc_1_links[] = {
MSM8996_SLAVE_PCIE_2,
MSM8996_SLAVE_PCIE_1,
MSM8996_SLAVE_PCIE_0
};
static struct qcom_icc_node mas_bimc_snoc_1 = {
.name = "mas_bimc_snoc_1",
.id = MSM8996_MASTER_BIMC_SNOC_1,
.buswidth = 16,
.mas_rpm_id = 109,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mas_bimc_snoc_1_links),
.links = mas_bimc_snoc_1_links
};
static const u16 mas_a0noc_snoc_links[] = {
MSM8996_SLAVE_SNOC_PNOC,
MSM8996_SLAVE_OCIMEM,
MSM8996_SLAVE_APPSS,
MSM8996_SLAVE_SNOC_BIMC,
MSM8996_SLAVE_PIMEM
};
static struct qcom_icc_node mas_a0noc_snoc = {
.name = "mas_a0noc_snoc",
.id = MSM8996_MASTER_A0NOC_SNOC,
.buswidth = 16,
.mas_rpm_id = 110,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mas_a0noc_snoc_links),
.links = mas_a0noc_snoc_links
};
static const u16 mas_a1noc_snoc_links[] = {
MSM8996_SLAVE_SNOC_VMEM,
MSM8996_SLAVE_USB3,
MSM8996_SLAVE_PCIE_0,
MSM8996_SLAVE_PIMEM,
MSM8996_SLAVE_PCIE_2,
MSM8996_SLAVE_LPASS,
MSM8996_SLAVE_PCIE_1,
MSM8996_SLAVE_APPSS,
MSM8996_SLAVE_SNOC_BIMC,
MSM8996_SLAVE_SNOC_CNOC,
MSM8996_SLAVE_SNOC_PNOC,
MSM8996_SLAVE_OCIMEM,
MSM8996_SLAVE_QDSS_STM
};
static struct qcom_icc_node mas_a1noc_snoc = {
.name = "mas_a1noc_snoc",
.id = MSM8996_MASTER_A1NOC_SNOC,
.buswidth = 16,
.mas_rpm_id = 111,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_a1noc_snoc_links),
.links = mas_a1noc_snoc_links
};
static const u16 mas_a2noc_snoc_links[] = {
MSM8996_SLAVE_SNOC_VMEM,
MSM8996_SLAVE_USB3,
MSM8996_SLAVE_PCIE_1,
MSM8996_SLAVE_PIMEM,
MSM8996_SLAVE_PCIE_2,
MSM8996_SLAVE_QDSS_STM,
MSM8996_SLAVE_LPASS,
MSM8996_SLAVE_SNOC_BIMC,
MSM8996_SLAVE_SNOC_CNOC,
MSM8996_SLAVE_SNOC_PNOC,
MSM8996_SLAVE_OCIMEM,
MSM8996_SLAVE_PCIE_0
};
static struct qcom_icc_node mas_a2noc_snoc = {
.name = "mas_a2noc_snoc",
.id = MSM8996_MASTER_A2NOC_SNOC,
.buswidth = 16,
.mas_rpm_id = 112,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_a2noc_snoc_links),
.links = mas_a2noc_snoc_links
};
static struct qcom_icc_node mas_qdss_etr = {
.name = "mas_qdss_etr",
.id = MSM8996_MASTER_QDSS_ETR,
.buswidth = 16,
.mas_rpm_id = 31,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 1,
.qos.prio_level = 1,
.qos.qos_port = 3,
.num_links = ARRAY_SIZE(mas_qdss_common_links),
.links = mas_qdss_common_links
};
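/*
 * Slave nodes carry .slv_rpm_id instead of a master ID. Gateway
 * slaves (e.g. slv_a0noc_snoc) link back to the matching master on
 * the next NoC so that paths can cross bus boundaries.
 */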
static const u16 slv_a0noc_snoc_links[] = {
MSM8996_MASTER_A0NOC_SNOC
};
static struct qcom_icc_node slv_a0noc_snoc = {
.name = "slv_a0noc_snoc",
.id = MSM8996_SLAVE_A0NOC_SNOC,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 141,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(slv_a0noc_snoc_links),
.links = slv_a0noc_snoc_links
};
static const u16 slv_a1noc_snoc_links[] = {
MSM8996_MASTER_A1NOC_SNOC
};
static struct qcom_icc_node slv_a1noc_snoc = {
.name = "slv_a1noc_snoc",
.id = MSM8996_SLAVE_A1NOC_SNOC,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 142,
.num_links = ARRAY_SIZE(slv_a1noc_snoc_links),
.links = slv_a1noc_snoc_links
};
static const u16 slv_a2noc_snoc_links[] = {
MSM8996_MASTER_A2NOC_SNOC
};
static struct qcom_icc_node slv_a2noc_snoc = {
.name = "slv_a2noc_snoc",
.id = MSM8996_SLAVE_A2NOC_SNOC,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 143,
.num_links = ARRAY_SIZE(slv_a2noc_snoc_links),
.links = slv_a2noc_snoc_links
};
static struct qcom_icc_node slv_ebi = {
.name = "slv_ebi",
.id = MSM8996_SLAVE_EBI_CH0,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 0
};
static struct qcom_icc_node slv_hmss_l3 = {
.name = "slv_hmss_l3",
.id = MSM8996_SLAVE_HMSS_L3,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 160
};
static const u16 slv_bimc_snoc_0_links[] = {
MSM8996_MASTER_BIMC_SNOC_0
};
static struct qcom_icc_node slv_bimc_snoc_0 = {
.name = "slv_bimc_snoc_0",
.id = MSM8996_SLAVE_BIMC_SNOC_0,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 2,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(slv_bimc_snoc_0_links),
.links = slv_bimc_snoc_0_links
};
static const u16 slv_bimc_snoc_1_links[] = {
MSM8996_MASTER_BIMC_SNOC_1
};
static struct qcom_icc_node slv_bimc_snoc_1 = {
.name = "slv_bimc_snoc_1",
.id = MSM8996_SLAVE_BIMC_SNOC_1,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 138,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(slv_bimc_snoc_1_links),
.links = slv_bimc_snoc_1_links
};
static const u16 slv_cnoc_a1noc_links[] = {
MSM8996_MASTER_CNOC_A1NOC
};
static struct qcom_icc_node slv_cnoc_a1noc = {
.name = "slv_cnoc_a1noc",
.id = MSM8996_SLAVE_CNOC_A1NOC,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 75,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(slv_cnoc_a1noc_links),
.links = slv_cnoc_a1noc_links
};
static struct qcom_icc_node slv_clk_ctl = {
.name = "slv_clk_ctl",
.id = MSM8996_SLAVE_CLK_CTL,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 47
};
static struct qcom_icc_node slv_tcsr = {
.name = "slv_tcsr",
.id = MSM8996_SLAVE_TCSR,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 50
};
static struct qcom_icc_node slv_tlmm = {
.name = "slv_tlmm",
.id = MSM8996_SLAVE_TLMM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 51
};
static struct qcom_icc_node slv_crypto0_cfg = {
.name = "slv_crypto0_cfg",
.id = MSM8996_SLAVE_CRYPTO_0_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 52,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_mpm = {
.name = "slv_mpm",
.id = MSM8996_SLAVE_MPM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 62,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_pimem_cfg = {
.name = "slv_pimem_cfg",
.id = MSM8996_SLAVE_PIMEM_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 167,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_imem_cfg = {
.name = "slv_imem_cfg",
.id = MSM8996_SLAVE_IMEM_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 54,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_message_ram = {
.name = "slv_message_ram",
.id = MSM8996_SLAVE_MESSAGE_RAM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 55
};
static struct qcom_icc_node slv_bimc_cfg = {
.name = "slv_bimc_cfg",
.id = MSM8996_SLAVE_BIMC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 56,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_pmic_arb = {
.name = "slv_pmic_arb",
.id = MSM8996_SLAVE_PMIC_ARB,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 59
};
static struct qcom_icc_node slv_prng = {
.name = "slv_prng",
.id = MSM8996_SLAVE_PRNG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 127,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_dcc_cfg = {
.name = "slv_dcc_cfg",
.id = MSM8996_SLAVE_DCC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 155,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_rbcpr_mx = {
.name = "slv_rbcpr_mx",
.id = MSM8996_SLAVE_RBCPR_MX,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 170,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_qdss_cfg = {
.name = "slv_qdss_cfg",
.id = MSM8996_SLAVE_QDSS_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 63,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_rbcpr_cx = {
.name = "slv_rbcpr_cx",
.id = MSM8996_SLAVE_RBCPR_CX,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 169,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_cpu_apu_cfg = {
.name = "slv_cpu_apu_cfg",
.id = MSM8996_SLAVE_QDSS_RBCPR_APU_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 168,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static const u16 slv_cnoc_mnoc_cfg_links[] = {
MSM8996_MASTER_CNOC_MNOC_CFG
};
static struct qcom_icc_node slv_cnoc_mnoc_cfg = {
.name = "slv_cnoc_mnoc_cfg",
.id = MSM8996_SLAVE_CNOC_MNOC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 66,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(slv_cnoc_mnoc_cfg_links),
.links = slv_cnoc_mnoc_cfg_links
};
static struct qcom_icc_node slv_snoc_cfg = {
.name = "slv_snoc_cfg",
.id = MSM8996_SLAVE_SNOC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 70,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_snoc_mpu_cfg = {
.name = "slv_snoc_mpu_cfg",
.id = MSM8996_SLAVE_SNOC_MPU_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 67,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_ebi1_phy_cfg = {
.name = "slv_ebi1_phy_cfg",
.id = MSM8996_SLAVE_EBI1_PHY_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 73,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_a0noc_cfg = {
.name = "slv_a0noc_cfg",
.id = MSM8996_SLAVE_A0NOC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 144,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_pcie_1_cfg = {
.name = "slv_pcie_1_cfg",
.id = MSM8996_SLAVE_PCIE_1_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 89,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_pcie_2_cfg = {
.name = "slv_pcie_2_cfg",
.id = MSM8996_SLAVE_PCIE_2_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 165,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_pcie_0_cfg = {
.name = "slv_pcie_0_cfg",
.id = MSM8996_SLAVE_PCIE_0_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 88,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_pcie20_ahb2phy = {
.name = "slv_pcie20_ahb2phy",
.id = MSM8996_SLAVE_PCIE20_AHB2PHY,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 163,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_a0noc_mpu_cfg = {
.name = "slv_a0noc_mpu_cfg",
.id = MSM8996_SLAVE_A0NOC_MPU_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 145,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_ufs_cfg = {
.name = "slv_ufs_cfg",
.id = MSM8996_SLAVE_UFS_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 92,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_a1noc_cfg = {
.name = "slv_a1noc_cfg",
.id = MSM8996_SLAVE_A1NOC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 147,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_a1noc_mpu_cfg = {
.name = "slv_a1noc_mpu_cfg",
.id = MSM8996_SLAVE_A1NOC_MPU_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 148,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_a2noc_cfg = {
.name = "slv_a2noc_cfg",
.id = MSM8996_SLAVE_A2NOC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 150,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_a2noc_mpu_cfg = {
.name = "slv_a2noc_mpu_cfg",
.id = MSM8996_SLAVE_A2NOC_MPU_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 151,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_ssc_cfg = {
.name = "slv_ssc_cfg",
.id = MSM8996_SLAVE_SSC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 177,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_a0noc_smmu_cfg = {
.name = "slv_a0noc_smmu_cfg",
.id = MSM8996_SLAVE_A0NOC_SMMU_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 146,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_a1noc_smmu_cfg = {
.name = "slv_a1noc_smmu_cfg",
.id = MSM8996_SLAVE_A1NOC_SMMU_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 149,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_a2noc_smmu_cfg = {
.name = "slv_a2noc_smmu_cfg",
.id = MSM8996_SLAVE_A2NOC_SMMU_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 152,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_lpass_smmu_cfg = {
.name = "slv_lpass_smmu_cfg",
.id = MSM8996_SLAVE_LPASS_SMMU_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 161,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static const u16 slv_cnoc_mnoc_mmss_cfg_links[] = {
MSM8996_MASTER_CNOC_MNOC_MMSS_CFG
};
static struct qcom_icc_node slv_cnoc_mnoc_mmss_cfg = {
.name = "slv_cnoc_mnoc_mmss_cfg",
.id = MSM8996_SLAVE_CNOC_MNOC_MMSS_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 58,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(slv_cnoc_mnoc_mmss_cfg_links),
.links = slv_cnoc_mnoc_mmss_cfg_links
};
static struct qcom_icc_node slv_mmagic_cfg = {
.name = "slv_mmagic_cfg",
.id = MSM8996_SLAVE_MMAGIC_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 162,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_cpr_cfg = {
.name = "slv_cpr_cfg",
.id = MSM8996_SLAVE_CPR_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 6,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_misc_cfg = {
.name = "slv_misc_cfg",
.id = MSM8996_SLAVE_MISC_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 8,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_venus_throttle_cfg = {
.name = "slv_venus_throttle_cfg",
.id = MSM8996_SLAVE_VENUS_THROTTLE_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 178,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_venus_cfg = {
.name = "slv_venus_cfg",
.id = MSM8996_SLAVE_VENUS_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 10,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_vmem_cfg = {
.name = "slv_vmem_cfg",
.id = MSM8996_SLAVE_VMEM_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 180,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_dsa_cfg = {
.name = "slv_dsa_cfg",
.id = MSM8996_SLAVE_DSA_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 157,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_mnoc_clocks_cfg = {
.name = "slv_mnoc_clocks_cfg",
.id = MSM8996_SLAVE_MMSS_CLK_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 12,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_dsa_mpu_cfg = {
.name = "slv_dsa_mpu_cfg",
.id = MSM8996_SLAVE_DSA_MPU_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 158,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_mnoc_mpu_cfg = {
.name = "slv_mnoc_mpu_cfg",
.id = MSM8996_SLAVE_MNOC_MPU_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 14,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_display_cfg = {
.name = "slv_display_cfg",
.id = MSM8996_SLAVE_DISPLAY_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_display_throttle_cfg = {
.name = "slv_display_throttle_cfg",
.id = MSM8996_SLAVE_DISPLAY_THROTTLE_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 156,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_camera_cfg = {
.name = "slv_camera_cfg",
.id = MSM8996_SLAVE_CAMERA_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 3,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_camera_throttle_cfg = {
.name = "slv_camera_throttle_cfg",
.id = MSM8996_SLAVE_CAMERA_THROTTLE_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 154,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_oxili_cfg = {
.name = "slv_oxili_cfg",
.id = MSM8996_SLAVE_GRAPHICS_3D_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 11,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_smmu_mdp_cfg = {
.name = "slv_smmu_mdp_cfg",
.id = MSM8996_SLAVE_SMMU_MDP_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 173,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_smmu_rot_cfg = {
.name = "slv_smmu_rot_cfg",
.id = MSM8996_SLAVE_SMMU_ROTATOR_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 174,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_smmu_venus_cfg = {
.name = "slv_smmu_venus_cfg",
.id = MSM8996_SLAVE_SMMU_VENUS_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 175,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_smmu_cpp_cfg = {
.name = "slv_smmu_cpp_cfg",
.id = MSM8996_SLAVE_SMMU_CPP_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 171,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_smmu_jpeg_cfg = {
.name = "slv_smmu_jpeg_cfg",
.id = MSM8996_SLAVE_SMMU_JPEG_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 172,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_smmu_vfe_cfg = {
.name = "slv_smmu_vfe_cfg",
.id = MSM8996_SLAVE_SMMU_VFE_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 176,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static const u16 slv_mnoc_bimc_links[] = {
MSM8996_MASTER_MNOC_BIMC
};
static struct qcom_icc_node slv_mnoc_bimc = {
.name = "slv_mnoc_bimc",
.id = MSM8996_SLAVE_MNOC_BIMC,
.buswidth = 32,
.mas_rpm_id = -1,
.slv_rpm_id = 16,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(slv_mnoc_bimc_links),
.links = slv_mnoc_bimc_links
};
static struct qcom_icc_node slv_vmem = {
.name = "slv_vmem",
.id = MSM8996_SLAVE_VMEM,
.buswidth = 32,
.mas_rpm_id = -1,
.slv_rpm_id = 179,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_srvc_mnoc = {
.name = "slv_srvc_mnoc",
.id = MSM8996_SLAVE_SERVICE_MNOC,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 17,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static const u16 slv_pnoc_a1noc_links[] = {
MSM8996_MASTER_PNOC_A1NOC
};
static struct qcom_icc_node slv_pnoc_a1noc = {
.name = "slv_pnoc_a1noc",
.id = MSM8996_SLAVE_PNOC_A1NOC,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 139,
.num_links = ARRAY_SIZE(slv_pnoc_a1noc_links),
.links = slv_pnoc_a1noc_links
};
static struct qcom_icc_node slv_usb_hs = {
.name = "slv_usb_hs",
.id = MSM8996_SLAVE_USB_HS,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 40
};
static struct qcom_icc_node slv_sdcc_2 = {
.name = "slv_sdcc_2",
.id = MSM8996_SLAVE_SDCC_2,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 33
};
static struct qcom_icc_node slv_sdcc_4 = {
.name = "slv_sdcc_4",
.id = MSM8996_SLAVE_SDCC_4,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 34
};
static struct qcom_icc_node slv_tsif = {
.name = "slv_tsif",
.id = MSM8996_SLAVE_TSIF,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 35
};
static struct qcom_icc_node slv_blsp_2 = {
.name = "slv_blsp_2",
.id = MSM8996_SLAVE_BLSP_2,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 37
};
static struct qcom_icc_node slv_sdcc_1 = {
.name = "slv_sdcc_1",
.id = MSM8996_SLAVE_SDCC_1,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 31
};
static struct qcom_icc_node slv_blsp_1 = {
.name = "slv_blsp_1",
.id = MSM8996_SLAVE_BLSP_1,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 39
};
static struct qcom_icc_node slv_pdm = {
.name = "slv_pdm",
.id = MSM8996_SLAVE_PDM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 41
};
static struct qcom_icc_node slv_ahb2phy = {
.name = "slv_ahb2phy",
.id = MSM8996_SLAVE_AHB2PHY,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 153,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_hmss = {
.name = "slv_hmss",
.id = MSM8996_SLAVE_APPSS,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 20,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_lpass = {
.name = "slv_lpass",
.id = MSM8996_SLAVE_LPASS,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 21,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_usb3 = {
.name = "slv_usb3",
.id = MSM8996_SLAVE_USB3,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 22,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static const u16 slv_snoc_bimc_links[] = {
MSM8996_MASTER_SNOC_BIMC
};
static struct qcom_icc_node slv_snoc_bimc = {
.name = "slv_snoc_bimc",
.id = MSM8996_SLAVE_SNOC_BIMC,
.buswidth = 32,
.mas_rpm_id = -1,
.slv_rpm_id = 24,
.num_links = ARRAY_SIZE(slv_snoc_bimc_links),
.links = slv_snoc_bimc_links
};
static const u16 slv_snoc_cnoc_links[] = {
MSM8996_MASTER_SNOC_CNOC
};
static struct qcom_icc_node slv_snoc_cnoc = {
.name = "slv_snoc_cnoc",
.id = MSM8996_SLAVE_SNOC_CNOC,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 25,
.num_links = ARRAY_SIZE(slv_snoc_cnoc_links),
.links = slv_snoc_cnoc_links
};
static struct qcom_icc_node slv_imem = {
.name = "slv_imem",
.id = MSM8996_SLAVE_OCIMEM,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 26
};
static struct qcom_icc_node slv_pimem = {
.name = "slv_pimem",
.id = MSM8996_SLAVE_PIMEM,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 166
};
static const u16 slv_snoc_vmem_links[] = {
MSM8996_MASTER_SNOC_VMEM
};
static struct qcom_icc_node slv_snoc_vmem = {
.name = "slv_snoc_vmem",
.id = MSM8996_SLAVE_SNOC_VMEM,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 140,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(slv_snoc_vmem_links),
.links = slv_snoc_vmem_links
};
static const u16 slv_snoc_pnoc_links[] = {
MSM8996_MASTER_SNOC_PNOC
};
static struct qcom_icc_node slv_snoc_pnoc = {
.name = "slv_snoc_pnoc",
.id = MSM8996_SLAVE_SNOC_PNOC,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 28,
.num_links = ARRAY_SIZE(slv_snoc_pnoc_links),
.links = slv_snoc_pnoc_links
};
static struct qcom_icc_node slv_qdss_stm = {
.name = "slv_qdss_stm",
.id = MSM8996_SLAVE_QDSS_STM,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 30
};
static struct qcom_icc_node slv_pcie_0 = {
.name = "slv_pcie_0",
.id = MSM8996_SLAVE_PCIE_0,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 84,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_pcie_1 = {
.name = "slv_pcie_1",
.id = MSM8996_SLAVE_PCIE_1,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 85,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_pcie_2 = {
.name = "slv_pcie_2",
.id = MSM8996_SLAVE_PCIE_2,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 164,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
static struct qcom_icc_node slv_srvc_snoc = {
.name = "slv_srvc_snoc",
.id = MSM8996_SLAVE_SERVICE_SNOC,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 29,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID
};
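/*
 * Provider descriptions: each NoC gets its node table, its bus and
 * interface clocks, and a regmap describing the register space that
 * holds its QoS configuration.
 */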
static struct qcom_icc_node * const a0noc_nodes[] = {
[MASTER_PCIE_0] = &mas_pcie_0,
[MASTER_PCIE_1] = &mas_pcie_1,
[MASTER_PCIE_2] = &mas_pcie_2
};
static const struct regmap_config msm8996_a0noc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x6000,
.fast_io = true
};
static const struct qcom_icc_desc msm8996_a0noc = {
.type = QCOM_ICC_NOC,
.nodes = a0noc_nodes,
.num_nodes = ARRAY_SIZE(a0noc_nodes),
.intf_clocks = a0noc_intf_clocks,
.num_intf_clocks = ARRAY_SIZE(a0noc_intf_clocks),
.regmap_cfg = &msm8996_a0noc_regmap_config
};
static struct qcom_icc_node * const a1noc_nodes[] = {
[MASTER_CNOC_A1NOC] = &mas_cnoc_a1noc,
[MASTER_CRYPTO_CORE0] = &mas_crypto_c0,
[MASTER_PNOC_A1NOC] = &mas_pnoc_a1noc
};
static const struct regmap_config msm8996_a1noc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x5000,
.fast_io = true
};
static const struct qcom_icc_desc msm8996_a1noc = {
.type = QCOM_ICC_NOC,
.nodes = a1noc_nodes,
.num_nodes = ARRAY_SIZE(a1noc_nodes),
.bus_clk_desc = &aggre1_branch_clk,
.regmap_cfg = &msm8996_a1noc_regmap_config
};
static struct qcom_icc_node * const a2noc_nodes[] = {
[MASTER_USB3] = &mas_usb3,
[MASTER_IPA] = &mas_ipa,
[MASTER_UFS] = &mas_ufs
};
static const struct regmap_config msm8996_a2noc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x7000,
.fast_io = true
};
static const struct qcom_icc_desc msm8996_a2noc = {
.type = QCOM_ICC_NOC,
.nodes = a2noc_nodes,
.num_nodes = ARRAY_SIZE(a2noc_nodes),
.bus_clk_desc = &aggre2_branch_clk,
.intf_clocks = a2noc_intf_clocks,
.num_intf_clocks = ARRAY_SIZE(a2noc_intf_clocks),
.regmap_cfg = &msm8996_a2noc_regmap_config
};
static struct qcom_icc_node * const bimc_nodes[] = {
[MASTER_AMPSS_M0] = &mas_apps_proc,
[MASTER_GRAPHICS_3D] = &mas_oxili,
[MASTER_MNOC_BIMC] = &mas_mnoc_bimc,
[MASTER_SNOC_BIMC] = &mas_snoc_bimc,
[SLAVE_EBI_CH0] = &slv_ebi,
[SLAVE_HMSS_L3] = &slv_hmss_l3,
[SLAVE_BIMC_SNOC_0] = &slv_bimc_snoc_0,
[SLAVE_BIMC_SNOC_1] = &slv_bimc_snoc_1
};
static const struct regmap_config msm8996_bimc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x5a000,
.fast_io = true
};
static const struct qcom_icc_desc msm8996_bimc = {
.type = QCOM_ICC_BIMC,
.nodes = bimc_nodes,
.num_nodes = ARRAY_SIZE(bimc_nodes),
.bus_clk_desc = &bimc_clk,
.regmap_cfg = &msm8996_bimc_regmap_config
};
static struct qcom_icc_node * const cnoc_nodes[] = {
[MASTER_SNOC_CNOC] = &mas_snoc_cnoc,
[MASTER_QDSS_DAP] = &mas_qdss_dap,
[SLAVE_CNOC_A1NOC] = &slv_cnoc_a1noc,
[SLAVE_CLK_CTL] = &slv_clk_ctl,
[SLAVE_TCSR] = &slv_tcsr,
[SLAVE_TLMM] = &slv_tlmm,
[SLAVE_CRYPTO_0_CFG] = &slv_crypto0_cfg,
[SLAVE_MPM] = &slv_mpm,
[SLAVE_PIMEM_CFG] = &slv_pimem_cfg,
[SLAVE_IMEM_CFG] = &slv_imem_cfg,
[SLAVE_MESSAGE_RAM] = &slv_message_ram,
[SLAVE_BIMC_CFG] = &slv_bimc_cfg,
[SLAVE_PMIC_ARB] = &slv_pmic_arb,
[SLAVE_PRNG] = &slv_prng,
[SLAVE_DCC_CFG] = &slv_dcc_cfg,
[SLAVE_RBCPR_MX] = &slv_rbcpr_mx,
[SLAVE_QDSS_CFG] = &slv_qdss_cfg,
[SLAVE_RBCPR_CX] = &slv_rbcpr_cx,
[SLAVE_QDSS_RBCPR_APU] = &slv_cpu_apu_cfg,
[SLAVE_CNOC_MNOC_CFG] = &slv_cnoc_mnoc_cfg,
[SLAVE_SNOC_CFG] = &slv_snoc_cfg,
[SLAVE_SNOC_MPU_CFG] = &slv_snoc_mpu_cfg,
[SLAVE_EBI1_PHY_CFG] = &slv_ebi1_phy_cfg,
[SLAVE_A0NOC_CFG] = &slv_a0noc_cfg,
[SLAVE_PCIE_1_CFG] = &slv_pcie_1_cfg,
[SLAVE_PCIE_2_CFG] = &slv_pcie_2_cfg,
[SLAVE_PCIE_0_CFG] = &slv_pcie_0_cfg,
[SLAVE_PCIE20_AHB2PHY] = &slv_pcie20_ahb2phy,
[SLAVE_A0NOC_MPU_CFG] = &slv_a0noc_mpu_cfg,
[SLAVE_UFS_CFG] = &slv_ufs_cfg,
[SLAVE_A1NOC_CFG] = &slv_a1noc_cfg,
[SLAVE_A1NOC_MPU_CFG] = &slv_a1noc_mpu_cfg,
[SLAVE_A2NOC_CFG] = &slv_a2noc_cfg,
[SLAVE_A2NOC_MPU_CFG] = &slv_a2noc_mpu_cfg,
[SLAVE_SSC_CFG] = &slv_ssc_cfg,
[SLAVE_A0NOC_SMMU_CFG] = &slv_a0noc_smmu_cfg,
[SLAVE_A1NOC_SMMU_CFG] = &slv_a1noc_smmu_cfg,
[SLAVE_A2NOC_SMMU_CFG] = &slv_a2noc_smmu_cfg,
[SLAVE_LPASS_SMMU_CFG] = &slv_lpass_smmu_cfg,
[SLAVE_CNOC_MNOC_MMSS_CFG] = &slv_cnoc_mnoc_mmss_cfg
};
static const struct regmap_config msm8996_cnoc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x1000,
.fast_io = true
};
static const struct qcom_icc_desc msm8996_cnoc = {
.type = QCOM_ICC_NOC,
.nodes = cnoc_nodes,
.num_nodes = ARRAY_SIZE(cnoc_nodes),
.bus_clk_desc = &bus_2_clk,
.regmap_cfg = &msm8996_cnoc_regmap_config
};
static struct qcom_icc_node * const mnoc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &mas_cnoc_mnoc_cfg,
[MASTER_CPP] = &mas_cpp,
[MASTER_JPEG] = &mas_jpeg,
[MASTER_MDP_PORT0] = &mas_mdp_p0,
[MASTER_MDP_PORT1] = &mas_mdp_p1,
[MASTER_ROTATOR] = &mas_rotator,
[MASTER_VIDEO_P0] = &mas_venus,
[MASTER_VFE] = &mas_vfe,
[MASTER_SNOC_VMEM] = &mas_snoc_vmem,
[MASTER_VIDEO_P0_OCMEM] = &mas_venus_vmem,
[MASTER_CNOC_MNOC_MMSS_CFG] = &mas_cnoc_mnoc_mmss_cfg,
[SLAVE_MNOC_BIMC] = &slv_mnoc_bimc,
[SLAVE_VMEM] = &slv_vmem,
[SLAVE_SERVICE_MNOC] = &slv_srvc_mnoc,
[SLAVE_MMAGIC_CFG] = &slv_mmagic_cfg,
[SLAVE_CPR_CFG] = &slv_cpr_cfg,
[SLAVE_MISC_CFG] = &slv_misc_cfg,
[SLAVE_VENUS_THROTTLE_CFG] = &slv_venus_throttle_cfg,
[SLAVE_VENUS_CFG] = &slv_venus_cfg,
[SLAVE_VMEM_CFG] = &slv_vmem_cfg,
[SLAVE_DSA_CFG] = &slv_dsa_cfg,
[SLAVE_MMSS_CLK_CFG] = &slv_mnoc_clocks_cfg,
[SLAVE_DSA_MPU_CFG] = &slv_dsa_mpu_cfg,
[SLAVE_MNOC_MPU_CFG] = &slv_mnoc_mpu_cfg,
[SLAVE_DISPLAY_CFG] = &slv_display_cfg,
[SLAVE_DISPLAY_THROTTLE_CFG] = &slv_display_throttle_cfg,
[SLAVE_CAMERA_CFG] = &slv_camera_cfg,
[SLAVE_CAMERA_THROTTLE_CFG] = &slv_camera_throttle_cfg,
[SLAVE_GRAPHICS_3D_CFG] = &slv_oxili_cfg,
[SLAVE_SMMU_MDP_CFG] = &slv_smmu_mdp_cfg,
[SLAVE_SMMU_ROT_CFG] = &slv_smmu_rot_cfg,
[SLAVE_SMMU_VENUS_CFG] = &slv_smmu_venus_cfg,
[SLAVE_SMMU_CPP_CFG] = &slv_smmu_cpp_cfg,
[SLAVE_SMMU_JPEG_CFG] = &slv_smmu_jpeg_cfg,
[SLAVE_SMMU_VFE_CFG] = &slv_smmu_vfe_cfg
};
static const struct regmap_config msm8996_mnoc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x1c000,
.fast_io = true
};
static const struct qcom_icc_desc msm8996_mnoc = {
.type = QCOM_ICC_NOC,
.nodes = mnoc_nodes,
.num_nodes = ARRAY_SIZE(mnoc_nodes),
.bus_clk_desc = &mmaxi_0_clk,
.intf_clocks = mm_intf_clocks,
.num_intf_clocks = ARRAY_SIZE(mm_intf_clocks),
.regmap_cfg = &msm8996_mnoc_regmap_config
};
static struct qcom_icc_node * const pnoc_nodes[] = {
[MASTER_SNOC_PNOC] = &mas_snoc_pnoc,
[MASTER_SDCC_1] = &mas_sdcc_1,
[MASTER_SDCC_2] = &mas_sdcc_2,
[MASTER_SDCC_4] = &mas_sdcc_4,
[MASTER_USB_HS] = &mas_usb_hs,
[MASTER_BLSP_1] = &mas_blsp_1,
[MASTER_BLSP_2] = &mas_blsp_2,
[MASTER_TSIF] = &mas_tsif,
[SLAVE_PNOC_A1NOC] = &slv_pnoc_a1noc,
[SLAVE_USB_HS] = &slv_usb_hs,
[SLAVE_SDCC_2] = &slv_sdcc_2,
[SLAVE_SDCC_4] = &slv_sdcc_4,
[SLAVE_TSIF] = &slv_tsif,
[SLAVE_BLSP_2] = &slv_blsp_2,
[SLAVE_SDCC_1] = &slv_sdcc_1,
[SLAVE_BLSP_1] = &slv_blsp_1,
[SLAVE_PDM] = &slv_pdm,
[SLAVE_AHB2PHY] = &slv_ahb2phy
};
static const struct regmap_config msm8996_pnoc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x3000,
.fast_io = true
};
static const struct qcom_icc_desc msm8996_pnoc = {
.type = QCOM_ICC_NOC,
.nodes = pnoc_nodes,
.num_nodes = ARRAY_SIZE(pnoc_nodes),
.bus_clk_desc = &bus_0_clk,
.regmap_cfg = &msm8996_pnoc_regmap_config
};
static struct qcom_icc_node * const snoc_nodes[] = {
[MASTER_HMSS] = &mas_hmss,
[MASTER_QDSS_BAM] = &mas_qdss_bam,
[MASTER_SNOC_CFG] = &mas_snoc_cfg,
[MASTER_BIMC_SNOC_0] = &mas_bimc_snoc_0,
[MASTER_BIMC_SNOC_1] = &mas_bimc_snoc_1,
[MASTER_A0NOC_SNOC] = &mas_a0noc_snoc,
[MASTER_A1NOC_SNOC] = &mas_a1noc_snoc,
[MASTER_A2NOC_SNOC] = &mas_a2noc_snoc,
[MASTER_QDSS_ETR] = &mas_qdss_etr,
[SLAVE_A0NOC_SNOC] = &slv_a0noc_snoc,
[SLAVE_A1NOC_SNOC] = &slv_a1noc_snoc,
[SLAVE_A2NOC_SNOC] = &slv_a2noc_snoc,
[SLAVE_HMSS] = &slv_hmss,
[SLAVE_LPASS] = &slv_lpass,
[SLAVE_USB3] = &slv_usb3,
[SLAVE_SNOC_BIMC] = &slv_snoc_bimc,
[SLAVE_SNOC_CNOC] = &slv_snoc_cnoc,
[SLAVE_IMEM] = &slv_imem,
[SLAVE_PIMEM] = &slv_pimem,
[SLAVE_SNOC_VMEM] = &slv_snoc_vmem,
[SLAVE_SNOC_PNOC] = &slv_snoc_pnoc,
[SLAVE_QDSS_STM] = &slv_qdss_stm,
[SLAVE_PCIE_0] = &slv_pcie_0,
[SLAVE_PCIE_1] = &slv_pcie_1,
[SLAVE_PCIE_2] = &slv_pcie_2,
[SLAVE_SERVICE_SNOC] = &slv_srvc_snoc
};
static const struct regmap_config msm8996_snoc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x20000,
.fast_io = true
};
static const struct qcom_icc_desc msm8996_snoc = {
.type = QCOM_ICC_NOC,
.nodes = snoc_nodes,
.num_nodes = ARRAY_SIZE(snoc_nodes),
.bus_clk_desc = &bus_1_clk,
.regmap_cfg = &msm8996_snoc_regmap_config
};
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,msm8996-a0noc", .data = &msm8996_a0noc},
{ .compatible = "qcom,msm8996-a1noc", .data = &msm8996_a1noc},
{ .compatible = "qcom,msm8996-a2noc", .data = &msm8996_a2noc},
{ .compatible = "qcom,msm8996-bimc", .data = &msm8996_bimc},
{ .compatible = "qcom,msm8996-cnoc", .data = &msm8996_cnoc},
{ .compatible = "qcom,msm8996-mnoc", .data = &msm8996_mnoc},
{ .compatible = "qcom,msm8996-pnoc", .data = &msm8996_pnoc},
{ .compatible = "qcom,msm8996-snoc", .data = &msm8996_snoc},
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
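/*
 * Hedged sketch, compiled out: qnoc_probe() (shared icc-rpm code) is
 * assumed to look up the matched descriptor roughly as below and then
 * register one interconnect node per ->nodes entry; the variable names
 * are hypothetical.
 */
#if 0
	const struct qcom_icc_desc *desc;

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;
	/* ->bus_clk_desc and ->regmap_cfg drive clock and regmap setup */
#endif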
static struct platform_driver qnoc_driver = {
.probe = qnoc_probe,
.remove = qnoc_remove,
.driver = {
.name = "qnoc-msm8996",
.of_match_table = qnoc_of_match,
.sync_state = icc_sync_state,
}
};
static int __init qnoc_driver_init(void)
{
return platform_driver_register(&qnoc_driver);
}
core_initcall(qnoc_driver_init);
static void __exit qnoc_driver_exit(void)
{
platform_driver_unregister(&qnoc_driver);
}
module_exit(qnoc_driver_exit);
MODULE_AUTHOR("Yassine Oudjana <[email protected]>");
MODULE_DESCRIPTION("Qualcomm MSM8996 NoC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/interconnect/qcom/msm8996.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/
#include <asm/div64.h>
#include <linux/interconnect-provider.h>
#include <linux/list_sort.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
static LIST_HEAD(bcm_voters);
static DEFINE_MUTEX(bcm_voter_lock);
/**
* struct bcm_voter - Bus Clock Manager voter
* @dev: reference to the device that communicates with the BCM
* @np: reference to the device node to match bcm voters
* @lock: mutex to protect commit and wake/sleep lists in the voter
* @commit_list: list containing bcms to be committed to hardware
* @ws_list: list containing bcms that have different wake/sleep votes
* @voter_node: list of bcm voters
* @tcs_wait: mask for which buckets require TCS completion
*/
struct bcm_voter {
struct device *dev;
struct device_node *np;
struct mutex lock;
struct list_head commit_list;
struct list_head ws_list;
struct list_head voter_node;
u32 tcs_wait;
};
static int cmp_vcd(void *priv, const struct list_head *a, const struct list_head *b)
{
const struct qcom_icc_bcm *bcm_a = list_entry(a, struct qcom_icc_bcm, list);
const struct qcom_icc_bcm *bcm_b = list_entry(b, struct qcom_icc_bcm, list);
return bcm_a->aux_data.vcd - bcm_b->aux_data.vcd;
}
static u64 bcm_div(u64 num, u32 base)
{
/* Ensure that small votes aren't lost. */
if (num && num < base)
return 1;
do_div(num, base);
return num;
}
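/*
 * Illustrative only, compiled out: the clamp above means a non-zero vote
 * never truncates to zero, while larger values use plain integer division.
 * The two checks below show the expected results for hypothetical inputs.
 */
#if 0
	WARN_ON(bcm_div(1, 16) != 1);		/* small vote clamped up to 1 */
	WARN_ON(bcm_div(1000, 16) != 62);	/* 1000 / 16 truncates to 62 */
#endif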
/* BCMs with enable_mask use one-hot-encoding for on/off signaling */
static void bcm_aggregate_mask(struct qcom_icc_bcm *bcm)
{
struct qcom_icc_node *node;
int bucket, i;
for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
bcm->vote_x[bucket] = 0;
bcm->vote_y[bucket] = 0;
for (i = 0; i < bcm->num_nodes; i++) {
node = bcm->nodes[i];
/* If any vote in this bucket exists, keep the BCM enabled */
if (node->sum_avg[bucket] || node->max_peak[bucket]) {
bcm->vote_x[bucket] = 0;
bcm->vote_y[bucket] = bcm->enable_mask;
break;
}
}
}
if (bcm->keepalive) {
bcm->vote_x[QCOM_ICC_BUCKET_AMC] = bcm->enable_mask;
bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = bcm->enable_mask;
bcm->vote_y[QCOM_ICC_BUCKET_AMC] = bcm->enable_mask;
bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = bcm->enable_mask;
}
}
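/*
 * Illustrative only, compiled out: for a hypothetical mask-based BCM with
 * enable_mask 0x8, a single node voting in the WAKE bucket produces a
 * one-hot vote_y and a zero vote_x for that bucket.
 */
#if 0
	node->sum_avg[QCOM_ICC_BUCKET_WAKE] = 1000;
	bcm_aggregate_mask(bcm);
	WARN_ON(bcm->vote_x[QCOM_ICC_BUCKET_WAKE] != 0);
	WARN_ON(bcm->vote_y[QCOM_ICC_BUCKET_WAKE] != 0x8);
#endif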
static void bcm_aggregate(struct qcom_icc_bcm *bcm)
{
struct qcom_icc_node *node;
size_t i, bucket;
u64 agg_avg[QCOM_ICC_NUM_BUCKETS] = {0};
u64 agg_peak[QCOM_ICC_NUM_BUCKETS] = {0};
u64 temp;
for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
for (i = 0; i < bcm->num_nodes; i++) {
node = bcm->nodes[i];
temp = bcm_div(node->sum_avg[bucket] * bcm->aux_data.width,
node->buswidth * node->channels);
agg_avg[bucket] = max(agg_avg[bucket], temp);
temp = bcm_div(node->max_peak[bucket] * bcm->aux_data.width,
node->buswidth);
agg_peak[bucket] = max(agg_peak[bucket], temp);
}
temp = agg_avg[bucket] * bcm->vote_scale;
bcm->vote_x[bucket] = bcm_div(temp, bcm->aux_data.unit);
temp = agg_peak[bucket] * bcm->vote_scale;
bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit);
}
if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
bcm->vote_y[QCOM_ICC_BUCKET_AMC] == 0) {
bcm->vote_x[QCOM_ICC_BUCKET_AMC] = 1;
bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = 1;
bcm->vote_y[QCOM_ICC_BUCKET_AMC] = 1;
bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = 1;
}
}
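/*
 * Worked example with hypothetical numbers: a node with buswidth 16, two
 * channels and sum_avg = 1000 under a BCM of width 8, vote_scale 1000 and
 * unit 1000 yields:
 *   agg_avg = 1000 * 8 / (16 * 2) = 250
 *   vote_x  = 250 * 1000 / 1000  = 250
 */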
static inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
u32 addr, bool commit, bool wait)
{
bool valid = true;
if (!cmd)
return;
memset(cmd, 0, sizeof(*cmd));
if (vote_x == 0 && vote_y == 0)
valid = false;
if (vote_x > BCM_TCS_CMD_VOTE_MASK)
vote_x = BCM_TCS_CMD_VOTE_MASK;
if (vote_y > BCM_TCS_CMD_VOTE_MASK)
vote_y = BCM_TCS_CMD_VOTE_MASK;
cmd->addr = addr;
cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y);
	/*
	 * Set the wait-for-completion flag on commands that must complete
	 * before the next command is sent.
	 */
cmd->wait = wait;
}
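/*
 * Illustrative only, compiled out: an all-zero vote still emits a command,
 * so the commit bit can terminate a VCD group, but with valid == false.
 */
#if 0
	struct tcs_cmd cmd;

	tcs_cmd_gen(&cmd, 0, 0, bcm->addr, true, false);
	/* cmd.data now encodes commit=1, valid=0, vote_x=0, vote_y=0 */
#endif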
static void tcs_list_gen(struct bcm_voter *voter, int bucket,
struct tcs_cmd tcs_list[MAX_VCD],
int n[MAX_VCD + 1])
{
struct list_head *bcm_list = &voter->commit_list;
struct qcom_icc_bcm *bcm;
bool commit, wait;
size_t idx = 0, batch = 0, cur_vcd_size = 0;
memset(n, 0, sizeof(int) * (MAX_VCD + 1));
list_for_each_entry(bcm, bcm_list, list) {
commit = false;
cur_vcd_size++;
if ((list_is_last(&bcm->list, bcm_list)) ||
bcm->aux_data.vcd != list_next_entry(bcm, list)->aux_data.vcd) {
commit = true;
cur_vcd_size = 0;
}
wait = commit && (voter->tcs_wait & BIT(bucket));
tcs_cmd_gen(&tcs_list[idx], bcm->vote_x[bucket],
bcm->vote_y[bucket], bcm->addr, commit, wait);
idx++;
n[batch]++;
		/*
		 * Batch the BCMs in such a way that we do not split them
		 * across multiple payloads when they are under the same VCD.
		 * This ensures that every BCM is committed, since we only
		 * set the commit bit on the last BCM request of every VCD.
		 */
if (n[batch] >= MAX_RPMH_PAYLOAD) {
if (!commit) {
n[batch] -= cur_vcd_size;
n[batch + 1] = cur_vcd_size;
}
batch++;
}
}
}
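/*
 * Worked example with hypothetical numbers: if MAX_RPMH_PAYLOAD were 16
 * and commands 15-19 of a 19-command list shared one VCD, the unfinished
 * VCD group would be moved wholesale into the next batch, giving
 * n[] = { 14, 5, 0, ... }. Every commit bit thus stays within a single
 * payload.
 */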
/**
* of_bcm_voter_get - gets a bcm voter handle from DT node
* @dev: device pointer for the consumer device
* @name: name for the bcm voter device
*
* This function will match a device_node pointer for the phandle
* specified in the device DT and return a bcm_voter handle on success.
*
 * Returns a bcm_voter pointer on success or ERR_PTR() on error. -EPROBE_DEFER
 * is returned while a matching bcm voter has yet to be registered.
*/
struct bcm_voter *of_bcm_voter_get(struct device *dev, const char *name)
{
struct bcm_voter *voter = ERR_PTR(-EPROBE_DEFER);
struct bcm_voter *temp;
struct device_node *np, *node;
int idx = 0;
if (!dev || !dev->of_node)
return ERR_PTR(-ENODEV);
np = dev->of_node;
if (name) {
idx = of_property_match_string(np, "qcom,bcm-voter-names", name);
if (idx < 0)
return ERR_PTR(idx);
}
node = of_parse_phandle(np, "qcom,bcm-voters", idx);
mutex_lock(&bcm_voter_lock);
list_for_each_entry(temp, &bcm_voters, voter_node) {
if (temp->np == node) {
voter = temp;
break;
}
}
mutex_unlock(&bcm_voter_lock);
of_node_put(node);
return voter;
}
EXPORT_SYMBOL_GPL(of_bcm_voter_get);
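/*
 * Hedged usage sketch, compiled out: a provider driver is assumed to fetch
 * its voter at probe time roughly as below; "apps" stands in for an entry
 * of the qcom,bcm-voter-names DT property and is hypothetical.
 */
#if 0
	struct bcm_voter *voter;

	voter = of_bcm_voter_get(dev, "apps");
	if (IS_ERR(voter))
		return PTR_ERR(voter);	/* may be -EPROBE_DEFER */
#endif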
/**
* qcom_icc_bcm_voter_add - queues up the bcm nodes that require updates
* @voter: voter that the bcms are being added to
* @bcm: bcm to add to the commit and wake sleep list
*/
void qcom_icc_bcm_voter_add(struct bcm_voter *voter, struct qcom_icc_bcm *bcm)
{
if (!voter)
return;
mutex_lock(&voter->lock);
if (list_empty(&bcm->list))
list_add_tail(&bcm->list, &voter->commit_list);
if (list_empty(&bcm->ws_list))
list_add_tail(&bcm->ws_list, &voter->ws_list);
mutex_unlock(&voter->lock);
}
EXPORT_SYMBOL_GPL(qcom_icc_bcm_voter_add);
/**
* qcom_icc_bcm_voter_commit - generates and commits tcs cmds based on bcms
* @voter: voter that needs flushing
*
 * This function generates a set of AMC commands and flushes them to the BCM
 * device associated with the voter. It conditionally generates WAKE and SLEEP
 * commands based on deltas between the WAKE/SLEEP requirements. The ws_list
 * persists across multiple commit requests, and bcm nodes are removed from it
 * only when the WAKE requirements match the SLEEP requirements.
*
* Returns 0 on success, or an appropriate error code otherwise.
*/
int qcom_icc_bcm_voter_commit(struct bcm_voter *voter)
{
struct qcom_icc_bcm *bcm;
struct qcom_icc_bcm *bcm_tmp;
int commit_idx[MAX_VCD + 1];
struct tcs_cmd cmds[MAX_BCMS];
int ret = 0;
if (!voter)
return 0;
mutex_lock(&voter->lock);
list_for_each_entry(bcm, &voter->commit_list, list) {
if (bcm->enable_mask)
bcm_aggregate_mask(bcm);
else
bcm_aggregate(bcm);
}
	/*
	 * Pre-sort the BCMs by VCD to ease generating a command list that
	 * groups the BCMs with the same VCD together. VCDs are numbered with
	 * the lowest being the most expensive time-wise, ensuring that those
	 * commands are sent earliest in the queue. The list must be re-sorted
	 * on every commit, since the order in which the BCMs are added to it
	 * cannot be guaranteed.
	 */
list_sort(NULL, &voter->commit_list, cmp_vcd);
	/*
	 * Construct the command list from the list of BCMs pre-ordered
	 * by VCD.
	 */
tcs_list_gen(voter, QCOM_ICC_BUCKET_AMC, cmds, commit_idx);
if (!commit_idx[0])
goto out;
rpmh_invalidate(voter->dev);
ret = rpmh_write_batch(voter->dev, RPMH_ACTIVE_ONLY_STATE,
cmds, commit_idx);
if (ret) {
pr_err("Error sending AMC RPMH requests (%d)\n", ret);
goto out;
}
list_for_each_entry_safe(bcm, bcm_tmp, &voter->commit_list, list)
list_del_init(&bcm->list);
list_for_each_entry_safe(bcm, bcm_tmp, &voter->ws_list, ws_list) {
/*
* Only generate WAKE and SLEEP commands if a resource's
* requirements change as the execution environment transitions
* between different power states.
*/
if (bcm->vote_x[QCOM_ICC_BUCKET_WAKE] !=
bcm->vote_x[QCOM_ICC_BUCKET_SLEEP] ||
bcm->vote_y[QCOM_ICC_BUCKET_WAKE] !=
bcm->vote_y[QCOM_ICC_BUCKET_SLEEP])
list_add_tail(&bcm->list, &voter->commit_list);
else
list_del_init(&bcm->ws_list);
}
if (list_empty(&voter->commit_list))
goto out;
list_sort(NULL, &voter->commit_list, cmp_vcd);
tcs_list_gen(voter, QCOM_ICC_BUCKET_WAKE, cmds, commit_idx);
ret = rpmh_write_batch(voter->dev, RPMH_WAKE_ONLY_STATE, cmds, commit_idx);
if (ret) {
pr_err("Error sending WAKE RPMH requests (%d)\n", ret);
goto out;
}
tcs_list_gen(voter, QCOM_ICC_BUCKET_SLEEP, cmds, commit_idx);
ret = rpmh_write_batch(voter->dev, RPMH_SLEEP_STATE, cmds, commit_idx);
if (ret) {
pr_err("Error sending SLEEP RPMH requests (%d)\n", ret);
goto out;
}
out:
list_for_each_entry_safe(bcm, bcm_tmp, &voter->commit_list, list)
list_del_init(&bcm->list);
mutex_unlock(&voter->lock);
return ret;
}
EXPORT_SYMBOL_GPL(qcom_icc_bcm_voter_commit);
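/*
 * Hedged usage sketch, compiled out: after aggregating node votes, a
 * provider is assumed to queue the affected BCMs and flush them in a
 * single commit; the array and loop bounds here are hypothetical.
 */
#if 0
	for (i = 0; i < num_bcms; i++)
		qcom_icc_bcm_voter_add(voter, bcms[i]);

	ret = qcom_icc_bcm_voter_commit(voter);
	if (ret)
		return ret;
#endif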
static int qcom_icc_bcm_voter_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct bcm_voter *voter;
voter = devm_kzalloc(&pdev->dev, sizeof(*voter), GFP_KERNEL);
if (!voter)
return -ENOMEM;
voter->dev = &pdev->dev;
voter->np = np;
if (of_property_read_u32(np, "qcom,tcs-wait", &voter->tcs_wait))
voter->tcs_wait = QCOM_ICC_TAG_ACTIVE_ONLY;
mutex_init(&voter->lock);
INIT_LIST_HEAD(&voter->commit_list);
INIT_LIST_HEAD(&voter->ws_list);
mutex_lock(&bcm_voter_lock);
list_add_tail(&voter->voter_node, &bcm_voters);
mutex_unlock(&bcm_voter_lock);
return 0;
}
static const struct of_device_id bcm_voter_of_match[] = {
{ .compatible = "qcom,bcm-voter" },
{ }
};
MODULE_DEVICE_TABLE(of, bcm_voter_of_match);
static struct platform_driver qcom_icc_bcm_voter_driver = {
.probe = qcom_icc_bcm_voter_probe,
.driver = {
.name = "bcm_voter",
.of_match_table = bcm_voter_of_match,
},
};
module_platform_driver(qcom_icc_bcm_voter_driver);
MODULE_AUTHOR("David Dai <[email protected]>");
MODULE_DESCRIPTION("Qualcomm BCM Voter interconnect driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/interconnect/qcom/bcm-voter.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
*
*/
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sc7280.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sc7280.h"
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
.id = SC7280_MASTER_QSPI_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7280_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
.id = SC7280_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7280_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
.id = SC7280_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7280_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qnm_a1noc_cfg = {
.name = "qnm_a1noc_cfg",
.id = SC7280_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7280_SLAVE_SERVICE_A1NOC },
};
static struct qcom_icc_node xm_sdc1 = {
.name = "xm_sdc1",
.id = SC7280_MASTER_SDCC_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7280_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
.id = SC7280_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7280_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
.id = SC7280_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7280_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
.id = SC7280_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7280_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_usb2 = {
.name = "xm_usb2",
.id = SC7280_MASTER_USB2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7280_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
.id = SC7280_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7280_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
.id = SC7280_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7280_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qnm_a2noc_cfg = {
.name = "qnm_a2noc_cfg",
.id = SC7280_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7280_SLAVE_SERVICE_A2NOC },
};
static struct qcom_icc_node qnm_cnoc_datapath = {
.name = "qnm_cnoc_datapath",
.id = SC7280_MASTER_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7280_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
.id = SC7280_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7280_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
.id = SC7280_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7280_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
.id = SC7280_MASTER_PCIE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7280_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
.id = SC7280_MASTER_PCIE_1,
.channels = 1,
.buswidth = 8,
	.num_links = 1,
	.links = { SC7280_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
.id = SC7280_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7280_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
.id = SC7280_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7280_SLAVE_QUP_CORE_0 },
};
static struct qcom_icc_node qup1_core_master = {
.name = "qup1_core_master",
.id = SC7280_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7280_SLAVE_QUP_CORE_1 },
};
static struct qcom_icc_node qnm_cnoc3_cnoc2 = {
.name = "qnm_cnoc3_cnoc2",
.id = SC7280_MASTER_CNOC3_CNOC2,
.channels = 1,
.buswidth = 8,
.num_links = 44,
.links = { SC7280_SLAVE_AHB2PHY_SOUTH, SC7280_SLAVE_AHB2PHY_NORTH,
SC7280_SLAVE_CAMERA_CFG, SC7280_SLAVE_CLK_CTL,
SC7280_SLAVE_CDSP_CFG, SC7280_SLAVE_RBCPR_CX_CFG,
SC7280_SLAVE_RBCPR_MX_CFG, SC7280_SLAVE_CRYPTO_0_CFG,
SC7280_SLAVE_CX_RDPM, SC7280_SLAVE_DCC_CFG,
SC7280_SLAVE_DISPLAY_CFG, SC7280_SLAVE_GFX3D_CFG,
SC7280_SLAVE_HWKM, SC7280_SLAVE_IMEM_CFG,
SC7280_SLAVE_IPA_CFG, SC7280_SLAVE_IPC_ROUTER_CFG,
SC7280_SLAVE_LPASS, SC7280_SLAVE_CNOC_MSS,
SC7280_SLAVE_MX_RDPM, SC7280_SLAVE_PCIE_0_CFG,
SC7280_SLAVE_PCIE_1_CFG, SC7280_SLAVE_PDM,
SC7280_SLAVE_PIMEM_CFG, SC7280_SLAVE_PKA_WRAPPER_CFG,
SC7280_SLAVE_PMU_WRAPPER_CFG, SC7280_SLAVE_QDSS_CFG,
SC7280_SLAVE_QSPI_0, SC7280_SLAVE_QUP_0,
SC7280_SLAVE_QUP_1, SC7280_SLAVE_SDCC_1,
SC7280_SLAVE_SDCC_2, SC7280_SLAVE_SDCC_4,
SC7280_SLAVE_SECURITY, SC7280_SLAVE_TCSR,
SC7280_SLAVE_TLMM, SC7280_SLAVE_UFS_MEM_CFG,
SC7280_SLAVE_USB2, SC7280_SLAVE_USB3_0,
SC7280_SLAVE_VENUS_CFG, SC7280_SLAVE_VSENSE_CTRL_CFG,
SC7280_SLAVE_A1NOC_CFG, SC7280_SLAVE_A2NOC_CFG,
SC7280_SLAVE_CNOC_MNOC_CFG, SC7280_SLAVE_SNOC_CFG },
};
static struct qcom_icc_node xm_qdss_dap = {
.name = "xm_qdss_dap",
.id = SC7280_MASTER_QDSS_DAP,
.channels = 1,
.buswidth = 8,
.num_links = 45,
.links = { SC7280_SLAVE_AHB2PHY_SOUTH, SC7280_SLAVE_AHB2PHY_NORTH,
SC7280_SLAVE_CAMERA_CFG, SC7280_SLAVE_CLK_CTL,
SC7280_SLAVE_CDSP_CFG, SC7280_SLAVE_RBCPR_CX_CFG,
SC7280_SLAVE_RBCPR_MX_CFG, SC7280_SLAVE_CRYPTO_0_CFG,
SC7280_SLAVE_CX_RDPM, SC7280_SLAVE_DCC_CFG,
SC7280_SLAVE_DISPLAY_CFG, SC7280_SLAVE_GFX3D_CFG,
SC7280_SLAVE_HWKM, SC7280_SLAVE_IMEM_CFG,
SC7280_SLAVE_IPA_CFG, SC7280_SLAVE_IPC_ROUTER_CFG,
SC7280_SLAVE_LPASS, SC7280_SLAVE_CNOC_MSS,
SC7280_SLAVE_MX_RDPM, SC7280_SLAVE_PCIE_0_CFG,
SC7280_SLAVE_PCIE_1_CFG, SC7280_SLAVE_PDM,
SC7280_SLAVE_PIMEM_CFG, SC7280_SLAVE_PKA_WRAPPER_CFG,
SC7280_SLAVE_PMU_WRAPPER_CFG, SC7280_SLAVE_QDSS_CFG,
SC7280_SLAVE_QSPI_0, SC7280_SLAVE_QUP_0,
SC7280_SLAVE_QUP_1, SC7280_SLAVE_SDCC_1,
SC7280_SLAVE_SDCC_2, SC7280_SLAVE_SDCC_4,
SC7280_SLAVE_SECURITY, SC7280_SLAVE_TCSR,
SC7280_SLAVE_TLMM, SC7280_SLAVE_UFS_MEM_CFG,
SC7280_SLAVE_USB2, SC7280_SLAVE_USB3_0,
SC7280_SLAVE_VENUS_CFG, SC7280_SLAVE_VSENSE_CTRL_CFG,
SC7280_SLAVE_A1NOC_CFG, SC7280_SLAVE_A2NOC_CFG,
SC7280_SLAVE_CNOC2_CNOC3, SC7280_SLAVE_CNOC_MNOC_CFG,
SC7280_SLAVE_SNOC_CFG },
};
static struct qcom_icc_node qnm_cnoc2_cnoc3 = {
.name = "qnm_cnoc2_cnoc3",
.id = SC7280_MASTER_CNOC2_CNOC3,
.channels = 1,
.buswidth = 8,
.num_links = 9,
.links = { SC7280_SLAVE_AOSS, SC7280_SLAVE_APPSS,
SC7280_SLAVE_CNOC_A2NOC, SC7280_SLAVE_DDRSS_CFG,
SC7280_SLAVE_BOOT_IMEM, SC7280_SLAVE_IMEM,
SC7280_SLAVE_PIMEM, SC7280_SLAVE_QDSS_STM,
SC7280_SLAVE_TCU },
};
static struct qcom_icc_node qnm_gemnoc_cnoc = {
.name = "qnm_gemnoc_cnoc",
.id = SC7280_MASTER_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 9,
.links = { SC7280_SLAVE_AOSS, SC7280_SLAVE_APPSS,
SC7280_SLAVE_CNOC3_CNOC2, SC7280_SLAVE_DDRSS_CFG,
SC7280_SLAVE_BOOT_IMEM, SC7280_SLAVE_IMEM,
SC7280_SLAVE_PIMEM, SC7280_SLAVE_QDSS_STM,
SC7280_SLAVE_TCU },
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
.id = SC7280_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SC7280_SLAVE_PCIE_0, SC7280_SLAVE_PCIE_1 },
};
static struct qcom_icc_node qnm_cnoc_dc_noc = {
.name = "qnm_cnoc_dc_noc",
.id = SC7280_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
.links = { SC7280_SLAVE_LLCC_CFG, SC7280_SLAVE_GEM_NOC_CFG },
};
static struct qcom_icc_node alm_gpu_tcu = {
.name = "alm_gpu_tcu",
.id = SC7280_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC },
};
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
.id = SC7280_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC },
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
.id = SC7280_MASTER_APPSS_PROC,
.channels = 1,
.buswidth = 32,
.num_links = 3,
.links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC,
SC7280_SLAVE_MEM_NOC_PCIE_SNOC },
};
static struct qcom_icc_node qnm_cmpnoc = {
.name = "qnm_cmpnoc",
.id = SC7280_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_gemnoc_cfg = {
.name = "qnm_gemnoc_cfg",
.id = SC7280_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 5,
.links = { SC7280_SLAVE_MSS_PROC_MS_MPU_CFG, SC7280_SLAVE_MCDMA_MS_MPU_CFG,
SC7280_SLAVE_SERVICE_GEM_NOC_1, SC7280_SLAVE_SERVICE_GEM_NOC_2,
SC7280_SLAVE_SERVICE_GEM_NOC },
};
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
.id = SC7280_MASTER_GFX3D,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
.id = SC7280_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SC7280_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
.id = SC7280_MASTER_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
.links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
.id = SC7280_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
.links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
.id = SC7280_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7280_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
.id = SC7280_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
.links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC,
SC7280_SLAVE_MEM_NOC_PCIE_SNOC },
};
static struct qcom_icc_node qhm_config_noc = {
.name = "qhm_config_noc",
.id = SC7280_MASTER_CNOC_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 6,
.links = { SC7280_SLAVE_LPASS_CORE_CFG, SC7280_SLAVE_LPASS_LPI_CFG,
SC7280_SLAVE_LPASS_MPU_CFG, SC7280_SLAVE_LPASS_TOP_CFG,
SC7280_SLAVE_SERVICES_LPASS_AML_NOC, SC7280_SLAVE_SERVICE_LPASS_AG_NOC },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
.id = SC7280_MASTER_LLCC,
.channels = 2,
.buswidth = 4,
.num_links = 1,
.links = { SC7280_SLAVE_EBI1 },
};
static struct qcom_icc_node qnm_mnoc_cfg = {
.name = "qnm_mnoc_cfg",
.id = SC7280_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7280_SLAVE_SERVICE_MNOC },
};
static struct qcom_icc_node qnm_video0 = {
.name = "qnm_video0",
.id = SC7280_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC7280_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_video_cpu = {
.name = "qnm_video_cpu",
.id = SC7280_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7280_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_camnoc_hf = {
.name = "qxm_camnoc_hf",
.id = SC7280_MASTER_CAMNOC_HF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SC7280_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qxm_camnoc_icp = {
.name = "qxm_camnoc_icp",
.id = SC7280_MASTER_CAMNOC_ICP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7280_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_camnoc_sf = {
.name = "qxm_camnoc_sf",
.id = SC7280_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC7280_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_mdp0 = {
.name = "qxm_mdp0",
.id = SC7280_MASTER_MDP0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC7280_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qhm_nsp_noc_config = {
.name = "qhm_nsp_noc_config",
.id = SC7280_MASTER_CDSP_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7280_SLAVE_SERVICE_NSP_NOC },
};
static struct qcom_icc_node qxm_nsp = {
.name = "qxm_nsp",
.id = SC7280_MASTER_CDSP_PROC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SC7280_SLAVE_CDSP_MEM_NOC },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
.id = SC7280_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC7280_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
.id = SC7280_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC7280_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qnm_snoc_cfg = {
.name = "qnm_snoc_cfg",
.id = SC7280_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7280_SLAVE_SERVICE_SNOC },
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
.id = SC7280_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7280_SLAVE_SNOC_GEM_NOC_GC },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
.id = SC7280_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7280_SLAVE_SNOC_GEM_NOC_GC },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
.id = SC7280_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC7280_MASTER_A1NOC_SNOC },
};
static struct qcom_icc_node srvc_aggre1_noc = {
.name = "srvc_aggre1_noc",
.id = SC7280_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
.id = SC7280_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC7280_MASTER_A2NOC_SNOC },
};
static struct qcom_icc_node qns_pcie_mem_noc = {
.name = "qns_pcie_mem_noc",
.id = SC7280_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC7280_MASTER_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
.id = SC7280_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
.id = SC7280_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qup1_core_slave = {
.name = "qup1_core_slave",
.id = SC7280_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
.id = SC7280_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy1 = {
.name = "qhs_ahb2phy1",
.id = SC7280_SLAVE_AHB2PHY_NORTH,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
.id = SC7280_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
.id = SC7280_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_compute_cfg = {
.name = "qhs_compute_cfg",
.id = SC7280_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7280_MASTER_CDSP_NOC_CFG },
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
.id = SC7280_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mx = {
.name = "qhs_cpr_mx",
.id = SC7280_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
.id = SC7280_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_cx_rdpm = {
.name = "qhs_cx_rdpm",
.id = SC7280_SLAVE_CX_RDPM,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_dcc_cfg = {
.name = "qhs_dcc_cfg",
.id = SC7280_SLAVE_DCC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
.id = SC7280_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
.id = SC7280_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node qhs_hwkm = {
.name = "qhs_hwkm",
.id = SC7280_SLAVE_HWKM,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
.id = SC7280_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
.id = SC7280_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
.id = SC7280_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_lpass_cfg = {
.name = "qhs_lpass_cfg",
.id = SC7280_SLAVE_LPASS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7280_MASTER_CNOC_LPASS_AG_NOC },
};
static struct qcom_icc_node qhs_mss_cfg = {
.name = "qhs_mss_cfg",
.id = SC7280_SLAVE_CNOC_MSS,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_mx_rdpm = {
.name = "qhs_mx_rdpm",
.id = SC7280_SLAVE_MX_RDPM,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
.id = SC7280_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_pcie1_cfg = {
.name = "qhs_pcie1_cfg",
.id = SC7280_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
.id = SC7280_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
.id = SC7280_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_pka_wrapper_cfg = {
.name = "qhs_pka_wrapper_cfg",
.id = SC7280_SLAVE_PKA_WRAPPER_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_pmu_wrapper_cfg = {
.name = "qhs_pmu_wrapper_cfg",
.id = SC7280_SLAVE_PMU_WRAPPER_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
.id = SC7280_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
.id = SC7280_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
.id = SC7280_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
.id = SC7280_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_sdc1 = {
.name = "qhs_sdc1",
.id = SC7280_SLAVE_SDCC_1,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
.id = SC7280_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
.id = SC7280_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_security = {
.name = "qhs_security",
.id = SC7280_SLAVE_SECURITY,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
.id = SC7280_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
.id = SC7280_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
.id = SC7280_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_usb2 = {
.name = "qhs_usb2",
.id = SC7280_SLAVE_USB2,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
.id = SC7280_SLAVE_USB3_0,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
.id = SC7280_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
.id = SC7280_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_a1_noc_cfg = {
.name = "qns_a1_noc_cfg",
.id = SC7280_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7280_MASTER_A1NOC_CFG },
};
static struct qcom_icc_node qns_a2_noc_cfg = {
.name = "qns_a2_noc_cfg",
.id = SC7280_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7280_MASTER_A2NOC_CFG },
};
static struct qcom_icc_node qns_cnoc2_cnoc3 = {
.name = "qns_cnoc2_cnoc3",
.id = SC7280_SLAVE_CNOC2_CNOC3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7280_MASTER_CNOC2_CNOC3 },
};
static struct qcom_icc_node qns_mnoc_cfg = {
.name = "qns_mnoc_cfg",
.id = SC7280_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7280_MASTER_CNOC_MNOC_CFG },
};
static struct qcom_icc_node qns_snoc_cfg = {
.name = "qns_snoc_cfg",
.id = SC7280_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7280_MASTER_SNOC_CFG },
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
.id = SC7280_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
.id = SC7280_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node qns_cnoc3_cnoc2 = {
.name = "qns_cnoc3_cnoc2",
.id = SC7280_SLAVE_CNOC3_CNOC2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7280_MASTER_CNOC3_CNOC2 },
};
static struct qcom_icc_node qns_cnoc_a2noc = {
.name = "qns_cnoc_a2noc",
.id = SC7280_SLAVE_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7280_MASTER_CNOC_A2NOC },
};
static struct qcom_icc_node qns_ddrss_cfg = {
.name = "qns_ddrss_cfg",
.id = SC7280_SLAVE_DDRSS_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7280_MASTER_CNOC_DC_NOC },
};
static struct qcom_icc_node qxs_boot_imem = {
.name = "qxs_boot_imem",
.id = SC7280_SLAVE_BOOT_IMEM,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
.id = SC7280_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
.id = SC7280_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node xs_pcie_0 = {
.name = "xs_pcie_0",
.id = SC7280_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node xs_pcie_1 = {
.name = "xs_pcie_1",
.id = SC7280_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
.id = SC7280_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
.id = SC7280_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
.id = SC7280_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_gemnoc = {
.name = "qns_gemnoc",
.id = SC7280_SLAVE_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7280_MASTER_GEM_NOC_CFG },
};
static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
.name = "qhs_mdsp_ms_mpu_cfg",
.id = SC7280_SLAVE_MSS_PROC_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_modem_ms_mpu_cfg = {
.name = "qhs_modem_ms_mpu_cfg",
.id = SC7280_SLAVE_MCDMA_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_gem_noc_cnoc = {
.name = "qns_gem_noc_cnoc",
.id = SC7280_SLAVE_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC7280_MASTER_GEM_NOC_CNOC },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
.id = SC7280_SLAVE_LLCC,
.channels = 2,
.buswidth = 16,
.num_links = 1,
.links = { SC7280_MASTER_LLCC },
};
static struct qcom_icc_node qns_pcie = {
.name = "qns_pcie",
.id = SC7280_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7280_MASTER_GEM_NOC_PCIE_SNOC },
};
static struct qcom_icc_node srvc_even_gemnoc = {
.name = "srvc_even_gemnoc",
.id = SC7280_SLAVE_SERVICE_GEM_NOC_1,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node srvc_odd_gemnoc = {
.name = "srvc_odd_gemnoc",
.id = SC7280_SLAVE_SERVICE_GEM_NOC_2,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node srvc_sys_gemnoc = {
.name = "srvc_sys_gemnoc",
.id = SC7280_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_lpass_core = {
.name = "qhs_lpass_core",
.id = SC7280_SLAVE_LPASS_CORE_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_lpass_lpi = {
.name = "qhs_lpass_lpi",
.id = SC7280_SLAVE_LPASS_LPI_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_lpass_mpu = {
.name = "qhs_lpass_mpu",
.id = SC7280_SLAVE_LPASS_MPU_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_lpass_top = {
.name = "qhs_lpass_top",
.id = SC7280_SLAVE_LPASS_TOP_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node srvc_niu_aml_noc = {
.name = "srvc_niu_aml_noc",
.id = SC7280_SLAVE_SERVICES_LPASS_AML_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node srvc_niu_lpass_agnoc = {
.name = "srvc_niu_lpass_agnoc",
.id = SC7280_SLAVE_SERVICE_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
.id = SC7280_SLAVE_EBI1,
.channels = 2,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
.id = SC7280_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SC7280_MASTER_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
.id = SC7280_SLAVE_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC7280_MASTER_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
.id = SC7280_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_nsp_gemnoc = {
.name = "qns_nsp_gemnoc",
.id = SC7280_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SC7280_MASTER_COMPUTE_NOC },
};
static struct qcom_icc_node service_nsp_noc = {
.name = "service_nsp_noc",
.id = SC7280_SLAVE_SERVICE_NSP_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
.id = SC7280_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7280_MASTER_SNOC_GC_MEM_NOC },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
.id = SC7280_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC7280_MASTER_SNOC_SF_MEM_NOC },
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
.id = SC7280_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.num_nodes = 1,
.nodes = { &qxm_crypto },
};
static struct qcom_icc_bcm bcm_cn0 = {
.name = "CN0",
.keepalive = true,
.num_nodes = 2,
.nodes = { &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie },
};
static struct qcom_icc_bcm bcm_cn1 = {
.name = "CN1",
.num_nodes = 47,
.nodes = { &qnm_cnoc3_cnoc2, &xm_qdss_dap,
&qhs_ahb2phy0, &qhs_ahb2phy1,
&qhs_camera_cfg, &qhs_clk_ctl,
&qhs_compute_cfg, &qhs_cpr_cx,
&qhs_cpr_mx, &qhs_crypto0_cfg,
&qhs_cx_rdpm, &qhs_dcc_cfg,
&qhs_display_cfg, &qhs_gpuss_cfg,
&qhs_hwkm, &qhs_imem_cfg,
&qhs_ipa, &qhs_ipc_router,
&qhs_mss_cfg, &qhs_mx_rdpm,
&qhs_pcie0_cfg, &qhs_pcie1_cfg,
&qhs_pimem_cfg, &qhs_pka_wrapper_cfg,
&qhs_pmu_wrapper_cfg, &qhs_qdss_cfg,
&qhs_qup0, &qhs_qup1,
&qhs_security, &qhs_tcsr,
&qhs_tlmm, &qhs_ufs_mem_cfg, &qhs_usb2,
&qhs_usb3_0, &qhs_venus_cfg,
&qhs_vsense_ctrl_cfg, &qns_a1_noc_cfg,
&qns_a2_noc_cfg, &qns_cnoc2_cnoc3,
&qns_mnoc_cfg, &qns_snoc_cfg,
&qnm_cnoc2_cnoc3, &qhs_aoss,
&qhs_apss, &qns_cnoc3_cnoc2,
&qns_cnoc_a2noc, &qns_ddrss_cfg },
};
static struct qcom_icc_bcm bcm_cn2 = {
.name = "CN2",
.num_nodes = 6,
.nodes = { &qhs_lpass_cfg, &qhs_pdm,
&qhs_qspi, &qhs_sdc1,
&qhs_sdc2, &qhs_sdc4 },
};
static struct qcom_icc_bcm bcm_co0 = {
.name = "CO0",
.num_nodes = 1,
.nodes = { &qns_nsp_gemnoc },
};
static struct qcom_icc_bcm bcm_co3 = {
.name = "CO3",
.num_nodes = 1,
.nodes = { &qxm_nsp },
};
static struct qcom_icc_bcm bcm_mc0 = {
.name = "MC0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_mm0 = {
.name = "MM0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_bcm bcm_mm1 = {
.name = "MM1",
.num_nodes = 2,
.nodes = { &qxm_camnoc_hf, &qxm_mdp0 },
};
static struct qcom_icc_bcm bcm_mm4 = {
.name = "MM4",
.num_nodes = 1,
.nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_bcm bcm_mm5 = {
.name = "MM5",
.num_nodes = 3,
.nodes = { &qnm_video0, &qxm_camnoc_icp,
&qxm_camnoc_sf },
};
static struct qcom_icc_bcm bcm_qup0 = {
.name = "QUP0",
.vote_scale = 1,
.num_nodes = 1,
.nodes = { &qup0_core_slave },
};
static struct qcom_icc_bcm bcm_qup1 = {
.name = "QUP1",
.vote_scale = 1,
.num_nodes = 1,
.nodes = { &qup1_core_slave },
};
static struct qcom_icc_bcm bcm_sh0 = {
.name = "SH0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_llcc },
};
static struct qcom_icc_bcm bcm_sh2 = {
.name = "SH2",
.num_nodes = 2,
.nodes = { &alm_gpu_tcu, &alm_sys_tcu },
};
static struct qcom_icc_bcm bcm_sh3 = {
.name = "SH3",
.num_nodes = 1,
.nodes = { &qnm_cmpnoc },
};
static struct qcom_icc_bcm bcm_sh4 = {
.name = "SH4",
.num_nodes = 1,
.nodes = { &chm_apps },
};
static struct qcom_icc_bcm bcm_sn0 = {
.name = "SN0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_bcm bcm_sn2 = {
.name = "SN2",
.num_nodes = 1,
.nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_bcm bcm_sn3 = {
.name = "SN3",
.num_nodes = 1,
.nodes = { &qxs_pimem },
};
static struct qcom_icc_bcm bcm_sn4 = {
.name = "SN4",
.num_nodes = 1,
.nodes = { &xs_qdss_stm },
};
static struct qcom_icc_bcm bcm_sn5 = {
.name = "SN5",
.num_nodes = 1,
.nodes = { &xm_pcie3_0 },
};
static struct qcom_icc_bcm bcm_sn6 = {
.name = "SN6",
.num_nodes = 1,
.nodes = { &xm_pcie3_1 },
};
static struct qcom_icc_bcm bcm_sn7 = {
.name = "SN7",
.num_nodes = 1,
.nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_bcm bcm_sn8 = {
.name = "SN8",
.num_nodes = 1,
.nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_bcm bcm_sn14 = {
.name = "SN14",
.num_nodes = 1,
.nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_sn5,
&bcm_sn6,
&bcm_sn14,
};
static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_QSPI_0] = &qhm_qspi,
[MASTER_QUP_0] = &qhm_qup0,
[MASTER_QUP_1] = &qhm_qup1,
[MASTER_A1NOC_CFG] = &qnm_a1noc_cfg,
[MASTER_PCIE_0] = &xm_pcie3_0,
[MASTER_PCIE_1] = &xm_pcie3_1,
[MASTER_SDCC_1] = &xm_sdc1,
[MASTER_SDCC_2] = &xm_sdc2,
[MASTER_SDCC_4] = &xm_sdc4,
[MASTER_UFS_MEM] = &xm_ufs_mem,
[MASTER_USB2] = &xm_usb2,
[MASTER_USB3_0] = &xm_usb3_0,
[SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
[SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
static const struct qcom_icc_desc sc7280_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
};
static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_A2NOC_CFG] = &qnm_a2noc_cfg,
[MASTER_CNOC_A2NOC] = &qnm_cnoc_datapath,
[MASTER_CRYPTO] = &qxm_crypto,
[MASTER_IPA] = &qxm_ipa,
[MASTER_QDSS_ETR] = &xm_qdss_etr,
[SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
static const struct qcom_icc_desc sc7280_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
static struct qcom_icc_bcm * const clk_virt_bcms[] = {
&bcm_qup0,
&bcm_qup1,
};
static struct qcom_icc_node * const clk_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[SLAVE_QUP_CORE_0] = &qup0_core_slave,
[SLAVE_QUP_CORE_1] = &qup1_core_slave,
};
static const struct qcom_icc_desc sc7280_clk_virt = {
.nodes = clk_virt_nodes,
.num_nodes = ARRAY_SIZE(clk_virt_nodes),
.bcms = clk_virt_bcms,
.num_bcms = ARRAY_SIZE(clk_virt_bcms),
};
static struct qcom_icc_bcm * const cnoc2_bcms[] = {
&bcm_cn1,
&bcm_cn2,
};
static struct qcom_icc_node * const cnoc2_nodes[] = {
[MASTER_CNOC3_CNOC2] = &qnm_cnoc3_cnoc2,
[MASTER_QDSS_DAP] = &xm_qdss_dap,
[SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
[SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
[SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
[SLAVE_CLK_CTL] = &qhs_clk_ctl,
[SLAVE_CDSP_CFG] = &qhs_compute_cfg,
[SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
[SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
[SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
[SLAVE_CX_RDPM] = &qhs_cx_rdpm,
[SLAVE_DCC_CFG] = &qhs_dcc_cfg,
[SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
[SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
[SLAVE_HWKM] = &qhs_hwkm,
[SLAVE_IMEM_CFG] = &qhs_imem_cfg,
[SLAVE_IPA_CFG] = &qhs_ipa,
[SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
[SLAVE_LPASS] = &qhs_lpass_cfg,
[SLAVE_CNOC_MSS] = &qhs_mss_cfg,
[SLAVE_MX_RDPM] = &qhs_mx_rdpm,
[SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
[SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
[SLAVE_PDM] = &qhs_pdm,
[SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
[SLAVE_PKA_WRAPPER_CFG] = &qhs_pka_wrapper_cfg,
[SLAVE_PMU_WRAPPER_CFG] = &qhs_pmu_wrapper_cfg,
[SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
[SLAVE_QSPI_0] = &qhs_qspi,
[SLAVE_QUP_0] = &qhs_qup0,
[SLAVE_QUP_1] = &qhs_qup1,
[SLAVE_SDCC_1] = &qhs_sdc1,
[SLAVE_SDCC_2] = &qhs_sdc2,
[SLAVE_SDCC_4] = &qhs_sdc4,
[SLAVE_SECURITY] = &qhs_security,
[SLAVE_TCSR] = &qhs_tcsr,
[SLAVE_TLMM] = &qhs_tlmm,
[SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
[SLAVE_USB2] = &qhs_usb2,
[SLAVE_USB3_0] = &qhs_usb3_0,
[SLAVE_VENUS_CFG] = &qhs_venus_cfg,
[SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
[SLAVE_A1NOC_CFG] = &qns_a1_noc_cfg,
[SLAVE_A2NOC_CFG] = &qns_a2_noc_cfg,
[SLAVE_CNOC2_CNOC3] = &qns_cnoc2_cnoc3,
[SLAVE_CNOC_MNOC_CFG] = &qns_mnoc_cfg,
[SLAVE_SNOC_CFG] = &qns_snoc_cfg,
};
static const struct qcom_icc_desc sc7280_cnoc2 = {
.nodes = cnoc2_nodes,
.num_nodes = ARRAY_SIZE(cnoc2_nodes),
.bcms = cnoc2_bcms,
.num_bcms = ARRAY_SIZE(cnoc2_bcms),
};
static struct qcom_icc_bcm * const cnoc3_bcms[] = {
&bcm_cn0,
&bcm_cn1,
&bcm_sn3,
&bcm_sn4,
};
static struct qcom_icc_node * const cnoc3_nodes[] = {
[MASTER_CNOC2_CNOC3] = &qnm_cnoc2_cnoc3,
[MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
[MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
[SLAVE_AOSS] = &qhs_aoss,
[SLAVE_APPSS] = &qhs_apss,
[SLAVE_CNOC3_CNOC2] = &qns_cnoc3_cnoc2,
[SLAVE_CNOC_A2NOC] = &qns_cnoc_a2noc,
[SLAVE_DDRSS_CFG] = &qns_ddrss_cfg,
[SLAVE_BOOT_IMEM] = &qxs_boot_imem,
[SLAVE_IMEM] = &qxs_imem,
[SLAVE_PIMEM] = &qxs_pimem,
[SLAVE_PCIE_0] = &xs_pcie_0,
[SLAVE_PCIE_1] = &xs_pcie_1,
[SLAVE_QDSS_STM] = &xs_qdss_stm,
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
static const struct qcom_icc_desc sc7280_cnoc3 = {
.nodes = cnoc3_nodes,
.num_nodes = ARRAY_SIZE(cnoc3_nodes),
.bcms = cnoc3_bcms,
.num_bcms = ARRAY_SIZE(cnoc3_bcms),
};
static struct qcom_icc_bcm * const dc_noc_bcms[] = {
};
static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
[SLAVE_GEM_NOC_CFG] = &qns_gemnoc,
};
static const struct qcom_icc_desc sc7280_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
.bcms = dc_noc_bcms,
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
&bcm_sh3,
&bcm_sh4,
};
static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_GPU_TCU] = &alm_gpu_tcu,
[MASTER_SYS_TCU] = &alm_sys_tcu,
[MASTER_APPSS_PROC] = &chm_apps,
[MASTER_COMPUTE_NOC] = &qnm_cmpnoc,
[MASTER_GEM_NOC_CFG] = &qnm_gemnoc_cfg,
[MASTER_GFX3D] = &qnm_gpu,
[MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
[MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
[MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
[MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
[MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
[SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
[SLAVE_MCDMA_MS_MPU_CFG] = &qhs_modem_ms_mpu_cfg,
[SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
[SLAVE_LLCC] = &qns_llcc,
[SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
[SLAVE_SERVICE_GEM_NOC_1] = &srvc_even_gemnoc,
[SLAVE_SERVICE_GEM_NOC_2] = &srvc_odd_gemnoc,
[SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc,
};
static const struct qcom_icc_desc sc7280_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
};
static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
[MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
[SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
[SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi,
[SLAVE_LPASS_MPU_CFG] = &qhs_lpass_mpu,
[SLAVE_LPASS_TOP_CFG] = &qhs_lpass_top,
[SLAVE_SERVICES_LPASS_AML_NOC] = &srvc_niu_aml_noc,
[SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
};
static const struct qcom_icc_desc sc7280_lpass_ag_noc = {
.nodes = lpass_ag_noc_nodes,
.num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
.bcms = lpass_ag_noc_bcms,
.num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
};
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
};
static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
};
static const struct qcom_icc_desc sc7280_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm4,
&bcm_mm5,
};
static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &qnm_mnoc_cfg,
[MASTER_VIDEO_P0] = &qnm_video0,
[MASTER_VIDEO_PROC] = &qnm_video_cpu,
[MASTER_CAMNOC_HF] = &qxm_camnoc_hf,
[MASTER_CAMNOC_ICP] = &qxm_camnoc_icp,
[MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
[MASTER_MDP0] = &qxm_mdp0,
[SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
[SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
};
static const struct qcom_icc_desc sc7280_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
&bcm_co0,
&bcm_co3,
};
static struct qcom_icc_node * const nsp_noc_nodes[] = {
[MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
[MASTER_CDSP_PROC] = &qxm_nsp,
[SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
[SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
};
static const struct qcom_icc_desc sc7280_nsp_noc = {
.nodes = nsp_noc_nodes,
.num_nodes = ARRAY_SIZE(nsp_noc_nodes),
.bcms = nsp_noc_bcms,
.num_bcms = ARRAY_SIZE(nsp_noc_bcms),
};
static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn2,
&bcm_sn7,
&bcm_sn8,
};
static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
[MASTER_SNOC_CFG] = &qnm_snoc_cfg,
[MASTER_PIMEM] = &qxm_pimem,
[MASTER_GIC] = &xm_gic,
[SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
[SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
[SLAVE_SERVICE_SNOC] = &srvc_snoc,
};
static const struct qcom_icc_desc sc7280_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
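/*
 * Hedged consumer sketch, compiled out and not part of this provider: a
 * client is assumed to request bandwidth across these NoCs through the
 * generic interconnect API; the "memory" path name and bandwidth values
 * are hypothetical.
 */
#if 0
	struct icc_path *path;

	path = of_icc_get(dev, "memory");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* average 100 MB/s, peak 200 MB/s */
	ret = icc_set_bw(path, MBps_to_icc(100), MBps_to_icc(200));
#endif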
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sc7280-aggre1-noc",
.data = &sc7280_aggre1_noc},
{ .compatible = "qcom,sc7280-aggre2-noc",
.data = &sc7280_aggre2_noc},
{ .compatible = "qcom,sc7280-clk-virt",
.data = &sc7280_clk_virt},
{ .compatible = "qcom,sc7280-cnoc2",
.data = &sc7280_cnoc2},
{ .compatible = "qcom,sc7280-cnoc3",
.data = &sc7280_cnoc3},
{ .compatible = "qcom,sc7280-dc-noc",
.data = &sc7280_dc_noc},
{ .compatible = "qcom,sc7280-gem-noc",
.data = &sc7280_gem_noc},
{ .compatible = "qcom,sc7280-lpass-ag-noc",
.data = &sc7280_lpass_ag_noc},
{ .compatible = "qcom,sc7280-mc-virt",
.data = &sc7280_mc_virt},
{ .compatible = "qcom,sc7280-mmss-noc",
.data = &sc7280_mmss_noc},
{ .compatible = "qcom,sc7280-nsp-noc",
.data = &sc7280_nsp_noc},
{ .compatible = "qcom,sc7280-system-noc",
.data = &sc7280_system_noc},
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
.remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sc7280",
.of_match_table = qnoc_of_match,
.sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
MODULE_DESCRIPTION("SC7280 NoC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/interconnect/qcom/sc7280.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023, Linaro Limited
*/
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sa8775p-rpmh.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#define SA8775P_MASTER_GPU_TCU 0
#define SA8775P_MASTER_PCIE_TCU 1
#define SA8775P_MASTER_SYS_TCU 2
#define SA8775P_MASTER_APPSS_PROC 3
#define SA8775P_MASTER_LLCC 4
#define SA8775P_MASTER_CNOC_LPASS_AG_NOC 5
#define SA8775P_MASTER_GIC_AHB 6
#define SA8775P_MASTER_CDSP_NOC_CFG 7
#define SA8775P_MASTER_CDSPB_NOC_CFG 8
#define SA8775P_MASTER_QDSS_BAM 9
#define SA8775P_MASTER_QUP_0 10
#define SA8775P_MASTER_QUP_1 11
#define SA8775P_MASTER_QUP_2 12
#define SA8775P_MASTER_A1NOC_SNOC 13
#define SA8775P_MASTER_A2NOC_SNOC 14
#define SA8775P_MASTER_CAMNOC_HF 15
#define SA8775P_MASTER_CAMNOC_ICP 16
#define SA8775P_MASTER_CAMNOC_SF 17
#define SA8775P_MASTER_COMPUTE_NOC 18
#define SA8775P_MASTER_COMPUTE_NOC_1 19
#define SA8775P_MASTER_CNOC_A2NOC 20
#define SA8775P_MASTER_CNOC_DC_NOC 21
#define SA8775P_MASTER_GEM_NOC_CFG 22
#define SA8775P_MASTER_GEM_NOC_CNOC 23
#define SA8775P_MASTER_GEM_NOC_PCIE_SNOC 24
#define SA8775P_MASTER_GPDSP_SAIL 25
#define SA8775P_MASTER_GFX3D 26
#define SA8775P_MASTER_LPASS_ANOC 27
#define SA8775P_MASTER_MDP0 28
#define SA8775P_MASTER_MDP1 29
#define SA8775P_MASTER_MDP_CORE1_0 30
#define SA8775P_MASTER_MDP_CORE1_1 31
#define SA8775P_MASTER_MNOC_HF_MEM_NOC 32
#define SA8775P_MASTER_CNOC_MNOC_HF_CFG 33
#define SA8775P_MASTER_MNOC_SF_MEM_NOC 34
#define SA8775P_MASTER_CNOC_MNOC_SF_CFG 35
#define SA8775P_MASTER_ANOC_PCIE_GEM_NOC 36
#define SA8775P_MASTER_SNOC_CFG 37
#define SA8775P_MASTER_SNOC_GC_MEM_NOC 38
#define SA8775P_MASTER_SNOC_SF_MEM_NOC 39
#define SA8775P_MASTER_VIDEO_P0 40
#define SA8775P_MASTER_VIDEO_P1 41
#define SA8775P_MASTER_VIDEO_PROC 42
#define SA8775P_MASTER_VIDEO_V_PROC 43
#define SA8775P_MASTER_QUP_CORE_0 44
#define SA8775P_MASTER_QUP_CORE_1 45
#define SA8775P_MASTER_QUP_CORE_2 46
#define SA8775P_MASTER_QUP_CORE_3 47
#define SA8775P_MASTER_CRYPTO_CORE0 48
#define SA8775P_MASTER_CRYPTO_CORE1 49
#define SA8775P_MASTER_DSP0 50
#define SA8775P_MASTER_DSP1 51
#define SA8775P_MASTER_IPA 52
#define SA8775P_MASTER_LPASS_PROC 53
#define SA8775P_MASTER_CDSP_PROC 54
#define SA8775P_MASTER_CDSP_PROC_B 55
#define SA8775P_MASTER_PIMEM 56
#define SA8775P_MASTER_QUP_3 57
#define SA8775P_MASTER_EMAC 58
#define SA8775P_MASTER_EMAC_1 59
#define SA8775P_MASTER_GIC 60
#define SA8775P_MASTER_PCIE_0 61
#define SA8775P_MASTER_PCIE_1 62
#define SA8775P_MASTER_QDSS_ETR_0 63
#define SA8775P_MASTER_QDSS_ETR_1 64
#define SA8775P_MASTER_SDC 65
#define SA8775P_MASTER_UFS_CARD 66
#define SA8775P_MASTER_UFS_MEM 67
#define SA8775P_MASTER_USB2 68
#define SA8775P_MASTER_USB3_0 69
#define SA8775P_MASTER_USB3_1 70
#define SA8775P_SLAVE_EBI1 512
#define SA8775P_SLAVE_AHB2PHY_0 513
#define SA8775P_SLAVE_AHB2PHY_1 514
#define SA8775P_SLAVE_AHB2PHY_2 515
#define SA8775P_SLAVE_AHB2PHY_3 516
#define SA8775P_SLAVE_ANOC_THROTTLE_CFG 517
#define SA8775P_SLAVE_AOSS 518
#define SA8775P_SLAVE_APPSS 519
#define SA8775P_SLAVE_BOOT_ROM 520
#define SA8775P_SLAVE_CAMERA_CFG 521
#define SA8775P_SLAVE_CAMERA_NRT_THROTTLE_CFG 522
#define SA8775P_SLAVE_CAMERA_RT_THROTTLE_CFG 523
#define SA8775P_SLAVE_CLK_CTL 524
#define SA8775P_SLAVE_CDSP_CFG 525
#define SA8775P_SLAVE_CDSP1_CFG 526
#define SA8775P_SLAVE_RBCPR_CX_CFG 527
#define SA8775P_SLAVE_RBCPR_MMCX_CFG 528
#define SA8775P_SLAVE_RBCPR_MX_CFG 529
#define SA8775P_SLAVE_CPR_NSPCX 530
#define SA8775P_SLAVE_CRYPTO_0_CFG 531
#define SA8775P_SLAVE_CX_RDPM 532
#define SA8775P_SLAVE_DISPLAY_CFG 533
#define SA8775P_SLAVE_DISPLAY_RT_THROTTLE_CFG 534
#define SA8775P_SLAVE_DISPLAY1_CFG 535
#define SA8775P_SLAVE_DISPLAY1_RT_THROTTLE_CFG 536
#define SA8775P_SLAVE_EMAC_CFG 537
#define SA8775P_SLAVE_EMAC1_CFG 538
#define SA8775P_SLAVE_GP_DSP0_CFG 539
#define SA8775P_SLAVE_GP_DSP1_CFG 540
#define SA8775P_SLAVE_GPDSP0_THROTTLE_CFG 541
#define SA8775P_SLAVE_GPDSP1_THROTTLE_CFG 542
#define SA8775P_SLAVE_GPU_TCU_THROTTLE_CFG 543
#define SA8775P_SLAVE_GFX3D_CFG 544
#define SA8775P_SLAVE_HWKM 545
#define SA8775P_SLAVE_IMEM_CFG 546
#define SA8775P_SLAVE_IPA_CFG 547
#define SA8775P_SLAVE_IPC_ROUTER_CFG 548
#define SA8775P_SLAVE_LLCC_CFG 549
#define SA8775P_SLAVE_LPASS 550
#define SA8775P_SLAVE_LPASS_CORE_CFG 551
#define SA8775P_SLAVE_LPASS_LPI_CFG 552
#define SA8775P_SLAVE_LPASS_MPU_CFG 553
#define SA8775P_SLAVE_LPASS_THROTTLE_CFG 554
#define SA8775P_SLAVE_LPASS_TOP_CFG 555
#define SA8775P_SLAVE_MX_RDPM 556
#define SA8775P_SLAVE_MXC_RDPM 557
#define SA8775P_SLAVE_PCIE_0_CFG 558
#define SA8775P_SLAVE_PCIE_1_CFG 559
#define SA8775P_SLAVE_PCIE_RSC_CFG 560
#define SA8775P_SLAVE_PCIE_TCU_THROTTLE_CFG 561
#define SA8775P_SLAVE_PCIE_THROTTLE_CFG 562
#define SA8775P_SLAVE_PDM 563
#define SA8775P_SLAVE_PIMEM_CFG 564
#define SA8775P_SLAVE_PKA_WRAPPER_CFG 565
#define SA8775P_SLAVE_QDSS_CFG 566
#define SA8775P_SLAVE_QM_CFG 567
#define SA8775P_SLAVE_QM_MPU_CFG 568
#define SA8775P_SLAVE_QUP_0 569
#define SA8775P_SLAVE_QUP_1 570
#define SA8775P_SLAVE_QUP_2 571
#define SA8775P_SLAVE_QUP_3 572
#define SA8775P_SLAVE_SAIL_THROTTLE_CFG 573
#define SA8775P_SLAVE_SDC1 574
#define SA8775P_SLAVE_SECURITY 575
#define SA8775P_SLAVE_SNOC_THROTTLE_CFG 576
#define SA8775P_SLAVE_TCSR 577
#define SA8775P_SLAVE_TLMM 578
#define SA8775P_SLAVE_TSC_CFG 579
#define SA8775P_SLAVE_UFS_CARD_CFG 580
#define SA8775P_SLAVE_UFS_MEM_CFG 581
#define SA8775P_SLAVE_USB2 582
#define SA8775P_SLAVE_USB3_0 583
#define SA8775P_SLAVE_USB3_1 584
#define SA8775P_SLAVE_VENUS_CFG 585
#define SA8775P_SLAVE_VENUS_CVP_THROTTLE_CFG 586
#define SA8775P_SLAVE_VENUS_V_CPU_THROTTLE_CFG 587
#define SA8775P_SLAVE_VENUS_VCODEC_THROTTLE_CFG 588
#define SA8775P_SLAVE_A1NOC_SNOC 589
#define SA8775P_SLAVE_A2NOC_SNOC 590
#define SA8775P_SLAVE_DDRSS_CFG 591
#define SA8775P_SLAVE_GEM_NOC_CNOC 592
#define SA8775P_SLAVE_GEM_NOC_CFG 593
#define SA8775P_SLAVE_SNOC_GEM_NOC_GC 594
#define SA8775P_SLAVE_SNOC_GEM_NOC_SF 595
#define SA8775P_SLAVE_GP_DSP_SAIL_NOC 596
#define SA8775P_SLAVE_GPDSP_NOC_CFG 597
#define SA8775P_SLAVE_HCP_A 598
#define SA8775P_SLAVE_LLCC 599
#define SA8775P_SLAVE_MNOC_HF_MEM_NOC 600
#define SA8775P_SLAVE_MNOC_SF_MEM_NOC 601
#define SA8775P_SLAVE_CNOC_MNOC_HF_CFG 602
#define SA8775P_SLAVE_CNOC_MNOC_SF_CFG 603
#define SA8775P_SLAVE_CDSP_MEM_NOC 604
#define SA8775P_SLAVE_CDSPB_MEM_NOC 605
#define SA8775P_SLAVE_HCP_B 606
#define SA8775P_SLAVE_GEM_NOC_PCIE_CNOC 607
#define SA8775P_SLAVE_PCIE_ANOC_CFG 608
#define SA8775P_SLAVE_ANOC_PCIE_GEM_NOC 609
#define SA8775P_SLAVE_SNOC_CFG 610
#define SA8775P_SLAVE_LPASS_SNOC 611
#define SA8775P_SLAVE_QUP_CORE_0 612
#define SA8775P_SLAVE_QUP_CORE_1 613
#define SA8775P_SLAVE_QUP_CORE_2 614
#define SA8775P_SLAVE_QUP_CORE_3 615
#define SA8775P_SLAVE_BOOT_IMEM 616
#define SA8775P_SLAVE_IMEM 617
#define SA8775P_SLAVE_PIMEM 618
#define SA8775P_SLAVE_SERVICE_NSP_NOC 619
#define SA8775P_SLAVE_SERVICE_NSPB_NOC 620
#define SA8775P_SLAVE_SERVICE_GEM_NOC_1 621
#define SA8775P_SLAVE_SERVICE_MNOC_HF 622
#define SA8775P_SLAVE_SERVICE_MNOC_SF 623
#define SA8775P_SLAVE_SERVICES_LPASS_AML_NOC 624
#define SA8775P_SLAVE_SERVICE_LPASS_AG_NOC 625
#define SA8775P_SLAVE_SERVICE_GEM_NOC_2 626
#define SA8775P_SLAVE_SERVICE_SNOC 627
#define SA8775P_SLAVE_SERVICE_GEM_NOC 628
#define SA8775P_SLAVE_SERVICE_GEM_NOC2 629
#define SA8775P_SLAVE_PCIE_0 630
#define SA8775P_SLAVE_PCIE_1 631
#define SA8775P_SLAVE_QDSS_STM 632
#define SA8775P_SLAVE_TCU 633
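/*
 * Node definitions: each struct describes one port of the interconnect
 * topology, with its channel count, bus width in bytes, and outbound links.
 */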
static struct qcom_icc_node qxm_qup3 = {
.name = "qxm_qup3",
.id = SA8775P_MASTER_QUP_3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SA8775P_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_emac_0 = {
.name = "xm_emac_0",
.id = SA8775P_MASTER_EMAC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SA8775P_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_emac_1 = {
.name = "xm_emac_1",
.id = SA8775P_MASTER_EMAC_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SA8775P_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_sdc1 = {
.name = "xm_sdc1",
.id = SA8775P_MASTER_SDC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SA8775P_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
.id = SA8775P_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SA8775P_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_usb2_2 = {
.name = "xm_usb2_2",
.id = SA8775P_MASTER_USB2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SA8775P_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
.id = SA8775P_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SA8775P_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_usb3_1 = {
.name = "xm_usb3_1",
.id = SA8775P_MASTER_USB3_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SA8775P_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
.id = SA8775P_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
.id = SA8775P_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
.id = SA8775P_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
.id = SA8775P_MASTER_QUP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qnm_cnoc_datapath = {
.name = "qnm_cnoc_datapath",
.id = SA8775P_MASTER_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SA8775P_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_crypto_0 = {
.name = "qxm_crypto_0",
.id = SA8775P_MASTER_CRYPTO_CORE0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SA8775P_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_crypto_1 = {
.name = "qxm_crypto_1",
.id = SA8775P_MASTER_CRYPTO_CORE1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SA8775P_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
.id = SA8775P_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SA8775P_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_qdss_etr_0 = {
.name = "xm_qdss_etr_0",
.id = SA8775P_MASTER_QDSS_ETR_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SA8775P_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_qdss_etr_1 = {
.name = "xm_qdss_etr_1",
.id = SA8775P_MASTER_QDSS_ETR_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SA8775P_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_ufs_card = {
.name = "xm_ufs_card",
.id = SA8775P_MASTER_UFS_CARD,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SA8775P_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
.id = SA8775P_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_SLAVE_QUP_CORE_0 },
};
static struct qcom_icc_node qup1_core_master = {
.name = "qup1_core_master",
.id = SA8775P_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_SLAVE_QUP_CORE_1 },
};
static struct qcom_icc_node qup2_core_master = {
.name = "qup2_core_master",
.id = SA8775P_MASTER_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_SLAVE_QUP_CORE_2 },
};
static struct qcom_icc_node qup3_core_master = {
.name = "qup3_core_master",
.id = SA8775P_MASTER_QUP_CORE_3,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_SLAVE_QUP_CORE_3 },
};
static struct qcom_icc_node qnm_gemnoc_cnoc = {
.name = "qnm_gemnoc_cnoc",
.id = SA8775P_MASTER_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 82,
.links = { SA8775P_SLAVE_AHB2PHY_0,
SA8775P_SLAVE_AHB2PHY_1,
SA8775P_SLAVE_AHB2PHY_2,
SA8775P_SLAVE_AHB2PHY_3,
SA8775P_SLAVE_ANOC_THROTTLE_CFG,
SA8775P_SLAVE_AOSS,
SA8775P_SLAVE_APPSS,
SA8775P_SLAVE_BOOT_ROM,
SA8775P_SLAVE_CAMERA_CFG,
SA8775P_SLAVE_CAMERA_NRT_THROTTLE_CFG,
SA8775P_SLAVE_CAMERA_RT_THROTTLE_CFG,
SA8775P_SLAVE_CLK_CTL,
SA8775P_SLAVE_CDSP_CFG,
SA8775P_SLAVE_CDSP1_CFG,
SA8775P_SLAVE_RBCPR_CX_CFG,
SA8775P_SLAVE_RBCPR_MMCX_CFG,
SA8775P_SLAVE_RBCPR_MX_CFG,
SA8775P_SLAVE_CPR_NSPCX,
SA8775P_SLAVE_CRYPTO_0_CFG,
SA8775P_SLAVE_CX_RDPM,
SA8775P_SLAVE_DISPLAY_CFG,
SA8775P_SLAVE_DISPLAY_RT_THROTTLE_CFG,
SA8775P_SLAVE_DISPLAY1_CFG,
SA8775P_SLAVE_DISPLAY1_RT_THROTTLE_CFG,
SA8775P_SLAVE_EMAC_CFG,
SA8775P_SLAVE_EMAC1_CFG,
SA8775P_SLAVE_GP_DSP0_CFG,
SA8775P_SLAVE_GP_DSP1_CFG,
SA8775P_SLAVE_GPDSP0_THROTTLE_CFG,
SA8775P_SLAVE_GPDSP1_THROTTLE_CFG,
SA8775P_SLAVE_GPU_TCU_THROTTLE_CFG,
SA8775P_SLAVE_GFX3D_CFG,
SA8775P_SLAVE_HWKM,
SA8775P_SLAVE_IMEM_CFG,
SA8775P_SLAVE_IPA_CFG,
SA8775P_SLAVE_IPC_ROUTER_CFG,
SA8775P_SLAVE_LPASS,
SA8775P_SLAVE_LPASS_THROTTLE_CFG,
SA8775P_SLAVE_MX_RDPM,
SA8775P_SLAVE_MXC_RDPM,
SA8775P_SLAVE_PCIE_0_CFG,
SA8775P_SLAVE_PCIE_1_CFG,
SA8775P_SLAVE_PCIE_RSC_CFG,
SA8775P_SLAVE_PCIE_TCU_THROTTLE_CFG,
SA8775P_SLAVE_PCIE_THROTTLE_CFG,
SA8775P_SLAVE_PDM,
SA8775P_SLAVE_PIMEM_CFG,
SA8775P_SLAVE_PKA_WRAPPER_CFG,
SA8775P_SLAVE_QDSS_CFG,
SA8775P_SLAVE_QM_CFG,
SA8775P_SLAVE_QM_MPU_CFG,
SA8775P_SLAVE_QUP_0,
SA8775P_SLAVE_QUP_1,
SA8775P_SLAVE_QUP_2,
SA8775P_SLAVE_QUP_3,
SA8775P_SLAVE_SAIL_THROTTLE_CFG,
SA8775P_SLAVE_SDC1,
SA8775P_SLAVE_SECURITY,
SA8775P_SLAVE_SNOC_THROTTLE_CFG,
SA8775P_SLAVE_TCSR,
SA8775P_SLAVE_TLMM,
SA8775P_SLAVE_TSC_CFG,
SA8775P_SLAVE_UFS_CARD_CFG,
SA8775P_SLAVE_UFS_MEM_CFG,
SA8775P_SLAVE_USB2,
SA8775P_SLAVE_USB3_0,
SA8775P_SLAVE_USB3_1,
SA8775P_SLAVE_VENUS_CFG,
SA8775P_SLAVE_VENUS_CVP_THROTTLE_CFG,
SA8775P_SLAVE_VENUS_V_CPU_THROTTLE_CFG,
SA8775P_SLAVE_VENUS_VCODEC_THROTTLE_CFG,
SA8775P_SLAVE_DDRSS_CFG,
SA8775P_SLAVE_GPDSP_NOC_CFG,
SA8775P_SLAVE_CNOC_MNOC_HF_CFG,
SA8775P_SLAVE_CNOC_MNOC_SF_CFG,
SA8775P_SLAVE_PCIE_ANOC_CFG,
SA8775P_SLAVE_SNOC_CFG,
SA8775P_SLAVE_BOOT_IMEM,
SA8775P_SLAVE_IMEM,
SA8775P_SLAVE_PIMEM,
SA8775P_SLAVE_QDSS_STM,
SA8775P_SLAVE_TCU
},
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
.id = SA8775P_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
.links = { SA8775P_SLAVE_PCIE_0,
SA8775P_SLAVE_PCIE_1
},
};
static struct qcom_icc_node qnm_cnoc_dc_noc = {
.name = "qnm_cnoc_dc_noc",
.id = SA8775P_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
.links = { SA8775P_SLAVE_LLCC_CFG,
SA8775P_SLAVE_GEM_NOC_CFG
},
};
static struct qcom_icc_node alm_gpu_tcu = {
.name = "alm_gpu_tcu",
.id = SA8775P_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SA8775P_SLAVE_GEM_NOC_CNOC,
SA8775P_SLAVE_LLCC
},
};
static struct qcom_icc_node alm_pcie_tcu = {
.name = "alm_pcie_tcu",
.id = SA8775P_MASTER_PCIE_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SA8775P_SLAVE_GEM_NOC_CNOC,
SA8775P_SLAVE_LLCC
},
};
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
.id = SA8775P_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SA8775P_SLAVE_GEM_NOC_CNOC,
SA8775P_SLAVE_LLCC
},
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
.id = SA8775P_MASTER_APPSS_PROC,
.channels = 4,
.buswidth = 32,
.num_links = 3,
.links = { SA8775P_SLAVE_GEM_NOC_CNOC,
SA8775P_SLAVE_LLCC,
SA8775P_SLAVE_GEM_NOC_PCIE_CNOC
},
};
static struct qcom_icc_node qnm_cmpnoc0 = {
.name = "qnm_cmpnoc0",
.id = SA8775P_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SA8775P_SLAVE_GEM_NOC_CNOC,
SA8775P_SLAVE_LLCC
},
};
static struct qcom_icc_node qnm_cmpnoc1 = {
.name = "qnm_cmpnoc1",
.id = SA8775P_MASTER_COMPUTE_NOC_1,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SA8775P_SLAVE_GEM_NOC_CNOC,
SA8775P_SLAVE_LLCC
},
};
static struct qcom_icc_node qnm_gemnoc_cfg = {
.name = "qnm_gemnoc_cfg",
.id = SA8775P_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 4,
.links = { SA8775P_SLAVE_SERVICE_GEM_NOC_1,
SA8775P_SLAVE_SERVICE_GEM_NOC_2,
SA8775P_SLAVE_SERVICE_GEM_NOC,
SA8775P_SLAVE_SERVICE_GEM_NOC2
},
};
static struct qcom_icc_node qnm_gpdsp_sail = {
.name = "qnm_gpdsp_sail",
.id = SA8775P_MASTER_GPDSP_SAIL,
.channels = 1,
.buswidth = 16,
.num_links = 2,
.links = { SA8775P_SLAVE_GEM_NOC_CNOC,
SA8775P_SLAVE_LLCC
},
};
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
.id = SA8775P_MASTER_GFX3D,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SA8775P_SLAVE_GEM_NOC_CNOC,
SA8775P_SLAVE_LLCC
},
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
.id = SA8775P_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SA8775P_SLAVE_LLCC,
SA8775P_SLAVE_GEM_NOC_PCIE_CNOC
},
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
.id = SA8775P_MASTER_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 3,
.links = { SA8775P_SLAVE_GEM_NOC_CNOC,
SA8775P_SLAVE_LLCC,
SA8775P_SLAVE_GEM_NOC_PCIE_CNOC
},
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
.id = SA8775P_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
.links = { SA8775P_SLAVE_GEM_NOC_CNOC,
SA8775P_SLAVE_LLCC
},
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
.id = SA8775P_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SA8775P_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
.id = SA8775P_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
.links = { SA8775P_SLAVE_GEM_NOC_CNOC,
SA8775P_SLAVE_LLCC,
SA8775P_SLAVE_GEM_NOC_PCIE_CNOC },
};
static struct qcom_icc_node qxm_dsp0 = {
.name = "qxm_dsp0",
.id = SA8775P_MASTER_DSP0,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SA8775P_SLAVE_GP_DSP_SAIL_NOC },
};
static struct qcom_icc_node qxm_dsp1 = {
.name = "qxm_dsp1",
.id = SA8775P_MASTER_DSP1,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SA8775P_SLAVE_GP_DSP_SAIL_NOC },
};
static struct qcom_icc_node qhm_config_noc = {
.name = "qhm_config_noc",
.id = SA8775P_MASTER_CNOC_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 6,
.links = { SA8775P_SLAVE_LPASS_CORE_CFG,
SA8775P_SLAVE_LPASS_LPI_CFG,
SA8775P_SLAVE_LPASS_MPU_CFG,
SA8775P_SLAVE_LPASS_TOP_CFG,
SA8775P_SLAVE_SERVICES_LPASS_AML_NOC,
SA8775P_SLAVE_SERVICE_LPASS_AG_NOC
},
};
static struct qcom_icc_node qxm_lpass_dsp = {
.name = "qxm_lpass_dsp",
.id = SA8775P_MASTER_LPASS_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 4,
.links = { SA8775P_SLAVE_LPASS_TOP_CFG,
SA8775P_SLAVE_LPASS_SNOC,
SA8775P_SLAVE_SERVICES_LPASS_AML_NOC,
SA8775P_SLAVE_SERVICE_LPASS_AG_NOC
},
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
.id = SA8775P_MASTER_LLCC,
.channels = 8,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_SLAVE_EBI1 },
};
static struct qcom_icc_node qnm_camnoc_hf = {
.name = "qnm_camnoc_hf",
.id = SA8775P_MASTER_CAMNOC_HF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SA8775P_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qnm_camnoc_icp = {
.name = "qnm_camnoc_icp",
.id = SA8775P_MASTER_CAMNOC_ICP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SA8775P_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_camnoc_sf = {
.name = "qnm_camnoc_sf",
.id = SA8775P_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SA8775P_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_mdp0_0 = {
.name = "qnm_mdp0_0",
.id = SA8775P_MASTER_MDP0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SA8775P_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qnm_mdp0_1 = {
.name = "qnm_mdp0_1",
.id = SA8775P_MASTER_MDP1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SA8775P_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qnm_mdp1_0 = {
.name = "qnm_mdp1_0",
.id = SA8775P_MASTER_MDP_CORE1_0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SA8775P_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qnm_mdp1_1 = {
.name = "qnm_mdp1_1",
.id = SA8775P_MASTER_MDP_CORE1_1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SA8775P_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qnm_mnoc_hf_cfg = {
.name = "qnm_mnoc_hf_cfg",
.id = SA8775P_MASTER_CNOC_MNOC_HF_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_SLAVE_SERVICE_MNOC_HF },
};
static struct qcom_icc_node qnm_mnoc_sf_cfg = {
.name = "qnm_mnoc_sf_cfg",
.id = SA8775P_MASTER_CNOC_MNOC_SF_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_SLAVE_SERVICE_MNOC_SF },
};
static struct qcom_icc_node qnm_video0 = {
.name = "qnm_video0",
.id = SA8775P_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SA8775P_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_video1 = {
.name = "qnm_video1",
.id = SA8775P_MASTER_VIDEO_P1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SA8775P_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_video_cvp = {
.name = "qnm_video_cvp",
.id = SA8775P_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SA8775P_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_video_v_cpu = {
.name = "qnm_video_v_cpu",
.id = SA8775P_MASTER_VIDEO_V_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SA8775P_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qhm_nsp_noc_config = {
.name = "qhm_nsp_noc_config",
.id = SA8775P_MASTER_CDSP_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_SLAVE_SERVICE_NSP_NOC },
};
static struct qcom_icc_node qxm_nsp = {
.name = "qxm_nsp",
.id = SA8775P_MASTER_CDSP_PROC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
	.links = { SA8775P_SLAVE_HCP_A, SA8775P_SLAVE_CDSP_MEM_NOC },
};
static struct qcom_icc_node qhm_nspb_noc_config = {
.name = "qhm_nspb_noc_config",
.id = SA8775P_MASTER_CDSPB_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_SLAVE_SERVICE_NSPB_NOC },
};
static struct qcom_icc_node qxm_nspb = {
.name = "qxm_nspb",
.id = SA8775P_MASTER_CDSP_PROC_B,
.channels = 2,
.buswidth = 32,
.num_links = 2,
	.links = { SA8775P_SLAVE_HCP_B, SA8775P_SLAVE_CDSPB_MEM_NOC },
};
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
.id = SA8775P_MASTER_PCIE_0,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SA8775P_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
.id = SA8775P_MASTER_PCIE_1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SA8775P_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node qhm_gic = {
.name = "qhm_gic",
.id = SA8775P_MASTER_GIC_AHB,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
.id = SA8775P_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SA8775P_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
.id = SA8775P_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SA8775P_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qnm_lpass_noc = {
.name = "qnm_lpass_noc",
.id = SA8775P_MASTER_LPASS_ANOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SA8775P_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qnm_snoc_cfg = {
.name = "qnm_snoc_cfg",
.id = SA8775P_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_SLAVE_SERVICE_SNOC },
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
.id = SA8775P_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SA8775P_SLAVE_SNOC_GEM_NOC_GC },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
.id = SA8775P_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SA8775P_SLAVE_SNOC_GEM_NOC_GC },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
.id = SA8775P_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SA8775P_MASTER_A1NOC_SNOC },
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
.id = SA8775P_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SA8775P_MASTER_A2NOC_SNOC },
};
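/* Slave nodes with no .links entries are terminal endpoints of the graph. */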
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
.id = SA8775P_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup1_core_slave = {
.name = "qup1_core_slave",
.id = SA8775P_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup2_core_slave = {
.name = "qup2_core_slave",
.id = SA8775P_SLAVE_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup3_core_slave = {
.name = "qup3_core_slave",
.id = SA8775P_SLAVE_QUP_CORE_3,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
.id = SA8775P_SLAVE_AHB2PHY_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy1 = {
.name = "qhs_ahb2phy1",
.id = SA8775P_SLAVE_AHB2PHY_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy2 = {
.name = "qhs_ahb2phy2",
.id = SA8775P_SLAVE_AHB2PHY_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy3 = {
.name = "qhs_ahb2phy3",
.id = SA8775P_SLAVE_AHB2PHY_3,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_anoc_throttle_cfg = {
.name = "qhs_anoc_throttle_cfg",
.id = SA8775P_SLAVE_ANOC_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
.id = SA8775P_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
.id = SA8775P_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_boot_rom = {
.name = "qhs_boot_rom",
.id = SA8775P_SLAVE_BOOT_ROM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
.id = SA8775P_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_nrt_throttle_cfg = {
.name = "qhs_camera_nrt_throttle_cfg",
.id = SA8775P_SLAVE_CAMERA_NRT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_rt_throttle_cfg = {
.name = "qhs_camera_rt_throttle_cfg",
.id = SA8775P_SLAVE_CAMERA_RT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
.id = SA8775P_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_compute0_cfg = {
.name = "qhs_compute0_cfg",
.id = SA8775P_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_MASTER_CDSP_NOC_CFG },
};
static struct qcom_icc_node qhs_compute1_cfg = {
.name = "qhs_compute1_cfg",
.id = SA8775P_SLAVE_CDSP1_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_MASTER_CDSPB_NOC_CFG },
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
.id = SA8775P_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mmcx = {
.name = "qhs_cpr_mmcx",
.id = SA8775P_SLAVE_RBCPR_MMCX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mx = {
.name = "qhs_cpr_mx",
.id = SA8775P_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_nspcx = {
.name = "qhs_cpr_nspcx",
.id = SA8775P_SLAVE_CPR_NSPCX,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
.id = SA8775P_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cx_rdpm = {
.name = "qhs_cx_rdpm",
.id = SA8775P_SLAVE_CX_RDPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display0_cfg = {
.name = "qhs_display0_cfg",
.id = SA8775P_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display0_rt_throttle_cfg = {
.name = "qhs_display0_rt_throttle_cfg",
.id = SA8775P_SLAVE_DISPLAY_RT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display1_cfg = {
.name = "qhs_display1_cfg",
.id = SA8775P_SLAVE_DISPLAY1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display1_rt_throttle_cfg = {
.name = "qhs_display1_rt_throttle_cfg",
.id = SA8775P_SLAVE_DISPLAY1_RT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_emac0_cfg = {
.name = "qhs_emac0_cfg",
.id = SA8775P_SLAVE_EMAC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_emac1_cfg = {
.name = "qhs_emac1_cfg",
.id = SA8775P_SLAVE_EMAC1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gp_dsp0_cfg = {
.name = "qhs_gp_dsp0_cfg",
.id = SA8775P_SLAVE_GP_DSP0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gp_dsp1_cfg = {
.name = "qhs_gp_dsp1_cfg",
.id = SA8775P_SLAVE_GP_DSP1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpdsp0_throttle_cfg = {
.name = "qhs_gpdsp0_throttle_cfg",
.id = SA8775P_SLAVE_GPDSP0_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpdsp1_throttle_cfg = {
.name = "qhs_gpdsp1_throttle_cfg",
.id = SA8775P_SLAVE_GPDSP1_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpu_tcu_throttle_cfg = {
.name = "qhs_gpu_tcu_throttle_cfg",
.id = SA8775P_SLAVE_GPU_TCU_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
.id = SA8775P_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_hwkm = {
.name = "qhs_hwkm",
.id = SA8775P_SLAVE_HWKM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
.id = SA8775P_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
.id = SA8775P_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
.id = SA8775P_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_cfg = {
.name = "qhs_lpass_cfg",
.id = SA8775P_SLAVE_LPASS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_MASTER_CNOC_LPASS_AG_NOC },
};
static struct qcom_icc_node qhs_lpass_throttle_cfg = {
.name = "qhs_lpass_throttle_cfg",
.id = SA8775P_SLAVE_LPASS_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mx_rdpm = {
.name = "qhs_mx_rdpm",
.id = SA8775P_SLAVE_MX_RDPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mxc_rdpm = {
.name = "qhs_mxc_rdpm",
.id = SA8775P_SLAVE_MXC_RDPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
.id = SA8775P_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie1_cfg = {
.name = "qhs_pcie1_cfg",
.id = SA8775P_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie_rsc_cfg = {
.name = "qhs_pcie_rsc_cfg",
.id = SA8775P_SLAVE_PCIE_RSC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie_tcu_throttle_cfg = {
.name = "qhs_pcie_tcu_throttle_cfg",
.id = SA8775P_SLAVE_PCIE_TCU_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie_throttle_cfg = {
.name = "qhs_pcie_throttle_cfg",
.id = SA8775P_SLAVE_PCIE_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
.id = SA8775P_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
.id = SA8775P_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pke_wrapper_cfg = {
.name = "qhs_pke_wrapper_cfg",
.id = SA8775P_SLAVE_PKA_WRAPPER_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
.id = SA8775P_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qm_cfg = {
.name = "qhs_qm_cfg",
.id = SA8775P_SLAVE_QM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qm_mpu_cfg = {
.name = "qhs_qm_mpu_cfg",
.id = SA8775P_SLAVE_QM_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
.id = SA8775P_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
.id = SA8775P_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup2 = {
.name = "qhs_qup2",
.id = SA8775P_SLAVE_QUP_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup3 = {
.name = "qhs_qup3",
.id = SA8775P_SLAVE_QUP_3,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sail_throttle_cfg = {
.name = "qhs_sail_throttle_cfg",
.id = SA8775P_SLAVE_SAIL_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc1 = {
.name = "qhs_sdc1",
.id = SA8775P_SLAVE_SDC1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_security = {
.name = "qhs_security",
.id = SA8775P_SLAVE_SECURITY,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_snoc_throttle_cfg = {
.name = "qhs_snoc_throttle_cfg",
.id = SA8775P_SLAVE_SNOC_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
.id = SA8775P_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
.id = SA8775P_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tsc_cfg = {
.name = "qhs_tsc_cfg",
.id = SA8775P_SLAVE_TSC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_card_cfg = {
.name = "qhs_ufs_card_cfg",
.id = SA8775P_SLAVE_UFS_CARD_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
.id = SA8775P_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb2_0 = {
.name = "qhs_usb2_0",
.id = SA8775P_SLAVE_USB2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
.id = SA8775P_SLAVE_USB3_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_1 = {
.name = "qhs_usb3_1",
.id = SA8775P_SLAVE_USB3_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
.id = SA8775P_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cvp_throttle_cfg = {
.name = "qhs_venus_cvp_throttle_cfg",
.id = SA8775P_SLAVE_VENUS_CVP_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_v_cpu_throttle_cfg = {
.name = "qhs_venus_v_cpu_throttle_cfg",
.id = SA8775P_SLAVE_VENUS_V_CPU_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_vcodec_throttle_cfg = {
.name = "qhs_venus_vcodec_throttle_cfg",
.id = SA8775P_SLAVE_VENUS_VCODEC_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_ddrss_cfg = {
.name = "qns_ddrss_cfg",
.id = SA8775P_SLAVE_DDRSS_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_MASTER_CNOC_DC_NOC },
};
static struct qcom_icc_node qns_gpdsp_noc_cfg = {
.name = "qns_gpdsp_noc_cfg",
.id = SA8775P_SLAVE_GPDSP_NOC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_mnoc_hf_cfg = {
.name = "qns_mnoc_hf_cfg",
.id = SA8775P_SLAVE_CNOC_MNOC_HF_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_MASTER_CNOC_MNOC_HF_CFG },
};
static struct qcom_icc_node qns_mnoc_sf_cfg = {
.name = "qns_mnoc_sf_cfg",
.id = SA8775P_SLAVE_CNOC_MNOC_SF_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_MASTER_CNOC_MNOC_SF_CFG },
};
static struct qcom_icc_node qns_pcie_anoc_cfg = {
.name = "qns_pcie_anoc_cfg",
.id = SA8775P_SLAVE_PCIE_ANOC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_snoc_cfg = {
.name = "qns_snoc_cfg",
.id = SA8775P_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_MASTER_SNOC_CFG },
};
static struct qcom_icc_node qxs_boot_imem = {
.name = "qxs_boot_imem",
.id = SA8775P_SLAVE_BOOT_IMEM,
.channels = 1,
.buswidth = 16,
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
.id = SA8775P_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
.id = SA8775P_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_pcie_0 = {
.name = "xs_pcie_0",
.id = SA8775P_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 16,
};
static struct qcom_icc_node xs_pcie_1 = {
.name = "xs_pcie_1",
.id = SA8775P_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
.id = SA8775P_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
.id = SA8775P_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
.id = SA8775P_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_gemnoc = {
.name = "qns_gemnoc",
.id = SA8775P_SLAVE_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SA8775P_MASTER_GEM_NOC_CFG },
};
static struct qcom_icc_node qns_gem_noc_cnoc = {
.name = "qns_gem_noc_cnoc",
.id = SA8775P_SLAVE_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SA8775P_MASTER_GEM_NOC_CNOC },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
.id = SA8775P_SLAVE_LLCC,
.channels = 6,
.buswidth = 16,
.num_links = 1,
.links = { SA8775P_MASTER_LLCC },
};
static struct qcom_icc_node qns_pcie = {
.name = "qns_pcie",
.id = SA8775P_SLAVE_GEM_NOC_PCIE_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SA8775P_MASTER_GEM_NOC_PCIE_SNOC },
};
static struct qcom_icc_node srvc_even_gemnoc = {
.name = "srvc_even_gemnoc",
.id = SA8775P_SLAVE_SERVICE_GEM_NOC_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_odd_gemnoc = {
.name = "srvc_odd_gemnoc",
.id = SA8775P_SLAVE_SERVICE_GEM_NOC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_sys_gemnoc = {
.name = "srvc_sys_gemnoc",
.id = SA8775P_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_sys_gemnoc_2 = {
.name = "srvc_sys_gemnoc_2",
.id = SA8775P_SLAVE_SERVICE_GEM_NOC2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_gp_dsp_sail_noc = {
.name = "qns_gp_dsp_sail_noc",
.id = SA8775P_SLAVE_GP_DSP_SAIL_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SA8775P_MASTER_GPDSP_SAIL },
};
static struct qcom_icc_node qhs_lpass_core = {
.name = "qhs_lpass_core",
.id = SA8775P_SLAVE_LPASS_CORE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_lpi = {
.name = "qhs_lpass_lpi",
.id = SA8775P_SLAVE_LPASS_LPI_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_mpu = {
.name = "qhs_lpass_mpu",
.id = SA8775P_SLAVE_LPASS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_top = {
.name = "qhs_lpass_top",
.id = SA8775P_SLAVE_LPASS_TOP_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_sysnoc = {
.name = "qns_sysnoc",
.id = SA8775P_SLAVE_LPASS_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SA8775P_MASTER_LPASS_ANOC },
};
static struct qcom_icc_node srvc_niu_aml_noc = {
.name = "srvc_niu_aml_noc",
.id = SA8775P_SLAVE_SERVICES_LPASS_AML_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_niu_lpass_agnoc = {
.name = "srvc_niu_lpass_agnoc",
.id = SA8775P_SLAVE_SERVICE_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
.id = SA8775P_SLAVE_EBI1,
.channels = 8,
.buswidth = 4,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
.id = SA8775P_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SA8775P_MASTER_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
.id = SA8775P_SLAVE_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SA8775P_MASTER_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node srvc_mnoc_hf = {
.name = "srvc_mnoc_hf",
.id = SA8775P_SLAVE_SERVICE_MNOC_HF,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_mnoc_sf = {
.name = "srvc_mnoc_sf",
.id = SA8775P_SLAVE_SERVICE_MNOC_SF,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_hcp = {
.name = "qns_hcp",
.id = SA8775P_SLAVE_HCP_A,
.channels = 2,
.buswidth = 32,
};
static struct qcom_icc_node qns_nsp_gemnoc = {
.name = "qns_nsp_gemnoc",
.id = SA8775P_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SA8775P_MASTER_COMPUTE_NOC },
};
static struct qcom_icc_node service_nsp_noc = {
.name = "service_nsp_noc",
.id = SA8775P_SLAVE_SERVICE_NSP_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_nspb_gemnoc = {
.name = "qns_nspb_gemnoc",
.id = SA8775P_SLAVE_CDSPB_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SA8775P_MASTER_COMPUTE_NOC_1 },
};
static struct qcom_icc_node qns_nspb_hcp = {
.name = "qns_nspb_hcp",
.id = SA8775P_SLAVE_HCP_B,
.channels = 2,
.buswidth = 32,
};
static struct qcom_icc_node service_nspb_noc = {
.name = "service_nspb_noc",
.id = SA8775P_SLAVE_SERVICE_NSPB_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_pcie_mem_noc = {
.name = "qns_pcie_mem_noc",
.id = SA8775P_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SA8775P_MASTER_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
.id = SA8775P_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SA8775P_MASTER_SNOC_GC_MEM_NOC },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
.id = SA8775P_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SA8775P_MASTER_SNOC_SF_MEM_NOC },
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
.id = SA8775P_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
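/*
 * Bus Clock Manager (BCM) definitions: RPMh aggregates the bandwidth votes
 * of the nodes attached to each BCM. Entries marked .keepalive retain a
 * minimum vote so the corresponding path is never clocked completely off.
 */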
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
.enable_mask = 0x8,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.num_nodes = 2,
.nodes = { &qxm_crypto_0, &qxm_crypto_1 },
};
static struct qcom_icc_bcm bcm_cn0 = {
.name = "CN0",
.keepalive = true,
.num_nodes = 2,
.nodes = { &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie },
};
static struct qcom_icc_bcm bcm_cn1 = {
.name = "CN1",
.num_nodes = 76,
.nodes = { &qhs_ahb2phy0, &qhs_ahb2phy1,
&qhs_ahb2phy2, &qhs_ahb2phy3,
&qhs_anoc_throttle_cfg, &qhs_aoss,
&qhs_apss, &qhs_boot_rom,
&qhs_camera_cfg, &qhs_camera_nrt_throttle_cfg,
&qhs_camera_rt_throttle_cfg, &qhs_clk_ctl,
&qhs_compute0_cfg, &qhs_compute1_cfg,
&qhs_cpr_cx, &qhs_cpr_mmcx,
&qhs_cpr_mx, &qhs_cpr_nspcx,
&qhs_crypto0_cfg, &qhs_cx_rdpm,
&qhs_display0_cfg, &qhs_display0_rt_throttle_cfg,
&qhs_display1_cfg, &qhs_display1_rt_throttle_cfg,
&qhs_emac0_cfg, &qhs_emac1_cfg,
&qhs_gp_dsp0_cfg, &qhs_gp_dsp1_cfg,
&qhs_gpdsp0_throttle_cfg, &qhs_gpdsp1_throttle_cfg,
&qhs_gpu_tcu_throttle_cfg, &qhs_gpuss_cfg,
&qhs_hwkm, &qhs_imem_cfg,
&qhs_ipa, &qhs_ipc_router,
&qhs_lpass_cfg, &qhs_lpass_throttle_cfg,
&qhs_mx_rdpm, &qhs_mxc_rdpm,
&qhs_pcie0_cfg, &qhs_pcie1_cfg,
&qhs_pcie_rsc_cfg, &qhs_pcie_tcu_throttle_cfg,
&qhs_pcie_throttle_cfg, &qhs_pdm,
&qhs_pimem_cfg, &qhs_pke_wrapper_cfg,
&qhs_qdss_cfg, &qhs_qm_cfg,
&qhs_qm_mpu_cfg, &qhs_sail_throttle_cfg,
&qhs_sdc1, &qhs_security,
&qhs_snoc_throttle_cfg, &qhs_tcsr,
&qhs_tlmm, &qhs_tsc_cfg,
&qhs_ufs_card_cfg, &qhs_ufs_mem_cfg,
&qhs_usb2_0, &qhs_usb3_0,
&qhs_usb3_1, &qhs_venus_cfg,
&qhs_venus_cvp_throttle_cfg, &qhs_venus_v_cpu_throttle_cfg,
&qhs_venus_vcodec_throttle_cfg, &qns_ddrss_cfg,
&qns_gpdsp_noc_cfg, &qns_mnoc_hf_cfg,
&qns_mnoc_sf_cfg, &qns_pcie_anoc_cfg,
&qns_snoc_cfg, &qxs_boot_imem,
&qxs_imem, &xs_sys_tcu_cfg },
};
static struct qcom_icc_bcm bcm_cn2 = {
.name = "CN2",
.num_nodes = 4,
.nodes = { &qhs_qup0, &qhs_qup1,
&qhs_qup2, &qhs_qup3 },
};
static struct qcom_icc_bcm bcm_cn3 = {
.name = "CN3",
.num_nodes = 2,
.nodes = { &xs_pcie_0, &xs_pcie_1 },
};
static struct qcom_icc_bcm bcm_gna0 = {
.name = "GNA0",
.num_nodes = 1,
.nodes = { &qxm_dsp0 },
};
static struct qcom_icc_bcm bcm_gnb0 = {
.name = "GNB0",
.num_nodes = 1,
.nodes = { &qxm_dsp1 },
};
static struct qcom_icc_bcm bcm_mc0 = {
.name = "MC0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_mm0 = {
.name = "MM0",
.keepalive = true,
.num_nodes = 5,
.nodes = { &qnm_camnoc_hf, &qnm_mdp0_0,
&qnm_mdp0_1, &qnm_mdp1_0,
&qns_mem_noc_hf },
};
static struct qcom_icc_bcm bcm_mm1 = {
.name = "MM1",
.num_nodes = 7,
.nodes = { &qnm_camnoc_icp, &qnm_camnoc_sf,
&qnm_video0, &qnm_video1,
&qnm_video_cvp, &qnm_video_v_cpu,
&qns_mem_noc_sf },
};
static struct qcom_icc_bcm bcm_nsa0 = {
.name = "NSA0",
.num_nodes = 2,
.nodes = { &qns_hcp, &qns_nsp_gemnoc },
};
static struct qcom_icc_bcm bcm_nsa1 = {
.name = "NSA1",
.num_nodes = 1,
.nodes = { &qxm_nsp },
};
static struct qcom_icc_bcm bcm_nsb0 = {
.name = "NSB0",
.num_nodes = 2,
.nodes = { &qns_nspb_gemnoc, &qns_nspb_hcp },
};
static struct qcom_icc_bcm bcm_nsb1 = {
.name = "NSB1",
.num_nodes = 1,
.nodes = { &qxm_nspb },
};
static struct qcom_icc_bcm bcm_pci0 = {
.name = "PCI0",
.num_nodes = 1,
.nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_bcm bcm_qup0 = {
.name = "QUP0",
.vote_scale = 1,
.num_nodes = 1,
.nodes = { &qup0_core_slave },
};
static struct qcom_icc_bcm bcm_qup1 = {
.name = "QUP1",
.vote_scale = 1,
.num_nodes = 1,
.nodes = { &qup1_core_slave },
};
static struct qcom_icc_bcm bcm_qup2 = {
.name = "QUP2",
.vote_scale = 1,
.num_nodes = 2,
.nodes = { &qup2_core_slave, &qup3_core_slave },
};
static struct qcom_icc_bcm bcm_sh0 = {
.name = "SH0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_llcc },
};
static struct qcom_icc_bcm bcm_sh2 = {
.name = "SH2",
.num_nodes = 1,
.nodes = { &chm_apps },
};
static struct qcom_icc_bcm bcm_sn0 = {
.name = "SN0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_bcm bcm_sn1 = {
.name = "SN1",
.num_nodes = 1,
.nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_bcm bcm_sn2 = {
.name = "SN2",
.num_nodes = 1,
.nodes = { &qxs_pimem },
};
static struct qcom_icc_bcm bcm_sn3 = {
.name = "SN3",
.num_nodes = 2,
.nodes = { &qns_a1noc_snoc, &qnm_aggre1_noc },
};
static struct qcom_icc_bcm bcm_sn4 = {
.name = "SN4",
.num_nodes = 2,
.nodes = { &qns_a2noc_snoc, &qnm_aggre2_noc },
};
static struct qcom_icc_bcm bcm_sn9 = {
.name = "SN9",
.num_nodes = 2,
.nodes = { &qns_sysnoc, &qnm_lpass_noc },
};
static struct qcom_icc_bcm bcm_sn10 = {
.name = "SN10",
.num_nodes = 1,
.nodes = { &xs_qdss_stm },
};
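/*
 * Per-NoC provider tables: the node and BCM arrays below are bundled into
 * qcom_icc_desc instances and selected through the of_match table at probe.
 */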
static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
&bcm_sn3,
};
static struct qcom_icc_node *aggre1_noc_nodes[] = {
[MASTER_QUP_3] = &qxm_qup3,
[MASTER_EMAC] = &xm_emac_0,
[MASTER_EMAC_1] = &xm_emac_1,
[MASTER_SDC] = &xm_sdc1,
[MASTER_UFS_MEM] = &xm_ufs_mem,
[MASTER_USB2] = &xm_usb2_2,
[MASTER_USB3_0] = &xm_usb3_0,
[MASTER_USB3_1] = &xm_usb3_1,
[SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
};
static const struct qcom_icc_desc sa8775p_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
&bcm_ce0,
&bcm_sn4,
};
static struct qcom_icc_node *aggre2_noc_nodes[] = {
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QUP_0] = &qhm_qup0,
[MASTER_QUP_1] = &qhm_qup1,
[MASTER_QUP_2] = &qhm_qup2,
[MASTER_CNOC_A2NOC] = &qnm_cnoc_datapath,
[MASTER_CRYPTO_CORE0] = &qxm_crypto_0,
[MASTER_CRYPTO_CORE1] = &qxm_crypto_1,
[MASTER_IPA] = &qxm_ipa,
[MASTER_QDSS_ETR_0] = &xm_qdss_etr_0,
[MASTER_QDSS_ETR_1] = &xm_qdss_etr_1,
[MASTER_UFS_CARD] = &xm_ufs_card,
[SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
};
static const struct qcom_icc_desc sa8775p_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
static struct qcom_icc_bcm *clk_virt_bcms[] = {
&bcm_qup0,
&bcm_qup1,
&bcm_qup2,
};
static struct qcom_icc_node *clk_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[MASTER_QUP_CORE_2] = &qup2_core_master,
[MASTER_QUP_CORE_3] = &qup3_core_master,
[SLAVE_QUP_CORE_0] = &qup0_core_slave,
[SLAVE_QUP_CORE_1] = &qup1_core_slave,
[SLAVE_QUP_CORE_2] = &qup2_core_slave,
[SLAVE_QUP_CORE_3] = &qup3_core_slave,
};
static const struct qcom_icc_desc sa8775p_clk_virt = {
.nodes = clk_virt_nodes,
.num_nodes = ARRAY_SIZE(clk_virt_nodes),
.bcms = clk_virt_bcms,
.num_bcms = ARRAY_SIZE(clk_virt_bcms),
};
static struct qcom_icc_bcm *config_noc_bcms[] = {
&bcm_cn0,
&bcm_cn1,
&bcm_cn2,
&bcm_cn3,
&bcm_sn2,
&bcm_sn10,
};
static struct qcom_icc_node *config_noc_nodes[] = {
[MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
[MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
[SLAVE_AHB2PHY_0] = &qhs_ahb2phy0,
[SLAVE_AHB2PHY_1] = &qhs_ahb2phy1,
[SLAVE_AHB2PHY_2] = &qhs_ahb2phy2,
[SLAVE_AHB2PHY_3] = &qhs_ahb2phy3,
[SLAVE_ANOC_THROTTLE_CFG] = &qhs_anoc_throttle_cfg,
[SLAVE_AOSS] = &qhs_aoss,
[SLAVE_APPSS] = &qhs_apss,
[SLAVE_BOOT_ROM] = &qhs_boot_rom,
[SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
[SLAVE_CAMERA_NRT_THROTTLE_CFG] = &qhs_camera_nrt_throttle_cfg,
[SLAVE_CAMERA_RT_THROTTLE_CFG] = &qhs_camera_rt_throttle_cfg,
[SLAVE_CLK_CTL] = &qhs_clk_ctl,
[SLAVE_CDSP_CFG] = &qhs_compute0_cfg,
[SLAVE_CDSP1_CFG] = &qhs_compute1_cfg,
[SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
[SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx,
[SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
[SLAVE_CPR_NSPCX] = &qhs_cpr_nspcx,
[SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
[SLAVE_CX_RDPM] = &qhs_cx_rdpm,
[SLAVE_DISPLAY_CFG] = &qhs_display0_cfg,
[SLAVE_DISPLAY_RT_THROTTLE_CFG] = &qhs_display0_rt_throttle_cfg,
[SLAVE_DISPLAY1_CFG] = &qhs_display1_cfg,
[SLAVE_DISPLAY1_RT_THROTTLE_CFG] = &qhs_display1_rt_throttle_cfg,
[SLAVE_EMAC_CFG] = &qhs_emac0_cfg,
[SLAVE_EMAC1_CFG] = &qhs_emac1_cfg,
[SLAVE_GP_DSP0_CFG] = &qhs_gp_dsp0_cfg,
[SLAVE_GP_DSP1_CFG] = &qhs_gp_dsp1_cfg,
[SLAVE_GPDSP0_THROTTLE_CFG] = &qhs_gpdsp0_throttle_cfg,
[SLAVE_GPDSP1_THROTTLE_CFG] = &qhs_gpdsp1_throttle_cfg,
[SLAVE_GPU_TCU_THROTTLE_CFG] = &qhs_gpu_tcu_throttle_cfg,
[SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
[SLAVE_HWKM] = &qhs_hwkm,
[SLAVE_IMEM_CFG] = &qhs_imem_cfg,
[SLAVE_IPA_CFG] = &qhs_ipa,
[SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
[SLAVE_LPASS] = &qhs_lpass_cfg,
[SLAVE_LPASS_THROTTLE_CFG] = &qhs_lpass_throttle_cfg,
[SLAVE_MX_RDPM] = &qhs_mx_rdpm,
[SLAVE_MXC_RDPM] = &qhs_mxc_rdpm,
[SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
[SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
[SLAVE_PCIE_RSC_CFG] = &qhs_pcie_rsc_cfg,
[SLAVE_PCIE_TCU_THROTTLE_CFG] = &qhs_pcie_tcu_throttle_cfg,
[SLAVE_PCIE_THROTTLE_CFG] = &qhs_pcie_throttle_cfg,
[SLAVE_PDM] = &qhs_pdm,
[SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
[SLAVE_PKA_WRAPPER_CFG] = &qhs_pke_wrapper_cfg,
[SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
[SLAVE_QM_CFG] = &qhs_qm_cfg,
[SLAVE_QM_MPU_CFG] = &qhs_qm_mpu_cfg,
[SLAVE_QUP_0] = &qhs_qup0,
[SLAVE_QUP_1] = &qhs_qup1,
[SLAVE_QUP_2] = &qhs_qup2,
[SLAVE_QUP_3] = &qhs_qup3,
[SLAVE_SAIL_THROTTLE_CFG] = &qhs_sail_throttle_cfg,
[SLAVE_SDC1] = &qhs_sdc1,
[SLAVE_SECURITY] = &qhs_security,
[SLAVE_SNOC_THROTTLE_CFG] = &qhs_snoc_throttle_cfg,
[SLAVE_TCSR] = &qhs_tcsr,
[SLAVE_TLMM] = &qhs_tlmm,
[SLAVE_TSC_CFG] = &qhs_tsc_cfg,
[SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg,
[SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
[SLAVE_USB2] = &qhs_usb2_0,
[SLAVE_USB3_0] = &qhs_usb3_0,
[SLAVE_USB3_1] = &qhs_usb3_1,
[SLAVE_VENUS_CFG] = &qhs_venus_cfg,
[SLAVE_VENUS_CVP_THROTTLE_CFG] = &qhs_venus_cvp_throttle_cfg,
[SLAVE_VENUS_V_CPU_THROTTLE_CFG] = &qhs_venus_v_cpu_throttle_cfg,
[SLAVE_VENUS_VCODEC_THROTTLE_CFG] = &qhs_venus_vcodec_throttle_cfg,
[SLAVE_DDRSS_CFG] = &qns_ddrss_cfg,
[SLAVE_GPDSP_NOC_CFG] = &qns_gpdsp_noc_cfg,
[SLAVE_CNOC_MNOC_HF_CFG] = &qns_mnoc_hf_cfg,
[SLAVE_CNOC_MNOC_SF_CFG] = &qns_mnoc_sf_cfg,
[SLAVE_PCIE_ANOC_CFG] = &qns_pcie_anoc_cfg,
[SLAVE_SNOC_CFG] = &qns_snoc_cfg,
[SLAVE_BOOT_IMEM] = &qxs_boot_imem,
[SLAVE_IMEM] = &qxs_imem,
[SLAVE_PIMEM] = &qxs_pimem,
[SLAVE_PCIE_0] = &xs_pcie_0,
[SLAVE_PCIE_1] = &xs_pcie_1,
[SLAVE_QDSS_STM] = &xs_qdss_stm,
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
static const struct qcom_icc_desc sa8775p_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
static struct qcom_icc_bcm *dc_noc_bcms[] = {
};
static struct qcom_icc_node *dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
[SLAVE_GEM_NOC_CFG] = &qns_gemnoc,
};
static const struct qcom_icc_desc sa8775p_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
.bcms = dc_noc_bcms,
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
static struct qcom_icc_bcm *gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
};
static struct qcom_icc_node *gem_noc_nodes[] = {
[MASTER_GPU_TCU] = &alm_gpu_tcu,
[MASTER_PCIE_TCU] = &alm_pcie_tcu,
[MASTER_SYS_TCU] = &alm_sys_tcu,
[MASTER_APPSS_PROC] = &chm_apps,
[MASTER_COMPUTE_NOC] = &qnm_cmpnoc0,
[MASTER_COMPUTE_NOC_1] = &qnm_cmpnoc1,
[MASTER_GEM_NOC_CFG] = &qnm_gemnoc_cfg,
[MASTER_GPDSP_SAIL] = &qnm_gpdsp_sail,
[MASTER_GFX3D] = &qnm_gpu,
[MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
[MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
[MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
[MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
[MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
[SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
[SLAVE_LLCC] = &qns_llcc,
[SLAVE_GEM_NOC_PCIE_CNOC] = &qns_pcie,
[SLAVE_SERVICE_GEM_NOC_1] = &srvc_even_gemnoc,
[SLAVE_SERVICE_GEM_NOC_2] = &srvc_odd_gemnoc,
[SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc,
[SLAVE_SERVICE_GEM_NOC2] = &srvc_sys_gemnoc_2,
};
static const struct qcom_icc_desc sa8775p_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
static struct qcom_icc_bcm *gpdsp_anoc_bcms[] = {
&bcm_gna0,
&bcm_gnb0,
};
static struct qcom_icc_node *gpdsp_anoc_nodes[] = {
[MASTER_DSP0] = &qxm_dsp0,
[MASTER_DSP1] = &qxm_dsp1,
[SLAVE_GP_DSP_SAIL_NOC] = &qns_gp_dsp_sail_noc,
};
static const struct qcom_icc_desc sa8775p_gpdsp_anoc = {
.nodes = gpdsp_anoc_nodes,
.num_nodes = ARRAY_SIZE(gpdsp_anoc_nodes),
.bcms = gpdsp_anoc_bcms,
.num_bcms = ARRAY_SIZE(gpdsp_anoc_bcms),
};
static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = {
&bcm_sn9,
};
static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
[MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
[MASTER_LPASS_PROC] = &qxm_lpass_dsp,
[SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
[SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi,
[SLAVE_LPASS_MPU_CFG] = &qhs_lpass_mpu,
[SLAVE_LPASS_TOP_CFG] = &qhs_lpass_top,
[SLAVE_LPASS_SNOC] = &qns_sysnoc,
[SLAVE_SERVICES_LPASS_AML_NOC] = &srvc_niu_aml_noc,
[SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
};
static const struct qcom_icc_desc sa8775p_lpass_ag_noc = {
.nodes = lpass_ag_noc_nodes,
.num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
.bcms = lpass_ag_noc_bcms,
.num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
};
static struct qcom_icc_bcm *mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
};
static struct qcom_icc_node *mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
};
static const struct qcom_icc_desc sa8775p_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
static struct qcom_icc_bcm *mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
};
static struct qcom_icc_node *mmss_noc_nodes[] = {
[MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
[MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
[MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
[MASTER_MDP0] = &qnm_mdp0_0,
[MASTER_MDP1] = &qnm_mdp0_1,
[MASTER_MDP_CORE1_0] = &qnm_mdp1_0,
[MASTER_MDP_CORE1_1] = &qnm_mdp1_1,
[MASTER_CNOC_MNOC_HF_CFG] = &qnm_mnoc_hf_cfg,
[MASTER_CNOC_MNOC_SF_CFG] = &qnm_mnoc_sf_cfg,
[MASTER_VIDEO_P0] = &qnm_video0,
[MASTER_VIDEO_P1] = &qnm_video1,
[MASTER_VIDEO_PROC] = &qnm_video_cvp,
[MASTER_VIDEO_V_PROC] = &qnm_video_v_cpu,
[SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
[SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
[SLAVE_SERVICE_MNOC_HF] = &srvc_mnoc_hf,
[SLAVE_SERVICE_MNOC_SF] = &srvc_mnoc_sf,
};
static const struct qcom_icc_desc sa8775p_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
static struct qcom_icc_bcm *nspa_noc_bcms[] = {
&bcm_nsa0,
&bcm_nsa1,
};
static struct qcom_icc_node *nspa_noc_nodes[] = {
[MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
[MASTER_CDSP_PROC] = &qxm_nsp,
[SLAVE_HCP_A] = &qns_hcp,
[SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
[SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
};
static const struct qcom_icc_desc sa8775p_nspa_noc = {
.nodes = nspa_noc_nodes,
.num_nodes = ARRAY_SIZE(nspa_noc_nodes),
.bcms = nspa_noc_bcms,
.num_bcms = ARRAY_SIZE(nspa_noc_bcms),
};
static struct qcom_icc_bcm *nspb_noc_bcms[] = {
&bcm_nsb0,
&bcm_nsb1,
};
static struct qcom_icc_node *nspb_noc_nodes[] = {
[MASTER_CDSPB_NOC_CFG] = &qhm_nspb_noc_config,
[MASTER_CDSP_PROC_B] = &qxm_nspb,
[SLAVE_CDSPB_MEM_NOC] = &qns_nspb_gemnoc,
[SLAVE_HCP_B] = &qns_nspb_hcp,
[SLAVE_SERVICE_NSPB_NOC] = &service_nspb_noc,
};
static const struct qcom_icc_desc sa8775p_nspb_noc = {
.nodes = nspb_noc_nodes,
.num_nodes = ARRAY_SIZE(nspb_noc_nodes),
.bcms = nspb_noc_bcms,
.num_bcms = ARRAY_SIZE(nspb_noc_bcms),
};
static struct qcom_icc_bcm *pcie_anoc_bcms[] = {
&bcm_pci0,
};
static struct qcom_icc_node *pcie_anoc_nodes[] = {
[MASTER_PCIE_0] = &xm_pcie3_0,
[MASTER_PCIE_1] = &xm_pcie3_1,
[SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
};
static const struct qcom_icc_desc sa8775p_pcie_anoc = {
.nodes = pcie_anoc_nodes,
.num_nodes = ARRAY_SIZE(pcie_anoc_nodes),
.bcms = pcie_anoc_bcms,
.num_bcms = ARRAY_SIZE(pcie_anoc_bcms),
};
static struct qcom_icc_bcm *system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn3,
&bcm_sn4,
&bcm_sn9,
};
static struct qcom_icc_node *system_noc_nodes[] = {
[MASTER_GIC_AHB] = &qhm_gic,
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
[MASTER_LPASS_ANOC] = &qnm_lpass_noc,
[MASTER_SNOC_CFG] = &qnm_snoc_cfg,
[MASTER_PIMEM] = &qxm_pimem,
[MASTER_GIC] = &xm_gic,
[SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
[SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
[SLAVE_SERVICE_SNOC] = &srvc_snoc,
};
static const struct qcom_icc_desc sa8775p_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
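/*
 * Each qcom_icc_desc ties one NoC's node table (indexed by the
 * dt-bindings MASTER_* and SLAVE_* constants) to the BCMs the RPMh
 * framework votes on when bandwidth is requested across those nodes.
 */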
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sa8775p-aggre1-noc", .data = &sa8775p_aggre1_noc, },
{ .compatible = "qcom,sa8775p-aggre2-noc", .data = &sa8775p_aggre2_noc, },
{ .compatible = "qcom,sa8775p-clk-virt", .data = &sa8775p_clk_virt, },
{ .compatible = "qcom,sa8775p-config-noc", .data = &sa8775p_config_noc, },
{ .compatible = "qcom,sa8775p-dc-noc", .data = &sa8775p_dc_noc, },
{ .compatible = "qcom,sa8775p-gem-noc", .data = &sa8775p_gem_noc, },
{ .compatible = "qcom,sa8775p-gpdsp-anoc", .data = &sa8775p_gpdsp_anoc, },
{ .compatible = "qcom,sa8775p-lpass-ag-noc", .data = &sa8775p_lpass_ag_noc, },
{ .compatible = "qcom,sa8775p-mc-virt", .data = &sa8775p_mc_virt, },
{ .compatible = "qcom,sa8775p-mmss-noc", .data = &sa8775p_mmss_noc, },
{ .compatible = "qcom,sa8775p-nspa-noc", .data = &sa8775p_nspa_noc, },
{ .compatible = "qcom,sa8775p-nspb-noc", .data = &sa8775p_nspb_noc, },
{ .compatible = "qcom,sa8775p-pcie-anoc", .data = &sa8775p_pcie_anoc, },
{ .compatible = "qcom,sa8775p-system-noc", .data = &sa8775p_system_noc, },
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
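/*
 * One compatible per NoC instance described in the DT; the shared
 * qcom_icc_rpmh_probe() looks up the matching descriptor and registers
 * an interconnect provider for it.
 */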
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
.remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sa8775p",
.of_match_table = qnoc_of_match,
.sync_state = icc_sync_state,
},
};
static int __init qnoc_driver_init(void)
{
return platform_driver_register(&qnoc_driver);
}
core_initcall(qnoc_driver_init);
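/*
 * Registered at core_initcall() rather than via module_platform_driver(),
 * presumably so the provider comes up early: many consumers defer probe
 * until their interconnect paths can be resolved.
 */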
static void __exit qnoc_driver_exit(void)
{
platform_driver_unregister(&qnoc_driver);
}
module_exit(qnoc_driver_exit);
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SA8775P NoC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/interconnect/qcom/sa8775p.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Qualcomm QCM2290 Network-on-Chip (NoC) QoS driver
*
* Copyright (c) 2021, Linaro Ltd.
*
*/
#include <dt-bindings/interconnect/qcom,qcm2290.h>
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include "icc-rpm.h"
enum {
QCM2290_MASTER_APPSS_PROC = 1,
QCM2290_MASTER_SNOC_BIMC_RT,
QCM2290_MASTER_SNOC_BIMC_NRT,
QCM2290_MASTER_SNOC_BIMC,
QCM2290_MASTER_TCU_0,
QCM2290_MASTER_GFX3D,
QCM2290_MASTER_SNOC_CNOC,
QCM2290_MASTER_QDSS_DAP,
QCM2290_MASTER_CRYPTO_CORE0,
QCM2290_MASTER_SNOC_CFG,
QCM2290_MASTER_TIC,
QCM2290_MASTER_ANOC_SNOC,
QCM2290_MASTER_BIMC_SNOC,
QCM2290_MASTER_PIMEM,
QCM2290_MASTER_QDSS_BAM,
QCM2290_MASTER_QUP_0,
QCM2290_MASTER_IPA,
QCM2290_MASTER_QDSS_ETR,
QCM2290_MASTER_SDCC_1,
QCM2290_MASTER_SDCC_2,
QCM2290_MASTER_QPIC,
QCM2290_MASTER_USB3_0,
QCM2290_MASTER_QUP_CORE_0,
QCM2290_MASTER_CAMNOC_SF,
QCM2290_MASTER_VIDEO_P0,
QCM2290_MASTER_VIDEO_PROC,
QCM2290_MASTER_CAMNOC_HF,
QCM2290_MASTER_MDP0,
QCM2290_SLAVE_EBI1,
QCM2290_SLAVE_BIMC_SNOC,
QCM2290_SLAVE_BIMC_CFG,
QCM2290_SLAVE_CAMERA_NRT_THROTTLE_CFG,
QCM2290_SLAVE_CAMERA_RT_THROTTLE_CFG,
QCM2290_SLAVE_CAMERA_CFG,
QCM2290_SLAVE_CLK_CTL,
QCM2290_SLAVE_CRYPTO_0_CFG,
QCM2290_SLAVE_DISPLAY_CFG,
QCM2290_SLAVE_DISPLAY_THROTTLE_CFG,
QCM2290_SLAVE_GPU_CFG,
QCM2290_SLAVE_HWKM,
QCM2290_SLAVE_IMEM_CFG,
QCM2290_SLAVE_IPA_CFG,
QCM2290_SLAVE_LPASS,
QCM2290_SLAVE_MESSAGE_RAM,
QCM2290_SLAVE_PDM,
QCM2290_SLAVE_PIMEM_CFG,
QCM2290_SLAVE_PKA_WRAPPER,
QCM2290_SLAVE_PMIC_ARB,
QCM2290_SLAVE_PRNG,
QCM2290_SLAVE_QDSS_CFG,
QCM2290_SLAVE_QM_CFG,
QCM2290_SLAVE_QM_MPU_CFG,
QCM2290_SLAVE_QPIC,
QCM2290_SLAVE_QUP_0,
QCM2290_SLAVE_SDCC_1,
QCM2290_SLAVE_SDCC_2,
QCM2290_SLAVE_SNOC_CFG,
QCM2290_SLAVE_TCSR,
QCM2290_SLAVE_USB3,
QCM2290_SLAVE_VENUS_CFG,
QCM2290_SLAVE_VENUS_THROTTLE_CFG,
QCM2290_SLAVE_VSENSE_CTRL_CFG,
QCM2290_SLAVE_SERVICE_CNOC,
QCM2290_SLAVE_APPSS,
QCM2290_SLAVE_SNOC_CNOC,
QCM2290_SLAVE_IMEM,
QCM2290_SLAVE_PIMEM,
QCM2290_SLAVE_SNOC_BIMC,
QCM2290_SLAVE_SERVICE_SNOC,
QCM2290_SLAVE_QDSS_STM,
QCM2290_SLAVE_TCU,
QCM2290_SLAVE_ANOC_SNOC,
QCM2290_SLAVE_QUP_CORE_0,
QCM2290_SLAVE_SNOC_BIMC_NRT,
QCM2290_SLAVE_SNOC_BIMC_RT,
};
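/*
 * Driver-local node IDs, referenced by the u16 link arrays below. The
 * provider tables at the end of the file index the same nodes by the
 * dt-bindings constants instead; starting the enum at 1 keeps 0 from
 * ever being a valid node ID.
 */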
/* Master nodes */
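/*
 * Common fields: buswidth is the port width in bytes used for bandwidth
 * aggregation; the qos.* fields program the NoC QoS registers for
 * ap_owned ports (fixed priority or bypass); mas_rpm_id/slv_rpm_id name
 * the endpoint in RPM bandwidth requests, -1 meaning no vote in that
 * direction.
 */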
static const u16 mas_appss_proc_links[] = {
QCM2290_SLAVE_EBI1,
QCM2290_SLAVE_BIMC_SNOC,
};
static struct qcom_icc_node mas_appss_proc = {
.id = QCM2290_MASTER_APPSS_PROC,
.name = "mas_apps_proc",
.buswidth = 16,
.qos.ap_owned = true,
.qos.qos_port = 0,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.prio_level = 0,
.qos.areq_prio = 0,
.mas_rpm_id = 0,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_appss_proc_links),
.links = mas_appss_proc_links,
};
static const u16 mas_snoc_bimc_rt_links[] = {
QCM2290_SLAVE_EBI1,
};
static struct qcom_icc_node mas_snoc_bimc_rt = {
.id = QCM2290_MASTER_SNOC_BIMC_RT,
.name = "mas_snoc_bimc_rt",
.buswidth = 16,
.qos.ap_owned = true,
.qos.qos_port = 2,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.mas_rpm_id = 163,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_snoc_bimc_rt_links),
.links = mas_snoc_bimc_rt_links,
};
static const u16 mas_snoc_bimc_nrt_links[] = {
QCM2290_SLAVE_EBI1,
};
static struct qcom_icc_node mas_snoc_bimc_nrt = {
.id = QCM2290_MASTER_SNOC_BIMC_NRT,
.name = "mas_snoc_bimc_nrt",
.buswidth = 16,
.qos.ap_owned = true,
.qos.qos_port = 3,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.mas_rpm_id = 164,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_snoc_bimc_nrt_links),
.links = mas_snoc_bimc_nrt_links,
};
static const u16 mas_snoc_bimc_links[] = {
QCM2290_SLAVE_EBI1,
};
static struct qcom_icc_node mas_snoc_bimc = {
.id = QCM2290_MASTER_SNOC_BIMC,
.name = "mas_snoc_bimc",
.buswidth = 16,
.qos.ap_owned = true,
.qos.qos_port = 2,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.mas_rpm_id = 164,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_snoc_bimc_links),
.links = mas_snoc_bimc_links,
};
static const u16 mas_tcu_0_links[] = {
QCM2290_SLAVE_EBI1,
QCM2290_SLAVE_BIMC_SNOC,
};
static struct qcom_icc_node mas_tcu_0 = {
.id = QCM2290_MASTER_TCU_0,
.name = "mas_tcu_0",
.buswidth = 8,
.qos.ap_owned = true,
.qos.qos_port = 4,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.prio_level = 6,
.qos.areq_prio = 6,
.mas_rpm_id = 102,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_tcu_0_links),
.links = mas_tcu_0_links,
};
static const u16 mas_snoc_cnoc_links[] = {
QCM2290_SLAVE_CAMERA_RT_THROTTLE_CFG,
QCM2290_SLAVE_SDCC_2,
QCM2290_SLAVE_SDCC_1,
QCM2290_SLAVE_QM_CFG,
QCM2290_SLAVE_BIMC_CFG,
QCM2290_SLAVE_USB3,
QCM2290_SLAVE_QM_MPU_CFG,
QCM2290_SLAVE_CAMERA_NRT_THROTTLE_CFG,
QCM2290_SLAVE_QDSS_CFG,
QCM2290_SLAVE_PDM,
QCM2290_SLAVE_IPA_CFG,
QCM2290_SLAVE_DISPLAY_THROTTLE_CFG,
QCM2290_SLAVE_TCSR,
QCM2290_SLAVE_MESSAGE_RAM,
QCM2290_SLAVE_PMIC_ARB,
QCM2290_SLAVE_LPASS,
QCM2290_SLAVE_DISPLAY_CFG,
QCM2290_SLAVE_VENUS_CFG,
QCM2290_SLAVE_GPU_CFG,
QCM2290_SLAVE_IMEM_CFG,
QCM2290_SLAVE_SNOC_CFG,
QCM2290_SLAVE_SERVICE_CNOC,
QCM2290_SLAVE_VENUS_THROTTLE_CFG,
QCM2290_SLAVE_PKA_WRAPPER,
QCM2290_SLAVE_HWKM,
QCM2290_SLAVE_PRNG,
QCM2290_SLAVE_VSENSE_CTRL_CFG,
QCM2290_SLAVE_CRYPTO_0_CFG,
QCM2290_SLAVE_PIMEM_CFG,
QCM2290_SLAVE_QUP_0,
QCM2290_SLAVE_CAMERA_CFG,
QCM2290_SLAVE_CLK_CTL,
QCM2290_SLAVE_QPIC,
};
static struct qcom_icc_node mas_snoc_cnoc = {
.id = QCM2290_MASTER_SNOC_CNOC,
.name = "mas_snoc_cnoc",
.buswidth = 8,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = 52,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_snoc_cnoc_links),
.links = mas_snoc_cnoc_links,
};
static const u16 mas_qdss_dap_links[] = {
QCM2290_SLAVE_CAMERA_RT_THROTTLE_CFG,
QCM2290_SLAVE_SDCC_2,
QCM2290_SLAVE_SDCC_1,
QCM2290_SLAVE_QM_CFG,
QCM2290_SLAVE_BIMC_CFG,
QCM2290_SLAVE_USB3,
QCM2290_SLAVE_QM_MPU_CFG,
QCM2290_SLAVE_CAMERA_NRT_THROTTLE_CFG,
QCM2290_SLAVE_QDSS_CFG,
QCM2290_SLAVE_PDM,
QCM2290_SLAVE_IPA_CFG,
QCM2290_SLAVE_DISPLAY_THROTTLE_CFG,
QCM2290_SLAVE_TCSR,
QCM2290_SLAVE_MESSAGE_RAM,
QCM2290_SLAVE_PMIC_ARB,
QCM2290_SLAVE_LPASS,
QCM2290_SLAVE_DISPLAY_CFG,
QCM2290_SLAVE_VENUS_CFG,
QCM2290_SLAVE_GPU_CFG,
QCM2290_SLAVE_IMEM_CFG,
QCM2290_SLAVE_SNOC_CFG,
QCM2290_SLAVE_SERVICE_CNOC,
QCM2290_SLAVE_VENUS_THROTTLE_CFG,
QCM2290_SLAVE_PKA_WRAPPER,
QCM2290_SLAVE_HWKM,
QCM2290_SLAVE_PRNG,
QCM2290_SLAVE_VSENSE_CTRL_CFG,
QCM2290_SLAVE_CRYPTO_0_CFG,
QCM2290_SLAVE_PIMEM_CFG,
QCM2290_SLAVE_QUP_0,
QCM2290_SLAVE_CAMERA_CFG,
QCM2290_SLAVE_CLK_CTL,
QCM2290_SLAVE_QPIC,
};
static struct qcom_icc_node mas_qdss_dap = {
.id = QCM2290_MASTER_QDSS_DAP,
.name = "mas_qdss_dap",
.buswidth = 8,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = 49,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_qdss_dap_links),
.links = mas_qdss_dap_links,
};
static const u16 mas_crypto_core0_links[] = {
QCM2290_SLAVE_ANOC_SNOC,
};
static struct qcom_icc_node mas_crypto_core0 = {
.id = QCM2290_MASTER_CRYPTO_CORE0,
.name = "mas_crypto_core0",
.buswidth = 8,
.qos.ap_owned = true,
.qos.qos_port = 22,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 2,
.mas_rpm_id = 23,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_crypto_core0_links),
.links = mas_crypto_core0_links,
};
static const u16 mas_qup_core_0_links[] = {
QCM2290_SLAVE_QUP_CORE_0,
};
static struct qcom_icc_node mas_qup_core_0 = {
.id = QCM2290_MASTER_QUP_CORE_0,
.name = "mas_qup_core_0",
.buswidth = 4,
.mas_rpm_id = 170,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_qup_core_0_links),
.links = mas_qup_core_0_links,
};
static const u16 mas_camnoc_sf_links[] = {
QCM2290_SLAVE_SNOC_BIMC_NRT,
};
static struct qcom_icc_node mas_camnoc_sf = {
.id = QCM2290_MASTER_CAMNOC_SF,
.name = "mas_camnoc_sf",
.buswidth = 32,
.qos.ap_owned = true,
.qos.qos_port = 4,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 3,
.mas_rpm_id = 172,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_camnoc_sf_links),
.links = mas_camnoc_sf_links,
};
static const u16 mas_camnoc_hf_links[] = {
QCM2290_SLAVE_SNOC_BIMC_RT,
};
static struct qcom_icc_node mas_camnoc_hf = {
.id = QCM2290_MASTER_CAMNOC_HF,
.name = "mas_camnoc_hf",
.buswidth = 32,
.qos.ap_owned = true,
.qos.qos_port = 10,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 3,
.qos.urg_fwd_en = true,
.mas_rpm_id = 173,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_camnoc_hf_links),
.links = mas_camnoc_hf_links,
};
static const u16 mas_mdp0_links[] = {
QCM2290_SLAVE_SNOC_BIMC_RT,
};
static struct qcom_icc_node mas_mdp0 = {
.id = QCM2290_MASTER_MDP0,
.name = "mas_mdp0",
.buswidth = 16,
.qos.ap_owned = true,
.qos.qos_port = 5,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 3,
.qos.urg_fwd_en = true,
.mas_rpm_id = 8,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_mdp0_links),
.links = mas_mdp0_links,
};
static const u16 mas_video_p0_links[] = {
QCM2290_SLAVE_SNOC_BIMC_NRT,
};
static struct qcom_icc_node mas_video_p0 = {
.id = QCM2290_MASTER_VIDEO_P0,
.name = "mas_video_p0",
.buswidth = 16,
.qos.ap_owned = true,
.qos.qos_port = 9,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 3,
.qos.urg_fwd_en = true,
.mas_rpm_id = 9,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_video_p0_links),
.links = mas_video_p0_links,
};
static const u16 mas_video_proc_links[] = {
QCM2290_SLAVE_SNOC_BIMC_NRT,
};
static struct qcom_icc_node mas_video_proc = {
.id = QCM2290_MASTER_VIDEO_PROC,
.name = "mas_video_proc",
.buswidth = 8,
.qos.ap_owned = true,
.qos.qos_port = 13,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 4,
.mas_rpm_id = 168,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_video_proc_links),
.links = mas_video_proc_links,
};
static const u16 mas_snoc_cfg_links[] = {
QCM2290_SLAVE_SERVICE_SNOC,
};
static struct qcom_icc_node mas_snoc_cfg = {
.id = QCM2290_MASTER_SNOC_CFG,
.name = "mas_snoc_cfg",
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = 20,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_snoc_cfg_links),
.links = mas_snoc_cfg_links,
};
static const u16 mas_tic_links[] = {
QCM2290_SLAVE_PIMEM,
QCM2290_SLAVE_IMEM,
QCM2290_SLAVE_APPSS,
QCM2290_SLAVE_SNOC_BIMC,
QCM2290_SLAVE_SNOC_CNOC,
QCM2290_SLAVE_TCU,
QCM2290_SLAVE_QDSS_STM,
};
static struct qcom_icc_node mas_tic = {
.id = QCM2290_MASTER_TIC,
.name = "mas_tic",
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_port = 8,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 2,
.mas_rpm_id = 51,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_tic_links),
.links = mas_tic_links,
};
static const u16 mas_anoc_snoc_links[] = {
QCM2290_SLAVE_PIMEM,
QCM2290_SLAVE_IMEM,
QCM2290_SLAVE_APPSS,
QCM2290_SLAVE_SNOC_BIMC,
QCM2290_SLAVE_SNOC_CNOC,
QCM2290_SLAVE_TCU,
QCM2290_SLAVE_QDSS_STM,
};
static struct qcom_icc_node mas_anoc_snoc = {
.id = QCM2290_MASTER_ANOC_SNOC,
.name = "mas_anoc_snoc",
.buswidth = 16,
.mas_rpm_id = 110,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_anoc_snoc_links),
.links = mas_anoc_snoc_links,
};
static const u16 mas_bimc_snoc_links[] = {
QCM2290_SLAVE_PIMEM,
QCM2290_SLAVE_IMEM,
QCM2290_SLAVE_APPSS,
QCM2290_SLAVE_SNOC_CNOC,
QCM2290_SLAVE_TCU,
QCM2290_SLAVE_QDSS_STM,
};
static struct qcom_icc_node mas_bimc_snoc = {
.id = QCM2290_MASTER_BIMC_SNOC,
.name = "mas_bimc_snoc",
.buswidth = 8,
.mas_rpm_id = 21,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_bimc_snoc_links),
.links = mas_bimc_snoc_links,
};
static const u16 mas_pimem_links[] = {
QCM2290_SLAVE_IMEM,
QCM2290_SLAVE_SNOC_BIMC,
};
static struct qcom_icc_node mas_pimem = {
.id = QCM2290_MASTER_PIMEM,
.name = "mas_pimem",
.buswidth = 8,
.qos.ap_owned = true,
.qos.qos_port = 20,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 2,
.mas_rpm_id = 113,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_pimem_links),
.links = mas_pimem_links,
};
static const u16 mas_qdss_bam_links[] = {
QCM2290_SLAVE_ANOC_SNOC,
};
static struct qcom_icc_node mas_qdss_bam = {
.id = QCM2290_MASTER_QDSS_BAM,
.name = "mas_qdss_bam",
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_port = 2,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 2,
.mas_rpm_id = 19,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_qdss_bam_links),
.links = mas_qdss_bam_links,
};
static const u16 mas_qup_0_links[] = {
QCM2290_SLAVE_ANOC_SNOC,
};
static struct qcom_icc_node mas_qup_0 = {
.id = QCM2290_MASTER_QUP_0,
.name = "mas_qup_0",
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_port = 0,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 2,
.mas_rpm_id = 166,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_qup_0_links),
.links = mas_qup_0_links,
};
static const u16 mas_ipa_links[] = {
QCM2290_SLAVE_ANOC_SNOC,
};
static struct qcom_icc_node mas_ipa = {
.id = QCM2290_MASTER_IPA,
.name = "mas_ipa",
.buswidth = 8,
.qos.ap_owned = true,
.qos.qos_port = 3,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 2,
.mas_rpm_id = 59,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_ipa_links),
.links = mas_ipa_links,
};
static const u16 mas_qdss_etr_links[] = {
QCM2290_SLAVE_ANOC_SNOC,
};
static struct qcom_icc_node mas_qdss_etr = {
.id = QCM2290_MASTER_QDSS_ETR,
.name = "mas_qdss_etr",
.buswidth = 8,
.qos.ap_owned = true,
.qos.qos_port = 12,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 2,
.mas_rpm_id = 31,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_qdss_etr_links),
.links = mas_qdss_etr_links,
};
static const u16 mas_sdcc_1_links[] = {
QCM2290_SLAVE_ANOC_SNOC,
};
static struct qcom_icc_node mas_sdcc_1 = {
.id = QCM2290_MASTER_SDCC_1,
.name = "mas_sdcc_1",
.buswidth = 8,
.qos.ap_owned = true,
.qos.qos_port = 17,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 2,
.mas_rpm_id = 33,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_sdcc_1_links),
.links = mas_sdcc_1_links,
};
static const u16 mas_sdcc_2_links[] = {
QCM2290_SLAVE_ANOC_SNOC,
};
static struct qcom_icc_node mas_sdcc_2 = {
.id = QCM2290_MASTER_SDCC_2,
.name = "mas_sdcc_2",
.buswidth = 8,
.qos.ap_owned = true,
.qos.qos_port = 23,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 2,
.mas_rpm_id = 35,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_sdcc_2_links),
.links = mas_sdcc_2_links,
};
static const u16 mas_qpic_links[] = {
QCM2290_SLAVE_ANOC_SNOC,
};
static struct qcom_icc_node mas_qpic = {
.id = QCM2290_MASTER_QPIC,
.name = "mas_qpic",
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_port = 1,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 2,
.mas_rpm_id = 58,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_qpic_links),
.links = mas_qpic_links,
};
static const u16 mas_usb3_0_links[] = {
QCM2290_SLAVE_ANOC_SNOC,
};
static struct qcom_icc_node mas_usb3_0 = {
.id = QCM2290_MASTER_USB3_0,
.name = "mas_usb3_0",
.buswidth = 8,
.qos.ap_owned = true,
.qos.qos_port = 24,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 2,
.mas_rpm_id = 32,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_usb3_0_links),
.links = mas_usb3_0_links,
};
static const u16 mas_gfx3d_links[] = {
QCM2290_SLAVE_EBI1,
};
static struct qcom_icc_node mas_gfx3d = {
.id = QCM2290_MASTER_GFX3D,
.name = "mas_gfx3d",
.buswidth = 32,
.qos.ap_owned = true,
.qos.qos_port = 1,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.prio_level = 0,
.qos.areq_prio = 0,
.mas_rpm_id = 6,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_gfx3d_links),
.links = mas_gfx3d_links,
};
/* Slave nodes */
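/*
 * Slaves mirror the masters but vote through slv_rpm_id. Pure register
 * targets have no links; the slv_*_snoc/bimc bridge nodes link back to
 * the corresponding master on the far side of the bus.
 */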
static struct qcom_icc_node slv_ebi1 = {
.name = "slv_ebi1",
.id = QCM2290_SLAVE_EBI1,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 0,
};
static const u16 slv_bimc_snoc_links[] = {
QCM2290_MASTER_BIMC_SNOC,
};
static struct qcom_icc_node slv_bimc_snoc = {
.name = "slv_bimc_snoc",
.id = QCM2290_SLAVE_BIMC_SNOC,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 2,
.num_links = ARRAY_SIZE(slv_bimc_snoc_links),
.links = slv_bimc_snoc_links,
};
static struct qcom_icc_node slv_bimc_cfg = {
.name = "slv_bimc_cfg",
.id = QCM2290_SLAVE_BIMC_CFG,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 56,
};
static struct qcom_icc_node slv_camera_nrt_throttle_cfg = {
.name = "slv_camera_nrt_throttle_cfg",
.id = QCM2290_SLAVE_CAMERA_NRT_THROTTLE_CFG,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 271,
};
static struct qcom_icc_node slv_camera_rt_throttle_cfg = {
.name = "slv_camera_rt_throttle_cfg",
.id = QCM2290_SLAVE_CAMERA_RT_THROTTLE_CFG,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 279,
};
static struct qcom_icc_node slv_camera_cfg = {
.name = "slv_camera_cfg",
.id = QCM2290_SLAVE_CAMERA_CFG,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 3,
};
static struct qcom_icc_node slv_clk_ctl = {
.name = "slv_clk_ctl",
.id = QCM2290_SLAVE_CLK_CTL,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 47,
};
static struct qcom_icc_node slv_crypto_0_cfg = {
.name = "slv_crypto_0_cfg",
.id = QCM2290_SLAVE_CRYPTO_0_CFG,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 52,
};
static struct qcom_icc_node slv_display_cfg = {
.name = "slv_display_cfg",
.id = QCM2290_SLAVE_DISPLAY_CFG,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 4,
};
static struct qcom_icc_node slv_display_throttle_cfg = {
.name = "slv_display_throttle_cfg",
.id = QCM2290_SLAVE_DISPLAY_THROTTLE_CFG,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 156,
};
static struct qcom_icc_node slv_gpu_cfg = {
.name = "slv_gpu_cfg",
.id = QCM2290_SLAVE_GPU_CFG,
.buswidth = 8,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 275,
};
static struct qcom_icc_node slv_hwkm = {
.name = "slv_hwkm",
.id = QCM2290_SLAVE_HWKM,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 280,
};
static struct qcom_icc_node slv_imem_cfg = {
.name = "slv_imem_cfg",
.id = QCM2290_SLAVE_IMEM_CFG,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 54,
};
static struct qcom_icc_node slv_ipa_cfg = {
.name = "slv_ipa_cfg",
.id = QCM2290_SLAVE_IPA_CFG,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 183,
};
static struct qcom_icc_node slv_lpass = {
.name = "slv_lpass",
.id = QCM2290_SLAVE_LPASS,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 21,
};
static struct qcom_icc_node slv_message_ram = {
.name = "slv_message_ram",
.id = QCM2290_SLAVE_MESSAGE_RAM,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 55,
};
static struct qcom_icc_node slv_pdm = {
.name = "slv_pdm",
.id = QCM2290_SLAVE_PDM,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 41,
};
static struct qcom_icc_node slv_pimem_cfg = {
.name = "slv_pimem_cfg",
.id = QCM2290_SLAVE_PIMEM_CFG,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 167,
};
static struct qcom_icc_node slv_pka_wrapper = {
.name = "slv_pka_wrapper",
.id = QCM2290_SLAVE_PKA_WRAPPER,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 281,
};
static struct qcom_icc_node slv_pmic_arb = {
.name = "slv_pmic_arb",
.id = QCM2290_SLAVE_PMIC_ARB,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 59,
};
static struct qcom_icc_node slv_prng = {
.name = "slv_prng",
.id = QCM2290_SLAVE_PRNG,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 44,
};
static struct qcom_icc_node slv_qdss_cfg = {
.name = "slv_qdss_cfg",
.id = QCM2290_SLAVE_QDSS_CFG,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 63,
};
static struct qcom_icc_node slv_qm_cfg = {
.name = "slv_qm_cfg",
.id = QCM2290_SLAVE_QM_CFG,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 212,
};
static struct qcom_icc_node slv_qm_mpu_cfg = {
.name = "slv_qm_mpu_cfg",
.id = QCM2290_SLAVE_QM_MPU_CFG,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 231,
};
static struct qcom_icc_node slv_qpic = {
.name = "slv_qpic",
.id = QCM2290_SLAVE_QPIC,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 80,
};
static struct qcom_icc_node slv_qup_0 = {
.name = "slv_qup_0",
.id = QCM2290_SLAVE_QUP_0,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 261,
};
static struct qcom_icc_node slv_sdcc_1 = {
.name = "slv_sdcc_1",
.id = QCM2290_SLAVE_SDCC_1,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 31,
};
static struct qcom_icc_node slv_sdcc_2 = {
.name = "slv_sdcc_2",
.id = QCM2290_SLAVE_SDCC_2,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 33,
};
static const u16 slv_snoc_cfg_links[] = {
QCM2290_MASTER_SNOC_CFG,
};
static struct qcom_icc_node slv_snoc_cfg = {
.name = "slv_snoc_cfg",
.id = QCM2290_SLAVE_SNOC_CFG,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 70,
.num_links = ARRAY_SIZE(slv_snoc_cfg_links),
.links = slv_snoc_cfg_links,
};
static struct qcom_icc_node slv_tcsr = {
.name = "slv_tcsr",
.id = QCM2290_SLAVE_TCSR,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 50,
};
static struct qcom_icc_node slv_usb3 = {
.name = "slv_usb3",
.id = QCM2290_SLAVE_USB3,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 22,
};
static struct qcom_icc_node slv_venus_cfg = {
.name = "slv_venus_cfg",
.id = QCM2290_SLAVE_VENUS_CFG,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 10,
};
static struct qcom_icc_node slv_venus_throttle_cfg = {
.name = "slv_venus_throttle_cfg",
.id = QCM2290_SLAVE_VENUS_THROTTLE_CFG,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 178,
};
static struct qcom_icc_node slv_vsense_ctrl_cfg = {
.name = "slv_vsense_ctrl_cfg",
.id = QCM2290_SLAVE_VSENSE_CTRL_CFG,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 263,
};
static struct qcom_icc_node slv_service_cnoc = {
.name = "slv_service_cnoc",
.id = QCM2290_SLAVE_SERVICE_CNOC,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 76,
};
static struct qcom_icc_node slv_qup_core_0 = {
.name = "slv_qup_core_0",
.id = QCM2290_SLAVE_QUP_CORE_0,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 264,
};
static const u16 slv_snoc_bimc_nrt_links[] = {
QCM2290_MASTER_SNOC_BIMC_NRT,
};
static struct qcom_icc_node slv_snoc_bimc_nrt = {
.name = "slv_snoc_bimc_nrt",
.id = QCM2290_SLAVE_SNOC_BIMC_NRT,
.buswidth = 16,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 259,
.num_links = ARRAY_SIZE(slv_snoc_bimc_nrt_links),
.links = slv_snoc_bimc_nrt_links,
};
static const u16 slv_snoc_bimc_rt_links[] = {
QCM2290_MASTER_SNOC_BIMC_RT,
};
static struct qcom_icc_node slv_snoc_bimc_rt = {
.name = "slv_snoc_bimc_rt",
.id = QCM2290_SLAVE_SNOC_BIMC_RT,
.buswidth = 16,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 260,
.num_links = ARRAY_SIZE(slv_snoc_bimc_rt_links),
.links = slv_snoc_bimc_rt_links,
};
static struct qcom_icc_node slv_appss = {
.name = "slv_appss",
.id = QCM2290_SLAVE_APPSS,
.buswidth = 8,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 20,
};
static const u16 slv_snoc_cnoc_links[] = {
QCM2290_MASTER_SNOC_CNOC,
};
static struct qcom_icc_node slv_snoc_cnoc = {
.name = "slv_snoc_cnoc",
.id = QCM2290_SLAVE_SNOC_CNOC,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 25,
.num_links = ARRAY_SIZE(slv_snoc_cnoc_links),
.links = slv_snoc_cnoc_links,
};
static struct qcom_icc_node slv_imem = {
.name = "slv_imem",
.id = QCM2290_SLAVE_IMEM,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 26,
};
static struct qcom_icc_node slv_pimem = {
.name = "slv_pimem",
.id = QCM2290_SLAVE_PIMEM,
.buswidth = 8,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 166,
};
static const u16 slv_snoc_bimc_links[] = {
QCM2290_MASTER_SNOC_BIMC,
};
static struct qcom_icc_node slv_snoc_bimc = {
.name = "slv_snoc_bimc",
.id = QCM2290_SLAVE_SNOC_BIMC,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 24,
.num_links = ARRAY_SIZE(slv_snoc_bimc_links),
.links = slv_snoc_bimc_links,
};
static struct qcom_icc_node slv_service_snoc = {
.name = "slv_service_snoc",
.id = QCM2290_SLAVE_SERVICE_SNOC,
.buswidth = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 29,
};
static struct qcom_icc_node slv_qdss_stm = {
.name = "slv_qdss_stm",
.id = QCM2290_SLAVE_QDSS_STM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 30,
};
static struct qcom_icc_node slv_tcu = {
.name = "slv_tcu",
.id = QCM2290_SLAVE_TCU,
.buswidth = 8,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.mas_rpm_id = -1,
.slv_rpm_id = 133,
};
static const u16 slv_anoc_snoc_links[] = {
QCM2290_MASTER_ANOC_SNOC,
};
static struct qcom_icc_node slv_anoc_snoc = {
.name = "slv_anoc_snoc",
.id = QCM2290_SLAVE_ANOC_SNOC,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 141,
.num_links = ARRAY_SIZE(slv_anoc_snoc_links),
.links = slv_anoc_snoc_links,
};
/* NoC descriptors */
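/*
 * Each descriptor groups the nodes of one bus with what is needed to
 * scale it: a bus clock (bus_clk_desc), an optional regmap for the QoS
 * registers, and keep_alive to hold a minimal vote so the bus is not
 * dropped entirely.
 */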
static struct qcom_icc_node * const qcm2290_bimc_nodes[] = {
[MASTER_APPSS_PROC] = &mas_appss_proc,
[MASTER_SNOC_BIMC_RT] = &mas_snoc_bimc_rt,
[MASTER_SNOC_BIMC_NRT] = &mas_snoc_bimc_nrt,
[MASTER_SNOC_BIMC] = &mas_snoc_bimc,
[MASTER_TCU_0] = &mas_tcu_0,
[MASTER_GFX3D] = &mas_gfx3d,
[SLAVE_EBI1] = &slv_ebi1,
[SLAVE_BIMC_SNOC] = &slv_bimc_snoc,
};
static const struct regmap_config qcm2290_bimc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x80000,
.fast_io = true,
};
static const struct qcom_icc_desc qcm2290_bimc = {
.type = QCOM_ICC_BIMC,
.nodes = qcm2290_bimc_nodes,
.num_nodes = ARRAY_SIZE(qcm2290_bimc_nodes),
.bus_clk_desc = &bimc_clk,
.regmap_cfg = &qcm2290_bimc_regmap_config,
.keep_alive = true,
/* QoS register offset: M_REG_BASE() in the vendor msm_bus_bimc_adhoc driver */
.qos_offset = 0x8000,
};
static struct qcom_icc_node * const qcm2290_cnoc_nodes[] = {
[MASTER_SNOC_CNOC] = &mas_snoc_cnoc,
[MASTER_QDSS_DAP] = &mas_qdss_dap,
[SLAVE_BIMC_CFG] = &slv_bimc_cfg,
[SLAVE_CAMERA_NRT_THROTTLE_CFG] = &slv_camera_nrt_throttle_cfg,
[SLAVE_CAMERA_RT_THROTTLE_CFG] = &slv_camera_rt_throttle_cfg,
[SLAVE_CAMERA_CFG] = &slv_camera_cfg,
[SLAVE_CLK_CTL] = &slv_clk_ctl,
[SLAVE_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
[SLAVE_DISPLAY_CFG] = &slv_display_cfg,
[SLAVE_DISPLAY_THROTTLE_CFG] = &slv_display_throttle_cfg,
[SLAVE_GPU_CFG] = &slv_gpu_cfg,
[SLAVE_HWKM] = &slv_hwkm,
[SLAVE_IMEM_CFG] = &slv_imem_cfg,
[SLAVE_IPA_CFG] = &slv_ipa_cfg,
[SLAVE_LPASS] = &slv_lpass,
[SLAVE_MESSAGE_RAM] = &slv_message_ram,
[SLAVE_PDM] = &slv_pdm,
[SLAVE_PIMEM_CFG] = &slv_pimem_cfg,
[SLAVE_PKA_WRAPPER] = &slv_pka_wrapper,
[SLAVE_PMIC_ARB] = &slv_pmic_arb,
[SLAVE_PRNG] = &slv_prng,
[SLAVE_QDSS_CFG] = &slv_qdss_cfg,
[SLAVE_QM_CFG] = &slv_qm_cfg,
[SLAVE_QM_MPU_CFG] = &slv_qm_mpu_cfg,
[SLAVE_QPIC] = &slv_qpic,
[SLAVE_QUP_0] = &slv_qup_0,
[SLAVE_SDCC_1] = &slv_sdcc_1,
[SLAVE_SDCC_2] = &slv_sdcc_2,
[SLAVE_SNOC_CFG] = &slv_snoc_cfg,
[SLAVE_TCSR] = &slv_tcsr,
[SLAVE_USB3] = &slv_usb3,
[SLAVE_VENUS_CFG] = &slv_venus_cfg,
[SLAVE_VENUS_THROTTLE_CFG] = &slv_venus_throttle_cfg,
[SLAVE_VSENSE_CTRL_CFG] = &slv_vsense_ctrl_cfg,
[SLAVE_SERVICE_CNOC] = &slv_service_cnoc,
};
static const struct regmap_config qcm2290_cnoc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x8200,
.fast_io = true,
};
static const struct qcom_icc_desc qcm2290_cnoc = {
.type = QCOM_ICC_NOC,
.nodes = qcm2290_cnoc_nodes,
.num_nodes = ARRAY_SIZE(qcm2290_cnoc_nodes),
.bus_clk_desc = &bus_1_clk,
.regmap_cfg = &qcm2290_cnoc_regmap_config,
.keep_alive = true,
};
static struct qcom_icc_node * const qcm2290_snoc_nodes[] = {
[MASTER_CRYPTO_CORE0] = &mas_crypto_core0,
[MASTER_SNOC_CFG] = &mas_snoc_cfg,
[MASTER_TIC] = &mas_tic,
[MASTER_ANOC_SNOC] = &mas_anoc_snoc,
[MASTER_BIMC_SNOC] = &mas_bimc_snoc,
[MASTER_PIMEM] = &mas_pimem,
[MASTER_QDSS_BAM] = &mas_qdss_bam,
[MASTER_QUP_0] = &mas_qup_0,
[MASTER_IPA] = &mas_ipa,
[MASTER_QDSS_ETR] = &mas_qdss_etr,
[MASTER_SDCC_1] = &mas_sdcc_1,
[MASTER_SDCC_2] = &mas_sdcc_2,
[MASTER_QPIC] = &mas_qpic,
[MASTER_USB3_0] = &mas_usb3_0,
[SLAVE_APPSS] = &slv_appss,
[SLAVE_SNOC_CNOC] = &slv_snoc_cnoc,
[SLAVE_IMEM] = &slv_imem,
[SLAVE_PIMEM] = &slv_pimem,
[SLAVE_SNOC_BIMC] = &slv_snoc_bimc,
[SLAVE_SERVICE_SNOC] = &slv_service_snoc,
[SLAVE_QDSS_STM] = &slv_qdss_stm,
[SLAVE_TCU] = &slv_tcu,
[SLAVE_ANOC_SNOC] = &slv_anoc_snoc,
};
static const struct regmap_config qcm2290_snoc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x60200,
.fast_io = true,
};
static const struct qcom_icc_desc qcm2290_snoc = {
.type = QCOM_ICC_QNOC,
.nodes = qcm2290_snoc_nodes,
.num_nodes = ARRAY_SIZE(qcm2290_snoc_nodes),
.bus_clk_desc = &bus_2_clk,
.regmap_cfg = &qcm2290_snoc_regmap_config,
.keep_alive = true,
/* QoS register offset: vendor DT fab-sys_noc property 'qcom,base-offset' */
.qos_offset = 0x15000,
};
static struct qcom_icc_node * const qcm2290_qup_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &mas_qup_core_0,
[SLAVE_QUP_CORE_0] = &slv_qup_core_0,
};
static const struct qcom_icc_desc qcm2290_qup_virt = {
.type = QCOM_ICC_QNOC,
.nodes = qcm2290_qup_virt_nodes,
.num_nodes = ARRAY_SIZE(qcm2290_qup_virt_nodes),
.bus_clk_desc = &qup_clk,
.keep_alive = true,
};
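/*
 * The QUP "virt" NoC has no QoS registers (no regmap_cfg); it exists
 * purely to scale the shared QUP core clock from consumer votes.
 */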
static struct qcom_icc_node * const qcm2290_mmnrt_virt_nodes[] = {
[MASTER_CAMNOC_SF] = &mas_camnoc_sf,
[MASTER_VIDEO_P0] = &mas_video_p0,
[MASTER_VIDEO_PROC] = &mas_video_proc,
[SLAVE_SNOC_BIMC_NRT] = &slv_snoc_bimc_nrt,
};
static const struct qcom_icc_desc qcm2290_mmnrt_virt = {
.type = QCOM_ICC_QNOC,
.nodes = qcm2290_mmnrt_virt_nodes,
.num_nodes = ARRAY_SIZE(qcm2290_mmnrt_virt_nodes),
.bus_clk_desc = &mmaxi_0_clk,
.regmap_cfg = &qcm2290_snoc_regmap_config,
.keep_alive = true,
.qos_offset = 0x15000,
};
static struct qcom_icc_node * const qcm2290_mmrt_virt_nodes[] = {
[MASTER_CAMNOC_HF] = &mas_camnoc_hf,
[MASTER_MDP0] = &mas_mdp0,
[SLAVE_SNOC_BIMC_RT] = &slv_snoc_bimc_rt,
};
static const struct qcom_icc_desc qcm2290_mmrt_virt = {
.type = QCOM_ICC_QNOC,
.nodes = qcm2290_mmrt_virt_nodes,
.num_nodes = ARRAY_SIZE(qcm2290_mmrt_virt_nodes),
.bus_clk_desc = &mmaxi_1_clk,
.regmap_cfg = &qcm2290_snoc_regmap_config,
.keep_alive = true,
.qos_offset = 0x15000,
};
static const struct of_device_id qcm2290_noc_of_match[] = {
{ .compatible = "qcom,qcm2290-bimc", .data = &qcm2290_bimc },
{ .compatible = "qcom,qcm2290-cnoc", .data = &qcm2290_cnoc },
{ .compatible = "qcom,qcm2290-snoc", .data = &qcm2290_snoc },
{ .compatible = "qcom,qcm2290-qup-virt", .data = &qcm2290_qup_virt },
{ .compatible = "qcom,qcm2290-mmrt-virt", .data = &qcm2290_mmrt_virt },
{ .compatible = "qcom,qcm2290-mmnrt-virt", .data = &qcm2290_mmnrt_virt },
{ },
};
MODULE_DEVICE_TABLE(of, qcm2290_noc_of_match);
static struct platform_driver qcm2290_noc_driver = {
.probe = qnoc_probe,
.remove = qnoc_remove,
.driver = {
.name = "qnoc-qcm2290",
.of_match_table = qcm2290_noc_of_match,
.sync_state = icc_sync_state,
},
};
module_platform_driver(qcm2290_noc_driver);
MODULE_DESCRIPTION("Qualcomm QCM2290 NoC driver");
MODULE_LICENSE("GPL v2");
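/*
 * Consumer-side sketch (illustrative, not part of this provider): a
 * client driver obtains a path named in its DT node and votes average
 * and peak bandwidth on it. The path name and numbers are made up.
 *
 *	struct icc_path *path = of_icc_get(dev, "memory");
 *	int ret;
 *
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	ret = icc_set_bw(path, kBps_to_icc(1000), kBps_to_icc(2000));
 */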
| linux-master | drivers/interconnect/qcom/qcm2290.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
*/
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sc7180.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sc7180.h"
static struct qcom_icc_node qhm_a1noc_cfg = {
.name = "qhm_a1noc_cfg",
.id = SC7180_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7180_SLAVE_SERVICE_A1NOC },
};
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
.id = SC7180_MASTER_QSPI,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7180_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_qup_0 = {
.name = "qhm_qup_0",
.id = SC7180_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7180_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
.id = SC7180_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7180_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_emmc = {
.name = "xm_emmc",
.id = SC7180_MASTER_EMMC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7180_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
.id = SC7180_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7180_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_a2noc_cfg = {
.name = "qhm_a2noc_cfg",
.id = SC7180_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7180_SLAVE_SERVICE_A2NOC },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
.id = SC7180_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7180_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qhm_qup_1 = {
.name = "qhm_qup_1",
.id = SC7180_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7180_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
.id = SC7180_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7180_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
.id = SC7180_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7180_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
.id = SC7180_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7180_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qhm_usb3 = {
.name = "qhm_usb3",
.id = SC7180_MASTER_USB3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7180_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_camnoc_hf0_uncomp = {
.name = "qxm_camnoc_hf0_uncomp",
.id = SC7180_MASTER_CAMNOC_HF0_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC7180_SLAVE_CAMNOC_UNCOMP },
};
static struct qcom_icc_node qxm_camnoc_hf1_uncomp = {
.name = "qxm_camnoc_hf1_uncomp",
.id = SC7180_MASTER_CAMNOC_HF1_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC7180_SLAVE_CAMNOC_UNCOMP },
};
static struct qcom_icc_node qxm_camnoc_sf_uncomp = {
.name = "qxm_camnoc_sf_uncomp",
.id = SC7180_MASTER_CAMNOC_SF_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC7180_SLAVE_CAMNOC_UNCOMP },
};
static struct qcom_icc_node qnm_npu = {
.name = "qnm_npu",
.id = SC7180_MASTER_NPU,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SC7180_SLAVE_CDSP_GEM_NOC },
};
static struct qcom_icc_node qxm_npu_dsp = {
.name = "qxm_npu_dsp",
.id = SC7180_MASTER_NPU_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7180_SLAVE_CDSP_GEM_NOC },
};
static struct qcom_icc_node qnm_snoc = {
.name = "qnm_snoc",
.id = SC7180_MASTER_SNOC_CNOC,
.channels = 1,
.buswidth = 8,
.num_links = 51,
.links = { SC7180_SLAVE_A1NOC_CFG,
SC7180_SLAVE_A2NOC_CFG,
SC7180_SLAVE_AHB2PHY_SOUTH,
SC7180_SLAVE_AHB2PHY_CENTER,
SC7180_SLAVE_AOP,
SC7180_SLAVE_AOSS,
SC7180_SLAVE_BOOT_ROM,
SC7180_SLAVE_CAMERA_CFG,
SC7180_SLAVE_CAMERA_NRT_THROTTLE_CFG,
SC7180_SLAVE_CAMERA_RT_THROTTLE_CFG,
SC7180_SLAVE_CLK_CTL,
SC7180_SLAVE_RBCPR_CX_CFG,
SC7180_SLAVE_RBCPR_MX_CFG,
SC7180_SLAVE_CRYPTO_0_CFG,
SC7180_SLAVE_DCC_CFG,
SC7180_SLAVE_CNOC_DDRSS,
SC7180_SLAVE_DISPLAY_CFG,
SC7180_SLAVE_DISPLAY_RT_THROTTLE_CFG,
SC7180_SLAVE_DISPLAY_THROTTLE_CFG,
SC7180_SLAVE_EMMC_CFG,
SC7180_SLAVE_GLM,
SC7180_SLAVE_GFX3D_CFG,
SC7180_SLAVE_IMEM_CFG,
SC7180_SLAVE_IPA_CFG,
SC7180_SLAVE_CNOC_MNOC_CFG,
SC7180_SLAVE_CNOC_MSS,
SC7180_SLAVE_NPU_CFG,
SC7180_SLAVE_NPU_DMA_BWMON_CFG,
SC7180_SLAVE_NPU_PROC_BWMON_CFG,
SC7180_SLAVE_PDM,
SC7180_SLAVE_PIMEM_CFG,
SC7180_SLAVE_PRNG,
SC7180_SLAVE_QDSS_CFG,
SC7180_SLAVE_QM_CFG,
SC7180_SLAVE_QM_MPU_CFG,
SC7180_SLAVE_QSPI_0,
SC7180_SLAVE_QUP_0,
SC7180_SLAVE_QUP_1,
SC7180_SLAVE_SDCC_2,
SC7180_SLAVE_SECURITY,
SC7180_SLAVE_SNOC_CFG,
SC7180_SLAVE_TCSR,
SC7180_SLAVE_TLMM_WEST,
SC7180_SLAVE_TLMM_NORTH,
SC7180_SLAVE_TLMM_SOUTH,
SC7180_SLAVE_UFS_MEM_CFG,
SC7180_SLAVE_USB3,
SC7180_SLAVE_VENUS_CFG,
SC7180_SLAVE_VENUS_THROTTLE_CFG,
SC7180_SLAVE_VSENSE_CTRL_CFG,
SC7180_SLAVE_SERVICE_CNOC
},
};
static struct qcom_icc_node xm_qdss_dap = {
.name = "xm_qdss_dap",
.id = SC7180_MASTER_QDSS_DAP,
.channels = 1,
.buswidth = 8,
.num_links = 51,
.links = { SC7180_SLAVE_A1NOC_CFG,
SC7180_SLAVE_A2NOC_CFG,
SC7180_SLAVE_AHB2PHY_SOUTH,
SC7180_SLAVE_AHB2PHY_CENTER,
SC7180_SLAVE_AOP,
SC7180_SLAVE_AOSS,
SC7180_SLAVE_BOOT_ROM,
SC7180_SLAVE_CAMERA_CFG,
SC7180_SLAVE_CAMERA_NRT_THROTTLE_CFG,
SC7180_SLAVE_CAMERA_RT_THROTTLE_CFG,
SC7180_SLAVE_CLK_CTL,
SC7180_SLAVE_RBCPR_CX_CFG,
SC7180_SLAVE_RBCPR_MX_CFG,
SC7180_SLAVE_CRYPTO_0_CFG,
SC7180_SLAVE_DCC_CFG,
SC7180_SLAVE_CNOC_DDRSS,
SC7180_SLAVE_DISPLAY_CFG,
SC7180_SLAVE_DISPLAY_RT_THROTTLE_CFG,
SC7180_SLAVE_DISPLAY_THROTTLE_CFG,
SC7180_SLAVE_EMMC_CFG,
SC7180_SLAVE_GLM,
SC7180_SLAVE_GFX3D_CFG,
SC7180_SLAVE_IMEM_CFG,
SC7180_SLAVE_IPA_CFG,
SC7180_SLAVE_CNOC_MNOC_CFG,
SC7180_SLAVE_CNOC_MSS,
SC7180_SLAVE_NPU_CFG,
SC7180_SLAVE_NPU_DMA_BWMON_CFG,
SC7180_SLAVE_NPU_PROC_BWMON_CFG,
SC7180_SLAVE_PDM,
SC7180_SLAVE_PIMEM_CFG,
SC7180_SLAVE_PRNG,
SC7180_SLAVE_QDSS_CFG,
SC7180_SLAVE_QM_CFG,
SC7180_SLAVE_QM_MPU_CFG,
SC7180_SLAVE_QSPI_0,
SC7180_SLAVE_QUP_0,
SC7180_SLAVE_QUP_1,
SC7180_SLAVE_SDCC_2,
SC7180_SLAVE_SECURITY,
SC7180_SLAVE_SNOC_CFG,
SC7180_SLAVE_TCSR,
SC7180_SLAVE_TLMM_WEST,
SC7180_SLAVE_TLMM_NORTH,
SC7180_SLAVE_TLMM_SOUTH,
SC7180_SLAVE_UFS_MEM_CFG,
SC7180_SLAVE_USB3,
SC7180_SLAVE_VENUS_CFG,
SC7180_SLAVE_VENUS_THROTTLE_CFG,
SC7180_SLAVE_VSENSE_CTRL_CFG,
SC7180_SLAVE_SERVICE_CNOC
},
};
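/*
 * xm_qdss_dap mirrors qnm_snoc's 51-slave fan-out: the debug access
 * port reaches the same set of configuration slaves.
 */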
static struct qcom_icc_node qhm_cnoc_dc_noc = {
.name = "qhm_cnoc_dc_noc",
.id = SC7180_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
.links = { SC7180_SLAVE_GEM_NOC_CFG,
SC7180_SLAVE_LLCC_CFG
},
};
static struct qcom_icc_node acm_apps0 = {
.name = "acm_apps0",
.id = SC7180_MASTER_APPSS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
.links = { SC7180_SLAVE_GEM_NOC_SNOC,
SC7180_SLAVE_LLCC
},
};
static struct qcom_icc_node acm_sys_tcu = {
.name = "acm_sys_tcu",
.id = SC7180_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SC7180_SLAVE_GEM_NOC_SNOC,
SC7180_SLAVE_LLCC
},
};
static struct qcom_icc_node qhm_gemnoc_cfg = {
.name = "qhm_gemnoc_cfg",
.id = SC7180_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 2,
.links = { SC7180_SLAVE_MSS_PROC_MS_MPU_CFG,
SC7180_SLAVE_SERVICE_GEM_NOC
},
};
static struct qcom_icc_node qnm_cmpnoc = {
.name = "qnm_cmpnoc",
.id = SC7180_MASTER_COMPUTE_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
.links = { SC7180_SLAVE_GEM_NOC_SNOC,
SC7180_SLAVE_LLCC
},
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
.id = SC7180_MASTER_MNOC_HF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC7180_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
.id = SC7180_MASTER_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
.links = { SC7180_SLAVE_GEM_NOC_SNOC,
SC7180_SLAVE_LLCC
},
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
.id = SC7180_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7180_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
.id = SC7180_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC7180_SLAVE_LLCC },
};
static struct qcom_icc_node qxm_gpu = {
.name = "qxm_gpu",
.id = SC7180_MASTER_GFX3D,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SC7180_SLAVE_GEM_NOC_SNOC,
SC7180_SLAVE_LLCC
},
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
.id = SC7180_MASTER_LLCC,
.channels = 2,
.buswidth = 4,
.num_links = 1,
.links = { SC7180_SLAVE_EBI1 },
};
static struct qcom_icc_node qhm_mnoc_cfg = {
.name = "qhm_mnoc_cfg",
.id = SC7180_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7180_SLAVE_SERVICE_MNOC },
};
static struct qcom_icc_node qxm_camnoc_hf0 = {
.name = "qxm_camnoc_hf0",
.id = SC7180_MASTER_CAMNOC_HF0,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SC7180_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qxm_camnoc_hf1 = {
.name = "qxm_camnoc_hf1",
.id = SC7180_MASTER_CAMNOC_HF1,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SC7180_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qxm_camnoc_sf = {
.name = "qxm_camnoc_sf",
.id = SC7180_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC7180_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_mdp0 = {
.name = "qxm_mdp0",
.id = SC7180_MASTER_MDP0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC7180_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qxm_rot = {
.name = "qxm_rot",
.id = SC7180_MASTER_ROTATOR,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC7180_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_venus0 = {
.name = "qxm_venus0",
.id = SC7180_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC7180_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_venus_arm9 = {
.name = "qxm_venus_arm9",
.id = SC7180_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7180_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node amm_npu_sys = {
.name = "amm_npu_sys",
.id = SC7180_MASTER_NPU_SYS,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SC7180_SLAVE_NPU_COMPUTE_NOC },
};
static struct qcom_icc_node qhm_npu_cfg = {
.name = "qhm_npu_cfg",
.id = SC7180_MASTER_NPU_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 8,
.links = { SC7180_SLAVE_NPU_CAL_DP0,
SC7180_SLAVE_NPU_CP,
SC7180_SLAVE_NPU_INT_DMA_BWMON_CFG,
SC7180_SLAVE_NPU_DPM,
SC7180_SLAVE_ISENSE_CFG,
SC7180_SLAVE_NPU_LLM_CFG,
SC7180_SLAVE_NPU_TCM,
SC7180_SLAVE_SERVICE_NPU_NOC
},
};
static struct qcom_icc_node qup_core_master_1 = {
.name = "qup_core_master_1",
.id = SC7180_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7180_SLAVE_QUP_CORE_0 },
};
static struct qcom_icc_node qup_core_master_2 = {
.name = "qup_core_master_2",
.id = SC7180_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7180_SLAVE_QUP_CORE_1 },
};
static struct qcom_icc_node qhm_snoc_cfg = {
.name = "qhm_snoc_cfg",
.id = SC7180_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7180_SLAVE_SERVICE_SNOC },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
.id = SC7180_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 6,
.links = { SC7180_SLAVE_APPSS,
SC7180_SLAVE_SNOC_CNOC,
SC7180_SLAVE_SNOC_GEM_NOC_SF,
SC7180_SLAVE_IMEM,
SC7180_SLAVE_PIMEM,
SC7180_SLAVE_QDSS_STM
},
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
.id = SC7180_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 7,
.links = { SC7180_SLAVE_APPSS,
SC7180_SLAVE_SNOC_CNOC,
SC7180_SLAVE_SNOC_GEM_NOC_SF,
SC7180_SLAVE_IMEM,
SC7180_SLAVE_PIMEM,
SC7180_SLAVE_QDSS_STM,
SC7180_SLAVE_TCU
},
};
static struct qcom_icc_node qnm_gemnoc = {
.name = "qnm_gemnoc",
.id = SC7180_MASTER_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 6,
.links = { SC7180_SLAVE_APPSS,
SC7180_SLAVE_SNOC_CNOC,
SC7180_SLAVE_IMEM,
SC7180_SLAVE_PIMEM,
SC7180_SLAVE_QDSS_STM,
SC7180_SLAVE_TCU
},
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
.id = SC7180_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SC7180_SLAVE_SNOC_GEM_NOC_GC,
SC7180_SLAVE_IMEM
},
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
.id = SC7180_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC7180_MASTER_A1NOC_SNOC },
};
static struct qcom_icc_node srvc_aggre1_noc = {
.name = "srvc_aggre1_noc",
.id = SC7180_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
.id = SC7180_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC7180_MASTER_A2NOC_SNOC },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
.id = SC7180_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_camnoc_uncomp = {
.name = "qns_camnoc_uncomp",
.id = SC7180_SLAVE_CAMNOC_UNCOMP,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node qns_cdsp_gemnoc = {
.name = "qns_cdsp_gemnoc",
.id = SC7180_SLAVE_CDSP_GEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC7180_MASTER_COMPUTE_NOC },
};
static struct qcom_icc_node qhs_a1_noc_cfg = {
.name = "qhs_a1_noc_cfg",
.id = SC7180_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7180_MASTER_A1NOC_CFG },
};
static struct qcom_icc_node qhs_a2_noc_cfg = {
.name = "qhs_a2_noc_cfg",
.id = SC7180_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7180_MASTER_A2NOC_CFG },
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
.id = SC7180_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy2 = {
.name = "qhs_ahb2phy2",
.id = SC7180_SLAVE_AHB2PHY_CENTER,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aop = {
.name = "qhs_aop",
.id = SC7180_SLAVE_AOP,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
.id = SC7180_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_boot_rom = {
.name = "qhs_boot_rom",
.id = SC7180_SLAVE_BOOT_ROM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
.id = SC7180_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_nrt_throttle_cfg = {
.name = "qhs_camera_nrt_throttle_cfg",
.id = SC7180_SLAVE_CAMERA_NRT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_rt_throttle_cfg = {
.name = "qhs_camera_rt_throttle_cfg",
.id = SC7180_SLAVE_CAMERA_RT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
.id = SC7180_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
.id = SC7180_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mx = {
.name = "qhs_cpr_mx",
.id = SC7180_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
.id = SC7180_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dcc_cfg = {
.name = "qhs_dcc_cfg",
.id = SC7180_SLAVE_DCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ddrss_cfg = {
.name = "qhs_ddrss_cfg",
.id = SC7180_SLAVE_CNOC_DDRSS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7180_MASTER_CNOC_DC_NOC },
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
.id = SC7180_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display_rt_throttle_cfg = {
.name = "qhs_display_rt_throttle_cfg",
.id = SC7180_SLAVE_DISPLAY_RT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display_throttle_cfg = {
.name = "qhs_display_throttle_cfg",
.id = SC7180_SLAVE_DISPLAY_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_emmc_cfg = {
.name = "qhs_emmc_cfg",
.id = SC7180_SLAVE_EMMC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_glm = {
.name = "qhs_glm",
.id = SC7180_SLAVE_GLM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
.id = SC7180_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
.id = SC7180_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
.id = SC7180_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mnoc_cfg = {
.name = "qhs_mnoc_cfg",
.id = SC7180_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7180_MASTER_CNOC_MNOC_CFG },
};
static struct qcom_icc_node qhs_mss_cfg = {
.name = "qhs_mss_cfg",
.id = SC7180_SLAVE_CNOC_MSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_npu_cfg = {
.name = "qhs_npu_cfg",
.id = SC7180_SLAVE_NPU_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7180_MASTER_NPU_NOC_CFG },
};
static struct qcom_icc_node qhs_npu_dma_throttle_cfg = {
.name = "qhs_npu_dma_throttle_cfg",
.id = SC7180_SLAVE_NPU_DMA_BWMON_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_npu_dsp_throttle_cfg = {
.name = "qhs_npu_dsp_throttle_cfg",
.id = SC7180_SLAVE_NPU_PROC_BWMON_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
.id = SC7180_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
.id = SC7180_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
.id = SC7180_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
.id = SC7180_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qm_cfg = {
.name = "qhs_qm_cfg",
.id = SC7180_SLAVE_QM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qm_mpu_cfg = {
.name = "qhs_qm_mpu_cfg",
.id = SC7180_SLAVE_QM_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
.id = SC7180_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
.id = SC7180_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
.id = SC7180_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
.id = SC7180_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_security = {
.name = "qhs_security",
.id = SC7180_SLAVE_SECURITY,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_snoc_cfg = {
.name = "qhs_snoc_cfg",
.id = SC7180_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7180_MASTER_SNOC_CFG },
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
.id = SC7180_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_1 = {
.name = "qhs_tlmm_1",
.id = SC7180_SLAVE_TLMM_WEST,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_2 = {
.name = "qhs_tlmm_2",
.id = SC7180_SLAVE_TLMM_NORTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_3 = {
.name = "qhs_tlmm_3",
.id = SC7180_SLAVE_TLMM_SOUTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
.id = SC7180_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3 = {
.name = "qhs_usb3",
.id = SC7180_SLAVE_USB3,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
.id = SC7180_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_throttle_cfg = {
.name = "qhs_venus_throttle_cfg",
.id = SC7180_SLAVE_VENUS_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
.id = SC7180_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_cnoc = {
.name = "srvc_cnoc",
.id = SC7180_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gemnoc = {
.name = "qhs_gemnoc",
.id = SC7180_SLAVE_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC7180_MASTER_GEM_NOC_CFG },
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
.id = SC7180_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
.name = "qhs_mdsp_ms_mpu_cfg",
.id = SC7180_SLAVE_MSS_PROC_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_gem_noc_snoc = {
.name = "qns_gem_noc_snoc",
.id = SC7180_SLAVE_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7180_MASTER_GEM_NOC_SNOC },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
.id = SC7180_SLAVE_LLCC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC7180_MASTER_LLCC },
};
static struct qcom_icc_node srvc_gemnoc = {
.name = "srvc_gemnoc",
.id = SC7180_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
.id = SC7180_SLAVE_EBI1,
.channels = 2,
.buswidth = 4,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
.id = SC7180_SLAVE_MNOC_HF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC7180_MASTER_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
.id = SC7180_SLAVE_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC7180_MASTER_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
.id = SC7180_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cal_dp0 = {
.name = "qhs_cal_dp0",
.id = SC7180_SLAVE_NPU_CAL_DP0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cp = {
.name = "qhs_cp",
.id = SC7180_SLAVE_NPU_CP,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dma_bwmon = {
.name = "qhs_dma_bwmon",
.id = SC7180_SLAVE_NPU_INT_DMA_BWMON_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dpm = {
.name = "qhs_dpm",
.id = SC7180_SLAVE_NPU_DPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_isense = {
.name = "qhs_isense",
.id = SC7180_SLAVE_ISENSE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_llm = {
.name = "qhs_llm",
.id = SC7180_SLAVE_NPU_LLM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcm = {
.name = "qhs_tcm",
.id = SC7180_SLAVE_NPU_TCM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_npu_sys = {
.name = "qns_npu_sys",
.id = SC7180_SLAVE_NPU_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
};
static struct qcom_icc_node srvc_noc = {
.name = "srvc_noc",
.id = SC7180_SLAVE_SERVICE_NPU_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup_core_slave_1 = {
.name = "qup_core_slave_1",
.id = SC7180_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup_core_slave_2 = {
.name = "qup_core_slave_2",
.id = SC7180_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
.id = SC7180_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qns_cnoc = {
.name = "qns_cnoc",
.id = SC7180_SLAVE_SNOC_CNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7180_MASTER_SNOC_CNOC },
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
.id = SC7180_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC7180_MASTER_SNOC_GC_MEM_NOC },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
.id = SC7180_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC7180_MASTER_SNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
.id = SC7180_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
.id = SC7180_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
.id = SC7180_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
.id = SC7180_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
.id = SC7180_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
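/*
 * The Bus Clock Manager (BCM) resources below aggregate the bandwidth
 * votes of their member nodes and are committed to hardware via RPMh.
 * A BCM with .keepalive set retains a minimum vote so the underlying
 * bus clock is never switched off, even when no consumer holds a
 * request.
 */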
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
.keepalive = false,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_mc0 = {
.name = "MC0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_sh0 = {
.name = "SH0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_llcc },
};
static struct qcom_icc_bcm bcm_mm0 = {
.name = "MM0",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxm_crypto },
};
static struct qcom_icc_bcm bcm_cn0 = {
.name = "CN0",
.keepalive = true,
.num_nodes = 48,
.nodes = { &qnm_snoc,
&xm_qdss_dap,
&qhs_a1_noc_cfg,
&qhs_a2_noc_cfg,
&qhs_ahb2phy0,
&qhs_aop,
&qhs_aoss,
&qhs_boot_rom,
&qhs_camera_cfg,
&qhs_camera_nrt_throttle_cfg,
&qhs_camera_rt_throttle_cfg,
&qhs_clk_ctl,
&qhs_cpr_cx,
&qhs_cpr_mx,
&qhs_crypto0_cfg,
&qhs_dcc_cfg,
&qhs_ddrss_cfg,
&qhs_display_cfg,
&qhs_display_rt_throttle_cfg,
&qhs_display_throttle_cfg,
&qhs_glm,
&qhs_gpuss_cfg,
&qhs_imem_cfg,
&qhs_ipa,
&qhs_mnoc_cfg,
&qhs_mss_cfg,
&qhs_npu_cfg,
&qhs_npu_dma_throttle_cfg,
&qhs_npu_dsp_throttle_cfg,
&qhs_pimem_cfg,
&qhs_prng,
&qhs_qdss_cfg,
&qhs_qm_cfg,
&qhs_qm_mpu_cfg,
&qhs_qup0,
&qhs_qup1,
&qhs_security,
&qhs_snoc_cfg,
&qhs_tcsr,
&qhs_tlmm_1,
&qhs_tlmm_2,
&qhs_tlmm_3,
&qhs_ufs_mem_cfg,
&qhs_usb3,
&qhs_venus_cfg,
&qhs_venus_throttle_cfg,
&qhs_vsense_ctrl_cfg,
&srvc_cnoc
},
};
static struct qcom_icc_bcm bcm_mm1 = {
.name = "MM1",
.keepalive = false,
.num_nodes = 8,
.nodes = { &qxm_camnoc_hf0_uncomp,
&qxm_camnoc_hf1_uncomp,
&qxm_camnoc_sf_uncomp,
&qhm_mnoc_cfg,
&qxm_mdp0,
&qxm_rot,
&qxm_venus0,
&qxm_venus_arm9
},
};
static struct qcom_icc_bcm bcm_sh2 = {
.name = "SH2",
.keepalive = false,
.num_nodes = 1,
.nodes = { &acm_sys_tcu },
};
static struct qcom_icc_bcm bcm_mm2 = {
.name = "MM2",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_bcm bcm_qup0 = {
.name = "QUP0",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qup_core_master_1, &qup_core_master_2 },
};
static struct qcom_icc_bcm bcm_sh3 = {
.name = "SH3",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_cmpnoc },
};
static struct qcom_icc_bcm bcm_sh4 = {
.name = "SH4",
.keepalive = false,
.num_nodes = 1,
.nodes = { &acm_apps0 },
};
static struct qcom_icc_bcm bcm_sn0 = {
.name = "SN0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_bcm bcm_co0 = {
.name = "CO0",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_cdsp_gemnoc },
};
static struct qcom_icc_bcm bcm_sn1 = {
.name = "SN1",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxs_imem },
};
static struct qcom_icc_bcm bcm_cn1 = {
.name = "CN1",
.keepalive = false,
.num_nodes = 8,
.nodes = { &qhm_qspi,
&xm_sdc2,
&xm_emmc,
&qhs_ahb2phy2,
&qhs_emmc_cfg,
&qhs_pdm,
&qhs_qspi,
&qhs_sdc2
},
};
static struct qcom_icc_bcm bcm_sn2 = {
.name = "SN2",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qxm_pimem, &qns_gemnoc_gc },
};
static struct qcom_icc_bcm bcm_co2 = {
.name = "CO2",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_npu },
};
static struct qcom_icc_bcm bcm_sn3 = {
.name = "SN3",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxs_pimem },
};
static struct qcom_icc_bcm bcm_co3 = {
.name = "CO3",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxm_npu_dsp },
};
static struct qcom_icc_bcm bcm_sn4 = {
.name = "SN4",
.keepalive = false,
.num_nodes = 1,
.nodes = { &xs_qdss_stm },
};
static struct qcom_icc_bcm bcm_sn7 = {
.name = "SN7",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_bcm bcm_sn9 = {
.name = "SN9",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_bcm bcm_sn12 = {
.name = "SN12",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_gemnoc },
};
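/*
 * Each NoC on SC7180 is exposed as a separate interconnect provider,
 * described by the node and BCM tables that follow.
 */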
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_cn1,
};
static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
[MASTER_QSPI] = &qhm_qspi,
[MASTER_QUP_0] = &qhm_qup_0,
[MASTER_SDCC_2] = &xm_sdc2,
[MASTER_EMMC] = &xm_emmc,
[MASTER_UFS_MEM] = &xm_ufs_mem,
[SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
static const struct qcom_icc_desc sc7180_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
};
static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QUP_1] = &qhm_qup_1,
[MASTER_USB3] = &qhm_usb3,
[MASTER_CRYPTO] = &qxm_crypto,
[MASTER_IPA] = &qxm_ipa,
[MASTER_QDSS_ETR] = &xm_qdss_etr,
[SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
static const struct qcom_icc_desc sc7180_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
static struct qcom_icc_bcm * const camnoc_virt_bcms[] = {
&bcm_mm1,
};
static struct qcom_icc_node * const camnoc_virt_nodes[] = {
[MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
[MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
[MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
[SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
};
static const struct qcom_icc_desc sc7180_camnoc_virt = {
.nodes = camnoc_virt_nodes,
.num_nodes = ARRAY_SIZE(camnoc_virt_nodes),
.bcms = camnoc_virt_bcms,
.num_bcms = ARRAY_SIZE(camnoc_virt_bcms),
};
static struct qcom_icc_bcm * const compute_noc_bcms[] = {
&bcm_co0,
&bcm_co2,
&bcm_co3,
};
static struct qcom_icc_node * const compute_noc_nodes[] = {
[MASTER_NPU] = &qnm_npu,
[MASTER_NPU_PROC] = &qxm_npu_dsp,
[SLAVE_CDSP_GEM_NOC] = &qns_cdsp_gemnoc,
};
static const struct qcom_icc_desc sc7180_compute_noc = {
.nodes = compute_noc_nodes,
.num_nodes = ARRAY_SIZE(compute_noc_nodes),
.bcms = compute_noc_bcms,
.num_bcms = ARRAY_SIZE(compute_noc_bcms),
};
static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
&bcm_cn1,
};
static struct qcom_icc_node * const config_noc_nodes[] = {
[MASTER_SNOC_CNOC] = &qnm_snoc,
[MASTER_QDSS_DAP] = &xm_qdss_dap,
[SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
[SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
[SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
[SLAVE_AHB2PHY_CENTER] = &qhs_ahb2phy2,
[SLAVE_AOP] = &qhs_aop,
[SLAVE_AOSS] = &qhs_aoss,
[SLAVE_BOOT_ROM] = &qhs_boot_rom,
[SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
[SLAVE_CAMERA_NRT_THROTTLE_CFG] = &qhs_camera_nrt_throttle_cfg,
[SLAVE_CAMERA_RT_THROTTLE_CFG] = &qhs_camera_rt_throttle_cfg,
[SLAVE_CLK_CTL] = &qhs_clk_ctl,
[SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
[SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
[SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
[SLAVE_DCC_CFG] = &qhs_dcc_cfg,
[SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
[SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
[SLAVE_DISPLAY_RT_THROTTLE_CFG] = &qhs_display_rt_throttle_cfg,
[SLAVE_DISPLAY_THROTTLE_CFG] = &qhs_display_throttle_cfg,
[SLAVE_EMMC_CFG] = &qhs_emmc_cfg,
[SLAVE_GLM] = &qhs_glm,
[SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
[SLAVE_IMEM_CFG] = &qhs_imem_cfg,
[SLAVE_IPA_CFG] = &qhs_ipa,
[SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
[SLAVE_CNOC_MSS] = &qhs_mss_cfg,
[SLAVE_NPU_CFG] = &qhs_npu_cfg,
[SLAVE_NPU_DMA_BWMON_CFG] = &qhs_npu_dma_throttle_cfg,
[SLAVE_NPU_PROC_BWMON_CFG] = &qhs_npu_dsp_throttle_cfg,
[SLAVE_PDM] = &qhs_pdm,
[SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
[SLAVE_PRNG] = &qhs_prng,
[SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
[SLAVE_QM_CFG] = &qhs_qm_cfg,
[SLAVE_QM_MPU_CFG] = &qhs_qm_mpu_cfg,
[SLAVE_QSPI_0] = &qhs_qspi,
[SLAVE_QUP_0] = &qhs_qup0,
[SLAVE_QUP_1] = &qhs_qup1,
[SLAVE_SDCC_2] = &qhs_sdc2,
[SLAVE_SECURITY] = &qhs_security,
[SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
[SLAVE_TCSR] = &qhs_tcsr,
[SLAVE_TLMM_WEST] = &qhs_tlmm_1,
[SLAVE_TLMM_NORTH] = &qhs_tlmm_2,
[SLAVE_TLMM_SOUTH] = &qhs_tlmm_3,
[SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
[SLAVE_USB3] = &qhs_usb3,
[SLAVE_VENUS_CFG] = &qhs_venus_cfg,
[SLAVE_VENUS_THROTTLE_CFG] = &qhs_venus_throttle_cfg,
[SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
[SLAVE_SERVICE_CNOC] = &srvc_cnoc,
};
static const struct qcom_icc_desc sc7180_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
[SLAVE_GEM_NOC_CFG] = &qhs_gemnoc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
};
static const struct qcom_icc_desc sc7180_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
};
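/*
 * The DC NoC carries no BCMs of its own; the unset .bcms and .num_bcms
 * fields default to NULL and 0, so only the topology is registered.
 */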
static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
&bcm_sh3,
&bcm_sh4,
};
static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_APPSS_PROC] = &acm_apps0,
[MASTER_SYS_TCU] = &acm_sys_tcu,
[MASTER_GEM_NOC_CFG] = &qhm_gemnoc_cfg,
[MASTER_COMPUTE_NOC] = &qnm_cmpnoc,
[MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
[MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
[MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
[MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
[MASTER_GFX3D] = &qxm_gpu,
[SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
[SLAVE_GEM_NOC_SNOC] = &qns_gem_noc_snoc,
[SLAVE_LLCC] = &qns_llcc,
[SLAVE_SERVICE_GEM_NOC] = &srvc_gemnoc,
};
static const struct qcom_icc_desc sc7180_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
};
static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
};
static const struct qcom_icc_desc sc7180_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm2,
};
static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
[MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
[MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
[MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
[MASTER_MDP0] = &qxm_mdp0,
[MASTER_ROTATOR] = &qxm_rot,
[MASTER_VIDEO_P0] = &qxm_venus0,
[MASTER_VIDEO_PROC] = &qxm_venus_arm9,
[SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
[SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
};
static const struct qcom_icc_desc sc7180_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
static struct qcom_icc_node * const npu_noc_nodes[] = {
[MASTER_NPU_SYS] = &amm_npu_sys,
[MASTER_NPU_NOC_CFG] = &qhm_npu_cfg,
[SLAVE_NPU_CAL_DP0] = &qhs_cal_dp0,
[SLAVE_NPU_CP] = &qhs_cp,
[SLAVE_NPU_INT_DMA_BWMON_CFG] = &qhs_dma_bwmon,
[SLAVE_NPU_DPM] = &qhs_dpm,
[SLAVE_ISENSE_CFG] = &qhs_isense,
[SLAVE_NPU_LLM_CFG] = &qhs_llm,
[SLAVE_NPU_TCM] = &qhs_tcm,
[SLAVE_NPU_COMPUTE_NOC] = &qns_npu_sys,
[SLAVE_SERVICE_NPU_NOC] = &srvc_noc,
};
static const struct qcom_icc_desc sc7180_npu_noc = {
.nodes = npu_noc_nodes,
.num_nodes = ARRAY_SIZE(npu_noc_nodes),
};
static struct qcom_icc_bcm * const qup_virt_bcms[] = {
&bcm_qup0,
};
static struct qcom_icc_node * const qup_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &qup_core_master_1,
[MASTER_QUP_CORE_1] = &qup_core_master_2,
[SLAVE_QUP_CORE_0] = &qup_core_slave_1,
[SLAVE_QUP_CORE_1] = &qup_core_slave_2,
};
static const struct qcom_icc_desc sc7180_qup_virt = {
.nodes = qup_virt_nodes,
.num_nodes = ARRAY_SIZE(qup_virt_nodes),
.bcms = qup_virt_bcms,
.num_bcms = ARRAY_SIZE(qup_virt_bcms),
};
static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn2,
&bcm_sn3,
&bcm_sn4,
&bcm_sn7,
&bcm_sn9,
&bcm_sn12,
};
static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_SNOC_CFG] = &qhm_snoc_cfg,
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
[MASTER_GEM_NOC_SNOC] = &qnm_gemnoc,
[MASTER_PIMEM] = &qxm_pimem,
[SLAVE_APPSS] = &qhs_apss,
[SLAVE_SNOC_CNOC] = &qns_cnoc,
[SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
[SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
[SLAVE_IMEM] = &qxs_imem,
[SLAVE_PIMEM] = &qxs_pimem,
[SLAVE_SERVICE_SNOC] = &srvc_snoc,
[SLAVE_QDSS_STM] = &xs_qdss_stm,
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
static const struct qcom_icc_desc sc7180_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sc7180-aggre1-noc",
.data = &sc7180_aggre1_noc},
{ .compatible = "qcom,sc7180-aggre2-noc",
.data = &sc7180_aggre2_noc},
{ .compatible = "qcom,sc7180-camnoc-virt",
.data = &sc7180_camnoc_virt},
{ .compatible = "qcom,sc7180-compute-noc",
.data = &sc7180_compute_noc},
{ .compatible = "qcom,sc7180-config-noc",
.data = &sc7180_config_noc},
{ .compatible = "qcom,sc7180-dc-noc",
.data = &sc7180_dc_noc},
{ .compatible = "qcom,sc7180-gem-noc",
.data = &sc7180_gem_noc},
{ .compatible = "qcom,sc7180-mc-virt",
.data = &sc7180_mc_virt},
{ .compatible = "qcom,sc7180-mmss-noc",
.data = &sc7180_mmss_noc},
{ .compatible = "qcom,sc7180-npu-noc",
.data = &sc7180_npu_noc},
{ .compatible = "qcom,sc7180-qup-virt",
.data = &sc7180_qup_virt},
{ .compatible = "qcom,sc7180-system-noc",
.data = &sc7180_system_noc},
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
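/*
 * icc_sync_state keeps the initial (boot-time) bandwidth votes in place
 * until all consumers have probed, after which the aggregate drops to
 * what consumers actually request.
 */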
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
.remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sc7180",
.of_match_table = qnoc_of_match,
.sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
MODULE_DESCRIPTION("Qualcomm SC7180 NoC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/interconnect/qcom/sc7180.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022, The Linux Foundation. All rights reserved.
*/
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sdm670-rpmh.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sdm670.h"
static struct qcom_icc_node qhm_a1noc_cfg = {
.name = "qhm_a1noc_cfg",
.id = SDM670_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM670_SLAVE_SERVICE_A1NOC },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
.id = SDM670_MASTER_BLSP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM670_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_tsif = {
.name = "qhm_tsif",
.id = SDM670_MASTER_TSIF,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM670_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_emmc = {
.name = "xm_emmc",
.id = SDM670_MASTER_EMMC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM670_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
.id = SDM670_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM670_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
.id = SDM670_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM670_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
.id = SDM670_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM670_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_a2noc_cfg = {
.name = "qhm_a2noc_cfg",
.id = SDM670_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM670_SLAVE_SERVICE_A2NOC },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
.id = SDM670_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM670_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
.id = SDM670_MASTER_BLSP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM670_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qnm_cnoc = {
.name = "qnm_cnoc",
.id = SDM670_MASTER_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM670_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
.id = SDM670_MASTER_CRYPTO_CORE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM670_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
.id = SDM670_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM670_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
.id = SDM670_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM670_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
.id = SDM670_MASTER_USB3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM670_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_camnoc_hf0_uncomp = {
.name = "qxm_camnoc_hf0_uncomp",
.id = SDM670_MASTER_CAMNOC_HF0_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM670_SLAVE_CAMNOC_UNCOMP },
};
static struct qcom_icc_node qxm_camnoc_hf1_uncomp = {
.name = "qxm_camnoc_hf1_uncomp",
.id = SDM670_MASTER_CAMNOC_HF1_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM670_SLAVE_CAMNOC_UNCOMP },
};
static struct qcom_icc_node qxm_camnoc_sf_uncomp = {
.name = "qxm_camnoc_sf_uncomp",
.id = SDM670_MASTER_CAMNOC_SF_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM670_SLAVE_CAMNOC_UNCOMP },
};
static struct qcom_icc_node qhm_spdm = {
.name = "qhm_spdm",
.id = SDM670_MASTER_SPDM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM670_SLAVE_CNOC_A2NOC },
};
static struct qcom_icc_node qnm_snoc = {
.name = "qnm_snoc",
.id = SDM670_MASTER_SNOC_CNOC,
.channels = 1,
.buswidth = 8,
.num_links = 38,
.links = { SDM670_SLAVE_TLMM_SOUTH,
SDM670_SLAVE_CAMERA_CFG,
SDM670_SLAVE_SDCC_4,
SDM670_SLAVE_SDCC_2,
SDM670_SLAVE_CNOC_MNOC_CFG,
SDM670_SLAVE_UFS_MEM_CFG,
SDM670_SLAVE_GLM,
SDM670_SLAVE_PDM,
SDM670_SLAVE_A2NOC_CFG,
SDM670_SLAVE_QDSS_CFG,
SDM670_SLAVE_DISPLAY_CFG,
SDM670_SLAVE_TCSR,
SDM670_SLAVE_DCC_CFG,
SDM670_SLAVE_CNOC_DDRSS,
SDM670_SLAVE_SNOC_CFG,
SDM670_SLAVE_SOUTH_PHY_CFG,
SDM670_SLAVE_GRAPHICS_3D_CFG,
SDM670_SLAVE_VENUS_CFG,
SDM670_SLAVE_TSIF,
SDM670_SLAVE_CDSP_CFG,
SDM670_SLAVE_AOP,
SDM670_SLAVE_BLSP_2,
SDM670_SLAVE_SERVICE_CNOC,
SDM670_SLAVE_USB3,
SDM670_SLAVE_IPA_CFG,
SDM670_SLAVE_RBCPR_CX_CFG,
SDM670_SLAVE_A1NOC_CFG,
SDM670_SLAVE_AOSS,
SDM670_SLAVE_PRNG,
SDM670_SLAVE_VSENSE_CTRL_CFG,
SDM670_SLAVE_EMMC_CFG,
SDM670_SLAVE_BLSP_1,
SDM670_SLAVE_SPDM_WRAPPER,
SDM670_SLAVE_CRYPTO_0_CFG,
SDM670_SLAVE_PIMEM_CFG,
SDM670_SLAVE_TLMM_NORTH,
SDM670_SLAVE_CLK_CTL,
SDM670_SLAVE_IMEM_CFG
},
};
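/*
 * For every node, .num_links must match the number of entries in the
 * .links array; the probe code walks exactly .num_links entries when
 * creating the interconnect graph edges.
 */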
static struct qcom_icc_node qhm_cnoc = {
.name = "qhm_cnoc",
.id = SDM670_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
.links = { SDM670_SLAVE_MEM_NOC_CFG,
SDM670_SLAVE_LLCC_CFG
},
};
static struct qcom_icc_node acm_l3 = {
.name = "acm_l3",
.id = SDM670_MASTER_AMPSS_M0,
.channels = 1,
.buswidth = 16,
.num_links = 3,
.links = { SDM670_SLAVE_SERVICE_GNOC,
SDM670_SLAVE_GNOC_SNOC,
SDM670_SLAVE_GNOC_MEM_NOC
},
};
static struct qcom_icc_node pm_gnoc_cfg = {
.name = "pm_gnoc_cfg",
.id = SDM670_MASTER_GNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM670_SLAVE_SERVICE_GNOC },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
.id = SDM670_MASTER_LLCC,
.channels = 2,
.buswidth = 4,
.num_links = 1,
.links = { SDM670_SLAVE_EBI_CH0 },
};
static struct qcom_icc_node acm_tcu = {
.name = "acm_tcu",
.id = SDM670_MASTER_TCU_0,
.channels = 1,
.buswidth = 8,
.num_links = 3,
.links = { SDM670_SLAVE_MEM_NOC_GNOC,
SDM670_SLAVE_LLCC,
SDM670_SLAVE_MEM_NOC_SNOC
},
};
static struct qcom_icc_node qhm_memnoc_cfg = {
.name = "qhm_memnoc_cfg",
.id = SDM670_MASTER_MEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 2,
.links = { SDM670_SLAVE_SERVICE_MEM_NOC,
SDM670_SLAVE_MSS_PROC_MS_MPU_CFG
},
};
static struct qcom_icc_node qnm_apps = {
.name = "qnm_apps",
.id = SDM670_MASTER_GNOC_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SDM670_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
.id = SDM670_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SDM670_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
.id = SDM670_MASTER_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 3,
.links = { SDM670_SLAVE_MEM_NOC_GNOC,
SDM670_SLAVE_LLCC,
SDM670_SLAVE_MEM_NOC_SNOC
},
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
.id = SDM670_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM670_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
.id = SDM670_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
.links = { SDM670_SLAVE_MEM_NOC_GNOC,
SDM670_SLAVE_LLCC
},
};
static struct qcom_icc_node qxm_gpu = {
.name = "qxm_gpu",
.id = SDM670_MASTER_GRAPHICS_3D,
.channels = 2,
.buswidth = 32,
.num_links = 3,
.links = { SDM670_SLAVE_MEM_NOC_GNOC,
SDM670_SLAVE_LLCC,
SDM670_SLAVE_MEM_NOC_SNOC
},
};
static struct qcom_icc_node qhm_mnoc_cfg = {
.name = "qhm_mnoc_cfg",
.id = SDM670_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM670_SLAVE_SERVICE_MNOC },
};
static struct qcom_icc_node qxm_camnoc_hf0 = {
.name = "qxm_camnoc_hf0",
.id = SDM670_MASTER_CAMNOC_HF0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM670_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qxm_camnoc_hf1 = {
.name = "qxm_camnoc_hf1",
.id = SDM670_MASTER_CAMNOC_HF1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM670_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qxm_camnoc_sf = {
.name = "qxm_camnoc_sf",
.id = SDM670_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM670_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_mdp0 = {
.name = "qxm_mdp0",
.id = SDM670_MASTER_MDP_PORT0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM670_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qxm_mdp1 = {
.name = "qxm_mdp1",
.id = SDM670_MASTER_MDP_PORT1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM670_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qxm_rot = {
.name = "qxm_rot",
.id = SDM670_MASTER_ROTATOR,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM670_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_venus0 = {
.name = "qxm_venus0",
.id = SDM670_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM670_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_venus1 = {
.name = "qxm_venus1",
.id = SDM670_MASTER_VIDEO_P1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM670_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_venus_arm9 = {
.name = "qxm_venus_arm9",
.id = SDM670_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM670_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qhm_snoc_cfg = {
.name = "qhm_snoc_cfg",
.id = SDM670_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM670_SLAVE_SERVICE_SNOC },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
.id = SDM670_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 6,
.links = { SDM670_SLAVE_PIMEM,
SDM670_SLAVE_SNOC_MEM_NOC_SF,
SDM670_SLAVE_OCIMEM,
SDM670_SLAVE_APPSS,
SDM670_SLAVE_SNOC_CNOC,
SDM670_SLAVE_QDSS_STM
},
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
.id = SDM670_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 7,
.links = { SDM670_SLAVE_PIMEM,
SDM670_SLAVE_SNOC_MEM_NOC_SF,
SDM670_SLAVE_OCIMEM,
SDM670_SLAVE_APPSS,
SDM670_SLAVE_SNOC_CNOC,
SDM670_SLAVE_TCU,
SDM670_SLAVE_QDSS_STM
},
};
static struct qcom_icc_node qnm_gladiator_sodv = {
.name = "qnm_gladiator_sodv",
.id = SDM670_MASTER_GNOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 6,
.links = { SDM670_SLAVE_PIMEM,
SDM670_SLAVE_OCIMEM,
SDM670_SLAVE_APPSS,
SDM670_SLAVE_SNOC_CNOC,
SDM670_SLAVE_TCU,
SDM670_SLAVE_QDSS_STM
},
};
static struct qcom_icc_node qnm_memnoc = {
.name = "qnm_memnoc",
.id = SDM670_MASTER_MEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 5,
.links = { SDM670_SLAVE_OCIMEM,
SDM670_SLAVE_APPSS,
SDM670_SLAVE_PIMEM,
SDM670_SLAVE_SNOC_CNOC,
SDM670_SLAVE_QDSS_STM
},
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
.id = SDM670_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SDM670_SLAVE_OCIMEM,
SDM670_SLAVE_SNOC_MEM_NOC_GC
},
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
.id = SDM670_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SDM670_SLAVE_OCIMEM,
SDM670_SLAVE_SNOC_MEM_NOC_GC
},
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
.id = SDM670_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SDM670_MASTER_A1NOC_SNOC },
};
static struct qcom_icc_node srvc_aggre1_noc = {
.name = "srvc_aggre1_noc",
.id = SDM670_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4,
};
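/*
 * Slave nodes with no .links entry, such as the service ports, are
 * leaf endpoints of the graph; their .num_links implicitly stays zero.
 */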
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
.id = SDM670_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SDM670_MASTER_A2NOC_SNOC },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
.id = SDM670_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_camnoc_uncomp = {
.name = "qns_camnoc_uncomp",
.id = SDM670_SLAVE_CAMNOC_UNCOMP,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node qhs_a1_noc_cfg = {
.name = "qhs_a1_noc_cfg",
.id = SDM670_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM670_MASTER_A1NOC_CFG },
};
static struct qcom_icc_node qhs_a2_noc_cfg = {
.name = "qhs_a2_noc_cfg",
.id = SDM670_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM670_MASTER_A2NOC_CFG },
};
static struct qcom_icc_node qhs_aop = {
.name = "qhs_aop",
.id = SDM670_SLAVE_AOP,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
.id = SDM670_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
.id = SDM670_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
.id = SDM670_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_compute_dsp_cfg = {
.name = "qhs_compute_dsp_cfg",
.id = SDM670_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
.id = SDM670_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
.id = SDM670_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dcc_cfg = {
.name = "qhs_dcc_cfg",
.id = SDM670_SLAVE_DCC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM670_MASTER_CNOC_DC_NOC },
};
static struct qcom_icc_node qhs_ddrss_cfg = {
.name = "qhs_ddrss_cfg",
.id = SDM670_SLAVE_CNOC_DDRSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
.id = SDM670_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_emmc_cfg = {
.name = "qhs_emmc_cfg",
.id = SDM670_SLAVE_EMMC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_glm = {
.name = "qhs_glm",
.id = SDM670_SLAVE_GLM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
.id = SDM670_SLAVE_GRAPHICS_3D_CFG,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
.id = SDM670_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
.id = SDM670_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mnoc_cfg = {
.name = "qhs_mnoc_cfg",
.id = SDM670_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM670_MASTER_CNOC_MNOC_CFG },
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
.id = SDM670_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_phy_refgen_south = {
.name = "qhs_phy_refgen_south",
.id = SDM670_SLAVE_SOUTH_PHY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
.id = SDM670_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
.id = SDM670_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
.id = SDM670_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qupv3_north = {
.name = "qhs_qupv3_north",
.id = SDM670_SLAVE_BLSP_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qupv3_south = {
.name = "qhs_qupv3_south",
.id = SDM670_SLAVE_BLSP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
.id = SDM670_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
.id = SDM670_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_snoc_cfg = {
.name = "qhs_snoc_cfg",
.id = SDM670_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM670_MASTER_SNOC_CFG },
};
static struct qcom_icc_node qhs_spdm = {
.name = "qhs_spdm",
.id = SDM670_SLAVE_SPDM_WRAPPER,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
.id = SDM670_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_north = {
.name = "qhs_tlmm_north",
.id = SDM670_SLAVE_TLMM_NORTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_south = {
.name = "qhs_tlmm_south",
.id = SDM670_SLAVE_TLMM_SOUTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tsif = {
.name = "qhs_tsif",
.id = SDM670_SLAVE_TSIF,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
.id = SDM670_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
.id = SDM670_SLAVE_USB3,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
.id = SDM670_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
.id = SDM670_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_cnoc_a2noc = {
.name = "qns_cnoc_a2noc",
.id = SDM670_SLAVE_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM670_MASTER_CNOC_A2NOC },
};
static struct qcom_icc_node srvc_cnoc = {
.name = "srvc_cnoc",
.id = SDM670_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
.id = SDM670_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_memnoc = {
.name = "qhs_memnoc",
.id = SDM670_SLAVE_MEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM670_MASTER_MEM_NOC_CFG },
};
static struct qcom_icc_node qns_gladiator_sodv = {
.name = "qns_gladiator_sodv",
.id = SDM670_SLAVE_GNOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM670_MASTER_GNOC_SNOC },
};
static struct qcom_icc_node qns_gnoc_memnoc = {
.name = "qns_gnoc_memnoc",
.id = SDM670_SLAVE_GNOC_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SDM670_MASTER_GNOC_MEM_NOC },
};
static struct qcom_icc_node srvc_gnoc = {
.name = "srvc_gnoc",
.id = SDM670_SLAVE_SERVICE_GNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
.id = SDM670_SLAVE_EBI_CH0,
.channels = 2,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
.name = "qhs_mdsp_ms_mpu_cfg",
.id = SDM670_SLAVE_MSS_PROC_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_apps_io = {
.name = "qns_apps_io",
.id = SDM670_SLAVE_MEM_NOC_GNOC,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
.id = SDM670_SLAVE_LLCC,
.channels = 2,
.buswidth = 16,
.num_links = 1,
.links = { SDM670_MASTER_LLCC },
};
static struct qcom_icc_node qns_memnoc_snoc = {
.name = "qns_memnoc_snoc",
.id = SDM670_SLAVE_MEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM670_MASTER_MEM_NOC_SNOC },
};
static struct qcom_icc_node srvc_memnoc = {
.name = "srvc_memnoc",
.id = SDM670_SLAVE_SERVICE_MEM_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns2_mem_noc = {
.name = "qns2_mem_noc",
.id = SDM670_SLAVE_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM670_MASTER_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
.id = SDM670_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SDM670_MASTER_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
.id = SDM670_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
.id = SDM670_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qns_cnoc = {
.name = "qns_cnoc",
.id = SDM670_SLAVE_SNOC_CNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM670_MASTER_SNOC_CNOC },
};
static struct qcom_icc_node qns_memnoc_gc = {
.name = "qns_memnoc_gc",
.id = SDM670_SLAVE_SNOC_MEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM670_MASTER_SNOC_GC_MEM_NOC },
};
static struct qcom_icc_node qns_memnoc_sf = {
.name = "qns_memnoc_sf",
.id = SDM670_SLAVE_SNOC_MEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SDM670_MASTER_SNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
.id = SDM670_SLAVE_OCIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
.id = SDM670_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
.id = SDM670_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
.id = SDM670_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
.id = SDM670_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
.keepalive = false,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_mc0 = {
.name = "MC0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_sh0 = {
.name = "SH0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_llcc },
};
static struct qcom_icc_bcm bcm_mm0 = {
.name = "MM0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_bcm bcm_sh1 = {
.name = "SH1",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_apps_io },
};
static struct qcom_icc_bcm bcm_mm1 = {
.name = "MM1",
.keepalive = true,
.num_nodes = 7,
.nodes = { &qxm_camnoc_hf0_uncomp,
&qxm_camnoc_hf1_uncomp,
&qxm_camnoc_sf_uncomp,
&qxm_camnoc_hf0,
&qxm_camnoc_hf1,
&qxm_mdp0,
&qxm_mdp1
},
};
static struct qcom_icc_bcm bcm_sh2 = {
.name = "SH2",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_memnoc_snoc },
};
static struct qcom_icc_bcm bcm_mm2 = {
.name = "MM2",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns2_mem_noc },
};
static struct qcom_icc_bcm bcm_sh3 = {
.name = "SH3",
.keepalive = false,
.num_nodes = 1,
.nodes = { &acm_tcu },
};
static struct qcom_icc_bcm bcm_mm3 = {
.name = "MM3",
.keepalive = false,
.num_nodes = 5,
.nodes = { &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9 },
};
static struct qcom_icc_bcm bcm_sh5 = {
.name = "SH5",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_apps },
};
static struct qcom_icc_bcm bcm_sn0 = {
.name = "SN0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_memnoc_sf },
};
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxm_crypto },
};
static struct qcom_icc_bcm bcm_cn0 = {
.name = "CN0",
.keepalive = true,
.num_nodes = 41,
.nodes = { &qhm_spdm,
&qnm_snoc,
&qhs_a1_noc_cfg,
&qhs_a2_noc_cfg,
&qhs_aop,
&qhs_aoss,
&qhs_camera_cfg,
&qhs_clk_ctl,
&qhs_compute_dsp_cfg,
&qhs_cpr_cx,
&qhs_crypto0_cfg,
&qhs_dcc_cfg,
&qhs_ddrss_cfg,
&qhs_display_cfg,
&qhs_emmc_cfg,
&qhs_glm,
&qhs_gpuss_cfg,
&qhs_imem_cfg,
&qhs_ipa,
&qhs_mnoc_cfg,
&qhs_pdm,
&qhs_phy_refgen_south,
&qhs_pimem_cfg,
&qhs_prng,
&qhs_qdss_cfg,
&qhs_qupv3_north,
&qhs_qupv3_south,
&qhs_sdc2,
&qhs_sdc4,
&qhs_snoc_cfg,
&qhs_spdm,
&qhs_tcsr,
&qhs_tlmm_north,
&qhs_tlmm_south,
&qhs_tsif,
&qhs_ufs_mem_cfg,
&qhs_usb3_0,
&qhs_venus_cfg,
&qhs_vsense_ctrl_cfg,
&qns_cnoc_a2noc,
&srvc_cnoc
},
};
static struct qcom_icc_bcm bcm_qup0 = {
.name = "QUP0",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qhm_qup1, &qhm_qup2 },
};
static struct qcom_icc_bcm bcm_sn1 = {
.name = "SN1",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxs_imem },
};
static struct qcom_icc_bcm bcm_sn2 = {
.name = "SN2",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_memnoc_gc },
};
static struct qcom_icc_bcm bcm_sn3 = {
.name = "SN3",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_cnoc },
};
static struct qcom_icc_bcm bcm_sn4 = {
.name = "SN4",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qxm_pimem, &qxs_pimem },
};
static struct qcom_icc_bcm bcm_sn5 = {
.name = "SN5",
.keepalive = false,
.num_nodes = 1,
.nodes = { &xs_qdss_stm },
};
static struct qcom_icc_bcm bcm_sn8 = {
.name = "SN8",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qnm_aggre1_noc, &srvc_aggre1_noc },
};
static struct qcom_icc_bcm bcm_sn10 = {
.name = "SN10",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qnm_aggre2_noc, &srvc_aggre2_noc },
};
static struct qcom_icc_bcm bcm_sn11 = {
.name = "SN11",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qnm_gladiator_sodv, &xm_gic },
};
static struct qcom_icc_bcm bcm_sn13 = {
.name = "SN13",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_memnoc },
};
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_qup0,
&bcm_sn8,
};
static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
[MASTER_BLSP_1] = &qhm_qup1,
[MASTER_TSIF] = &qhm_tsif,
[MASTER_EMMC] = &xm_emmc,
[MASTER_SDCC_2] = &xm_sdc2,
[MASTER_SDCC_4] = &xm_sdc4,
[MASTER_UFS_MEM] = &xm_ufs_mem,
[SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
static const struct qcom_icc_desc sdm670_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
&bcm_qup0,
&bcm_sn10,
};
static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_BLSP_2] = &qhm_qup2,
[MASTER_CNOC_A2NOC] = &qnm_cnoc,
[MASTER_CRYPTO_CORE_0] = &qxm_crypto,
[MASTER_IPA] = &qxm_ipa,
[MASTER_QDSS_ETR] = &xm_qdss_etr,
[MASTER_USB3] = &xm_usb3_0,
[SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
static const struct qcom_icc_desc sdm670_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
};
static struct qcom_icc_node * const config_noc_nodes[] = {
[MASTER_SPDM] = &qhm_spdm,
[MASTER_SNOC_CNOC] = &qnm_snoc,
[SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
[SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
[SLAVE_AOP] = &qhs_aop,
[SLAVE_AOSS] = &qhs_aoss,
[SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
[SLAVE_CLK_CTL] = &qhs_clk_ctl,
[SLAVE_CDSP_CFG] = &qhs_compute_dsp_cfg,
[SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
[SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
[SLAVE_DCC_CFG] = &qhs_dcc_cfg,
[SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
[SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
[SLAVE_EMMC_CFG] = &qhs_emmc_cfg,
[SLAVE_GLM] = &qhs_glm,
[SLAVE_GRAPHICS_3D_CFG] = &qhs_gpuss_cfg,
[SLAVE_IMEM_CFG] = &qhs_imem_cfg,
[SLAVE_IPA_CFG] = &qhs_ipa,
[SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
[SLAVE_PDM] = &qhs_pdm,
[SLAVE_SOUTH_PHY_CFG] = &qhs_phy_refgen_south,
[SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
[SLAVE_PRNG] = &qhs_prng,
[SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
[SLAVE_BLSP_2] = &qhs_qupv3_north,
[SLAVE_BLSP_1] = &qhs_qupv3_south,
[SLAVE_SDCC_2] = &qhs_sdc2,
[SLAVE_SDCC_4] = &qhs_sdc4,
[SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
[SLAVE_SPDM_WRAPPER] = &qhs_spdm,
[SLAVE_TCSR] = &qhs_tcsr,
[SLAVE_TLMM_NORTH] = &qhs_tlmm_north,
[SLAVE_TLMM_SOUTH] = &qhs_tlmm_south,
[SLAVE_TSIF] = &qhs_tsif,
[SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
[SLAVE_USB3] = &qhs_usb3_0,
[SLAVE_VENUS_CFG] = &qhs_venus_cfg,
[SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
[SLAVE_CNOC_A2NOC] = &qns_cnoc_a2noc,
[SLAVE_SERVICE_CNOC] = &srvc_cnoc,
};
static const struct qcom_icc_desc sdm670_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
static struct qcom_icc_bcm * const dc_noc_bcms[] = {
};
static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qhm_cnoc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
[SLAVE_MEM_NOC_CFG] = &qhs_memnoc,
};
static const struct qcom_icc_desc sdm670_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
.bcms = dc_noc_bcms,
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
static struct qcom_icc_bcm * const gladiator_noc_bcms[] = {
};
static struct qcom_icc_node * const gladiator_noc_nodes[] = {
[MASTER_AMPSS_M0] = &acm_l3,
[MASTER_GNOC_CFG] = &pm_gnoc_cfg,
[SLAVE_GNOC_SNOC] = &qns_gladiator_sodv,
[SLAVE_GNOC_MEM_NOC] = &qns_gnoc_memnoc,
[SLAVE_SERVICE_GNOC] = &srvc_gnoc,
};
static const struct qcom_icc_desc sdm670_gladiator_noc = {
.nodes = gladiator_noc_nodes,
.num_nodes = ARRAY_SIZE(gladiator_noc_nodes),
.bcms = gladiator_noc_bcms,
.num_bcms = ARRAY_SIZE(gladiator_noc_bcms),
};
static struct qcom_icc_bcm * const mem_noc_bcms[] = {
&bcm_acv,
&bcm_mc0,
&bcm_sh0,
&bcm_sh1,
&bcm_sh2,
&bcm_sh3,
&bcm_sh5,
};
static struct qcom_icc_node * const mem_noc_nodes[] = {
[MASTER_TCU_0] = &acm_tcu,
[MASTER_MEM_NOC_CFG] = &qhm_memnoc_cfg,
[MASTER_GNOC_MEM_NOC] = &qnm_apps,
[MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
[MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
[MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
[MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
[MASTER_GRAPHICS_3D] = &qxm_gpu,
[SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
[SLAVE_MEM_NOC_GNOC] = &qns_apps_io,
[SLAVE_LLCC] = &qns_llcc,
[SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc,
[SLAVE_SERVICE_MEM_NOC] = &srvc_memnoc,
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI_CH0] = &ebi,
};
static const struct qcom_icc_desc sdm670_mem_noc = {
.nodes = mem_noc_nodes,
.num_nodes = ARRAY_SIZE(mem_noc_nodes),
.bcms = mem_noc_bcms,
.num_bcms = ARRAY_SIZE(mem_noc_bcms),
};
static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm2,
&bcm_mm3,
};
static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
[MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
[MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
[MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
[MASTER_MDP_PORT0] = &qxm_mdp0,
[MASTER_MDP_PORT1] = &qxm_mdp1,
[MASTER_ROTATOR] = &qxm_rot,
[MASTER_VIDEO_P0] = &qxm_venus0,
[MASTER_VIDEO_P1] = &qxm_venus1,
[MASTER_VIDEO_PROC] = &qxm_venus_arm9,
[SLAVE_MNOC_SF_MEM_NOC] = &qns2_mem_noc,
[SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
};
static const struct qcom_icc_desc sdm670_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_mm1,
&bcm_sn0,
&bcm_sn1,
&bcm_sn10,
&bcm_sn11,
&bcm_sn13,
&bcm_sn2,
&bcm_sn3,
&bcm_sn4,
&bcm_sn5,
&bcm_sn8,
};
static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_SNOC_CFG] = &qhm_snoc_cfg,
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
[MASTER_GNOC_SNOC] = &qnm_gladiator_sodv,
[MASTER_MEM_NOC_SNOC] = &qnm_memnoc,
[MASTER_PIMEM] = &qxm_pimem,
[MASTER_GIC] = &xm_gic,
[SLAVE_APPSS] = &qhs_apss,
[SLAVE_SNOC_CNOC] = &qns_cnoc,
[SLAVE_SNOC_MEM_NOC_GC] = &qns_memnoc_gc,
[SLAVE_SNOC_MEM_NOC_SF] = &qns_memnoc_sf,
[SLAVE_OCIMEM] = &qxs_imem,
[SLAVE_PIMEM] = &qxs_pimem,
[SLAVE_SERVICE_SNOC] = &srvc_snoc,
[SLAVE_QDSS_STM] = &xs_qdss_stm,
[SLAVE_TCU] = &xs_sys_tcu_cfg,
[MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
[MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
[MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
[SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
};
static const struct qcom_icc_desc sdm670_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sdm670-aggre1-noc",
.data = &sdm670_aggre1_noc},
{ .compatible = "qcom,sdm670-aggre2-noc",
.data = &sdm670_aggre2_noc},
{ .compatible = "qcom,sdm670-config-noc",
.data = &sdm670_config_noc},
{ .compatible = "qcom,sdm670-dc-noc",
.data = &sdm670_dc_noc},
{ .compatible = "qcom,sdm670-gladiator-noc",
.data = &sdm670_gladiator_noc},
{ .compatible = "qcom,sdm670-mem-noc",
.data = &sdm670_mem_noc},
{ .compatible = "qcom,sdm670-mmss-noc",
.data = &sdm670_mmss_noc},
{ .compatible = "qcom,sdm670-system-noc",
.data = &sdm670_system_noc},
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
.remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sdm670",
.of_match_table = qnoc_of_match,
.sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
MODULE_DESCRIPTION("Qualcomm SDM670 NoC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/interconnect/qcom/sdm670.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include "bcm-voter.h"
#include "icc-common.h"
#include "icc-rpmh.h"
/**
 * qcom_icc_pre_aggregate - clean up stale values from the prior icc_set and queue the node's BCMs with the BCM voter
* @node: icc node to operate on
*/
void qcom_icc_pre_aggregate(struct icc_node *node)
{
size_t i;
struct qcom_icc_node *qn;
struct qcom_icc_provider *qp;
qn = node->data;
qp = to_qcom_provider(node->provider);
for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
qn->sum_avg[i] = 0;
qn->max_peak[i] = 0;
}
for (i = 0; i < qn->num_bcms; i++)
qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
}
EXPORT_SYMBOL_GPL(qcom_icc_pre_aggregate);
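/*
 * Bandwidth is tracked in QCOM_ICC_NUM_BUCKETS buckets (AMC, WAKE and
 * SLEEP), selected by the path tag, so that votes can differ between
 * the CPU-active and CPU-sleep states.
 */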
/**
* qcom_icc_aggregate - aggregate bw for buckets indicated by tag
* @node: node to aggregate
* @tag: tag to indicate which buckets to aggregate
* @avg_bw: new bw to sum aggregate
* @peak_bw: new bw to max aggregate
* @agg_avg: existing aggregate avg bw val
* @agg_peak: existing aggregate peak bw val
 *
 * Return: always 0
 */
int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
size_t i;
struct qcom_icc_node *qn;
qn = node->data;
if (!tag)
tag = QCOM_ICC_TAG_ALWAYS;
for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
if (tag & BIT(i)) {
qn->sum_avg[i] += avg_bw;
qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
}
if (node->init_avg || node->init_peak) {
qn->sum_avg[i] = max_t(u64, qn->sum_avg[i], node->init_avg);
qn->max_peak[i] = max_t(u64, qn->max_peak[i], node->init_peak);
}
}
*agg_avg += avg_bw;
*agg_peak = max_t(u32, *agg_peak, peak_bw);
return 0;
}
EXPORT_SYMBOL_GPL(qcom_icc_aggregate);
/**
 * qcom_icc_set - set constraints on a path by committing the queued BCM votes
* @src: source node for the path to set constraints on
* @dst: destination node for the path to set constraints on
*
* Return: 0 on success, or an error code otherwise
*/
int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
struct qcom_icc_provider *qp;
struct icc_node *node;
if (!src)
node = dst;
else
node = src;
qp = to_qcom_provider(node->provider);
qcom_icc_bcm_voter_commit(qp->voter);
return 0;
}
EXPORT_SYMBOL_GPL(qcom_icc_set);
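/*
 * Illustrative consumer-side usage (a sketch, not part of this file;
 * the "memory" path name and the bandwidth figures are made up):
 *
 *	struct icc_path *path = of_icc_get(dev, "memory");
 *
 *	if (!IS_ERR(path)) {
 *		icc_set_bw(path, kBps_to_icc(1000), kBps_to_icc(5000));
 *		...
 *		icc_put(path);
 *	}
 *
 * Each icc_set_bw() call flows through the aggregate callbacks above
 * and ends in qcom_icc_set(), which commits the queued BCM votes.
 */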
/**
 * qcom_icc_bcm_init - populates bcm aux data and connects qnodes
* @bcm: bcm to be initialized
* @dev: associated provider device
*
* Return: 0 on success, or an error code otherwise
*/
int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev)
{
struct qcom_icc_node *qn;
const struct bcm_db *data;
size_t data_count;
int i;
/* BCM is already initialised */
if (bcm->addr)
return 0;
bcm->addr = cmd_db_read_addr(bcm->name);
if (!bcm->addr) {
dev_err(dev, "%s could not find RPMh address\n",
bcm->name);
return -EINVAL;
}
data = cmd_db_read_aux_data(bcm->name, &data_count);
if (IS_ERR(data)) {
dev_err(dev, "%s command db read error (%ld)\n",
bcm->name, PTR_ERR(data));
return PTR_ERR(data);
}
if (!data_count) {
dev_err(dev, "%s command db missing or partial aux data\n",
bcm->name);
return -EINVAL;
}
bcm->aux_data.unit = le32_to_cpu(data->unit);
bcm->aux_data.width = le16_to_cpu(data->width);
bcm->aux_data.vcd = data->vcd;
bcm->aux_data.reserved = data->reserved;
INIT_LIST_HEAD(&bcm->list);
INIT_LIST_HEAD(&bcm->ws_list);
if (!bcm->vote_scale)
bcm->vote_scale = 1000;
/* Link Qnodes to their respective BCMs */
for (i = 0; i < bcm->num_nodes; i++) {
qn = bcm->nodes[i];
qn->bcms[qn->num_bcms] = bcm;
qn->num_bcms++;
}
return 0;
}
EXPORT_SYMBOL_GPL(qcom_icc_bcm_init);
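/*
 * Common probe for all RPMh interconnect providers: fetch the matched
 * descriptor, set up the provider callbacks, resolve the BCM voter,
 * initialise each BCM from command DB, then create the nodes and links
 * and register the provider.
 */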
int qcom_icc_rpmh_probe(struct platform_device *pdev)
{
const struct qcom_icc_desc *desc;
struct device *dev = &pdev->dev;
struct icc_onecell_data *data;
struct icc_provider *provider;
struct qcom_icc_node * const *qnodes, *qn;
struct qcom_icc_provider *qp;
struct icc_node *node;
size_t num_nodes, i, j;
int ret;
desc = of_device_get_match_data(dev);
if (!desc)
return -EINVAL;
qnodes = desc->nodes;
num_nodes = desc->num_nodes;
qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
if (!qp)
return -ENOMEM;
data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->num_nodes = num_nodes;
provider = &qp->provider;
provider->dev = dev;
provider->set = qcom_icc_set;
provider->pre_aggregate = qcom_icc_pre_aggregate;
provider->aggregate = qcom_icc_aggregate;
provider->xlate_extended = qcom_icc_xlate_extended;
provider->data = data;
icc_provider_init(provider);
qp->dev = dev;
qp->bcms = desc->bcms;
qp->num_bcms = desc->num_bcms;
qp->voter = of_bcm_voter_get(qp->dev, NULL);
if (IS_ERR(qp->voter))
return PTR_ERR(qp->voter);
for (i = 0; i < qp->num_bcms; i++)
qcom_icc_bcm_init(qp->bcms[i], dev);
for (i = 0; i < num_nodes; i++) {
qn = qnodes[i];
if (!qn)
continue;
node = icc_node_create(qn->id);
if (IS_ERR(node)) {
ret = PTR_ERR(node);
goto err_remove_nodes;
}
node->name = qn->name;
node->data = qn;
icc_node_add(node, provider);
for (j = 0; j < qn->num_links; j++)
icc_link_create(node, qn->links[j]);
data->nodes[i] = node;
}
ret = icc_provider_register(provider);
if (ret)
goto err_remove_nodes;
platform_set_drvdata(pdev, qp);
/* Populate child NoC devices if any */
if (of_get_child_count(dev->of_node) > 0) {
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret)
goto err_deregister_provider;
}
return 0;
err_deregister_provider:
icc_provider_deregister(provider);
err_remove_nodes:
icc_nodes_remove(provider);
return ret;
}
EXPORT_SYMBOL_GPL(qcom_icc_rpmh_probe);
int qcom_icc_rpmh_remove(struct platform_device *pdev)
{
struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
icc_provider_deregister(&qp->provider);
icc_nodes_remove(&qp->provider);
return 0;
}
EXPORT_SYMBOL_GPL(qcom_icc_rpmh_remove);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/interconnect/qcom/icc-rpmh.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Qualcomm SDM630/SDM636/SDM660 Network-on-Chip (NoC) QoS driver
* Copyright (C) 2020, AngeloGioacchino Del Regno <[email protected]>
*/
#include <dt-bindings/interconnect/qcom,sdm660.h>
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include "icc-rpm.h"
enum {
SDM660_MASTER_IPA = 1,
SDM660_MASTER_CNOC_A2NOC,
SDM660_MASTER_SDCC_1,
SDM660_MASTER_SDCC_2,
SDM660_MASTER_BLSP_1,
SDM660_MASTER_BLSP_2,
SDM660_MASTER_UFS,
SDM660_MASTER_USB_HS,
SDM660_MASTER_USB3,
SDM660_MASTER_CRYPTO_C0,
SDM660_MASTER_GNOC_BIMC,
SDM660_MASTER_OXILI,
SDM660_MASTER_MNOC_BIMC,
SDM660_MASTER_SNOC_BIMC,
SDM660_MASTER_PIMEM,
SDM660_MASTER_SNOC_CNOC,
SDM660_MASTER_QDSS_DAP,
SDM660_MASTER_APPS_PROC,
SDM660_MASTER_CNOC_MNOC_MMSS_CFG,
SDM660_MASTER_CNOC_MNOC_CFG,
SDM660_MASTER_CPP,
SDM660_MASTER_JPEG,
SDM660_MASTER_MDP_P0,
SDM660_MASTER_MDP_P1,
SDM660_MASTER_VENUS,
SDM660_MASTER_VFE,
SDM660_MASTER_QDSS_ETR,
SDM660_MASTER_QDSS_BAM,
SDM660_MASTER_SNOC_CFG,
SDM660_MASTER_BIMC_SNOC,
SDM660_MASTER_A2NOC_SNOC,
SDM660_MASTER_GNOC_SNOC,
SDM660_SLAVE_A2NOC_SNOC,
SDM660_SLAVE_EBI,
SDM660_SLAVE_HMSS_L3,
SDM660_SLAVE_BIMC_SNOC,
SDM660_SLAVE_CNOC_A2NOC,
SDM660_SLAVE_MPM,
SDM660_SLAVE_PMIC_ARB,
SDM660_SLAVE_TLMM_NORTH,
SDM660_SLAVE_TCSR,
SDM660_SLAVE_PIMEM_CFG,
SDM660_SLAVE_IMEM_CFG,
SDM660_SLAVE_MESSAGE_RAM,
SDM660_SLAVE_GLM,
SDM660_SLAVE_BIMC_CFG,
SDM660_SLAVE_PRNG,
SDM660_SLAVE_SPDM,
SDM660_SLAVE_QDSS_CFG,
SDM660_SLAVE_CNOC_MNOC_CFG,
SDM660_SLAVE_SNOC_CFG,
SDM660_SLAVE_QM_CFG,
SDM660_SLAVE_CLK_CTL,
SDM660_SLAVE_MSS_CFG,
SDM660_SLAVE_TLMM_SOUTH,
SDM660_SLAVE_UFS_CFG,
SDM660_SLAVE_A2NOC_CFG,
SDM660_SLAVE_A2NOC_SMMU_CFG,
SDM660_SLAVE_GPUSS_CFG,
SDM660_SLAVE_AHB2PHY,
SDM660_SLAVE_BLSP_1,
SDM660_SLAVE_SDCC_1,
SDM660_SLAVE_SDCC_2,
SDM660_SLAVE_TLMM_CENTER,
SDM660_SLAVE_BLSP_2,
SDM660_SLAVE_PDM,
SDM660_SLAVE_CNOC_MNOC_MMSS_CFG,
SDM660_SLAVE_USB_HS,
SDM660_SLAVE_USB3_0,
SDM660_SLAVE_SRVC_CNOC,
SDM660_SLAVE_GNOC_BIMC,
SDM660_SLAVE_GNOC_SNOC,
SDM660_SLAVE_CAMERA_CFG,
SDM660_SLAVE_CAMERA_THROTTLE_CFG,
SDM660_SLAVE_MISC_CFG,
SDM660_SLAVE_VENUS_THROTTLE_CFG,
SDM660_SLAVE_VENUS_CFG,
SDM660_SLAVE_MMSS_CLK_XPU_CFG,
SDM660_SLAVE_MMSS_CLK_CFG,
SDM660_SLAVE_MNOC_MPU_CFG,
SDM660_SLAVE_DISPLAY_CFG,
SDM660_SLAVE_CSI_PHY_CFG,
SDM660_SLAVE_DISPLAY_THROTTLE_CFG,
SDM660_SLAVE_SMMU_CFG,
SDM660_SLAVE_MNOC_BIMC,
SDM660_SLAVE_SRVC_MNOC,
SDM660_SLAVE_HMSS,
SDM660_SLAVE_LPASS,
SDM660_SLAVE_WLAN,
SDM660_SLAVE_CDSP,
SDM660_SLAVE_IPA,
SDM660_SLAVE_SNOC_BIMC,
SDM660_SLAVE_SNOC_CNOC,
SDM660_SLAVE_IMEM,
SDM660_SLAVE_PIMEM,
SDM660_SLAVE_QDSS_STM,
SDM660_SLAVE_SRVC_SNOC,
SDM660_A2NOC,
SDM660_BIMC,
SDM660_CNOC,
SDM660_GNOC,
SDM660_MNOC,
SDM660_SNOC,
};
static const char * const mm_intf_clocks[] = {
"iface",
};
static const char * const a2noc_intf_clocks[] = {
"ipa",
"ufs_axi",
"aggre2_ufs_axi",
"aggre2_usb3_axi",
"cfg_noc_usb2_axi",
};
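/*
 * The node descriptors below follow one template: masters carry a valid
 * mas_rpm_id with slv_rpm_id set to -1 (and vice versa for slaves), and
 * AP-owned ports additionally describe their QoS programming (mode,
 * priority and the QoS port index within the NoC).
 */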
static const u16 mas_ipa_links[] = {
SDM660_SLAVE_A2NOC_SNOC
};
static struct qcom_icc_node mas_ipa = {
.name = "mas_ipa",
.id = SDM660_MASTER_IPA,
.buswidth = 8,
.mas_rpm_id = 59,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 1,
.qos.prio_level = 1,
.qos.qos_port = 3,
.num_links = ARRAY_SIZE(mas_ipa_links),
.links = mas_ipa_links,
};
static const u16 mas_cnoc_a2noc_links[] = {
SDM660_SLAVE_A2NOC_SNOC
};
static struct qcom_icc_node mas_cnoc_a2noc = {
.name = "mas_cnoc_a2noc",
.id = SDM660_MASTER_CNOC_A2NOC,
.buswidth = 8,
.mas_rpm_id = 146,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mas_cnoc_a2noc_links),
.links = mas_cnoc_a2noc_links,
};
static const u16 mas_sdcc_1_links[] = {
SDM660_SLAVE_A2NOC_SNOC
};
static struct qcom_icc_node mas_sdcc_1 = {
.name = "mas_sdcc_1",
.id = SDM660_MASTER_SDCC_1,
.buswidth = 8,
.mas_rpm_id = 33,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_sdcc_1_links),
.links = mas_sdcc_1_links,
};
static const u16 mas_sdcc_2_links[] = {
SDM660_SLAVE_A2NOC_SNOC
};
static struct qcom_icc_node mas_sdcc_2 = {
.name = "mas_sdcc_2",
.id = SDM660_MASTER_SDCC_2,
.buswidth = 8,
.mas_rpm_id = 35,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_sdcc_2_links),
.links = mas_sdcc_2_links,
};
static const u16 mas_blsp_1_links[] = {
SDM660_SLAVE_A2NOC_SNOC
};
static struct qcom_icc_node mas_blsp_1 = {
.name = "mas_blsp_1",
.id = SDM660_MASTER_BLSP_1,
.buswidth = 4,
.mas_rpm_id = 41,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_blsp_1_links),
.links = mas_blsp_1_links,
};
static const u16 mas_blsp_2_links[] = {
SDM660_SLAVE_A2NOC_SNOC
};
static struct qcom_icc_node mas_blsp_2 = {
.name = "mas_blsp_2",
.id = SDM660_MASTER_BLSP_2,
.buswidth = 4,
.mas_rpm_id = 39,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_blsp_2_links),
.links = mas_blsp_2_links,
};
static const u16 mas_ufs_links[] = {
SDM660_SLAVE_A2NOC_SNOC
};
static struct qcom_icc_node mas_ufs = {
.name = "mas_ufs",
.id = SDM660_MASTER_UFS,
.buswidth = 8,
.mas_rpm_id = 68,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 1,
.qos.prio_level = 1,
.qos.qos_port = 4,
.num_links = ARRAY_SIZE(mas_ufs_links),
.links = mas_ufs_links,
};
static const u16 mas_usb_hs_links[] = {
SDM660_SLAVE_A2NOC_SNOC
};
static struct qcom_icc_node mas_usb_hs = {
.name = "mas_usb_hs",
.id = SDM660_MASTER_USB_HS,
.buswidth = 8,
.mas_rpm_id = 42,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 1,
.qos.prio_level = 1,
.qos.qos_port = 1,
.num_links = ARRAY_SIZE(mas_usb_hs_links),
.links = mas_usb_hs_links,
};
static const u16 mas_usb3_links[] = {
SDM660_SLAVE_A2NOC_SNOC
};
static struct qcom_icc_node mas_usb3 = {
.name = "mas_usb3",
.id = SDM660_MASTER_USB3,
.buswidth = 8,
.mas_rpm_id = 32,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 1,
.qos.prio_level = 1,
.qos.qos_port = 2,
.num_links = ARRAY_SIZE(mas_usb3_links),
.links = mas_usb3_links,
};
static const u16 mas_crypto_links[] = {
SDM660_SLAVE_A2NOC_SNOC
};
static struct qcom_icc_node mas_crypto = {
.name = "mas_crypto",
.id = SDM660_MASTER_CRYPTO_C0,
.buswidth = 8,
.mas_rpm_id = 23,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 1,
.qos.prio_level = 1,
.qos.qos_port = 11,
.num_links = ARRAY_SIZE(mas_crypto_links),
.links = mas_crypto_links,
};
static const u16 mas_gnoc_bimc_links[] = {
SDM660_SLAVE_EBI
};
static struct qcom_icc_node mas_gnoc_bimc = {
.name = "mas_gnoc_bimc",
.id = SDM660_MASTER_GNOC_BIMC,
.buswidth = 4,
.mas_rpm_id = 144,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 0,
.num_links = ARRAY_SIZE(mas_gnoc_bimc_links),
.links = mas_gnoc_bimc_links,
};
static const u16 mas_oxili_links[] = {
SDM660_SLAVE_HMSS_L3,
SDM660_SLAVE_EBI,
SDM660_SLAVE_BIMC_SNOC
};
static struct qcom_icc_node mas_oxili = {
.name = "mas_oxili",
.id = SDM660_MASTER_OXILI,
.buswidth = 4,
.mas_rpm_id = 6,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 1,
.num_links = ARRAY_SIZE(mas_oxili_links),
.links = mas_oxili_links,
};
static const u16 mas_mnoc_bimc_links[] = {
SDM660_SLAVE_HMSS_L3,
SDM660_SLAVE_EBI,
SDM660_SLAVE_BIMC_SNOC
};
static struct qcom_icc_node mas_mnoc_bimc = {
.name = "mas_mnoc_bimc",
.id = SDM660_MASTER_MNOC_BIMC,
.buswidth = 4,
.mas_rpm_id = 2,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 2,
.num_links = ARRAY_SIZE(mas_mnoc_bimc_links),
.links = mas_mnoc_bimc_links,
};
static const u16 mas_snoc_bimc_links[] = {
SDM660_SLAVE_HMSS_L3,
SDM660_SLAVE_EBI
};
static struct qcom_icc_node mas_snoc_bimc = {
.name = "mas_snoc_bimc",
.id = SDM660_MASTER_SNOC_BIMC,
.buswidth = 4,
.mas_rpm_id = 3,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_snoc_bimc_links),
.links = mas_snoc_bimc_links,
};
static const u16 mas_pimem_links[] = {
SDM660_SLAVE_HMSS_L3,
SDM660_SLAVE_EBI
};
static struct qcom_icc_node mas_pimem = {
.name = "mas_pimem",
.id = SDM660_MASTER_PIMEM,
.buswidth = 4,
.mas_rpm_id = 113,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 1,
.qos.prio_level = 1,
.qos.qos_port = 4,
.num_links = ARRAY_SIZE(mas_pimem_links),
.links = mas_pimem_links,
};
static const u16 mas_snoc_cnoc_links[] = {
SDM660_SLAVE_CLK_CTL,
SDM660_SLAVE_QDSS_CFG,
SDM660_SLAVE_QM_CFG,
SDM660_SLAVE_SRVC_CNOC,
SDM660_SLAVE_UFS_CFG,
SDM660_SLAVE_TCSR,
SDM660_SLAVE_A2NOC_SMMU_CFG,
SDM660_SLAVE_SNOC_CFG,
SDM660_SLAVE_TLMM_SOUTH,
SDM660_SLAVE_MPM,
SDM660_SLAVE_CNOC_MNOC_MMSS_CFG,
SDM660_SLAVE_SDCC_2,
SDM660_SLAVE_SDCC_1,
SDM660_SLAVE_SPDM,
SDM660_SLAVE_PMIC_ARB,
SDM660_SLAVE_PRNG,
SDM660_SLAVE_MSS_CFG,
SDM660_SLAVE_GPUSS_CFG,
SDM660_SLAVE_IMEM_CFG,
SDM660_SLAVE_USB3_0,
SDM660_SLAVE_A2NOC_CFG,
SDM660_SLAVE_TLMM_NORTH,
SDM660_SLAVE_USB_HS,
SDM660_SLAVE_PDM,
SDM660_SLAVE_TLMM_CENTER,
SDM660_SLAVE_AHB2PHY,
SDM660_SLAVE_BLSP_2,
SDM660_SLAVE_BLSP_1,
SDM660_SLAVE_PIMEM_CFG,
SDM660_SLAVE_GLM,
SDM660_SLAVE_MESSAGE_RAM,
SDM660_SLAVE_BIMC_CFG,
SDM660_SLAVE_CNOC_MNOC_CFG
};
static struct qcom_icc_node mas_snoc_cnoc = {
.name = "mas_snoc_cnoc",
.id = SDM660_MASTER_SNOC_CNOC,
.buswidth = 8,
.mas_rpm_id = 52,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mas_snoc_cnoc_links),
.links = mas_snoc_cnoc_links,
};
static const u16 mas_qdss_dap_links[] = {
SDM660_SLAVE_CLK_CTL,
SDM660_SLAVE_QDSS_CFG,
SDM660_SLAVE_QM_CFG,
SDM660_SLAVE_SRVC_CNOC,
SDM660_SLAVE_UFS_CFG,
SDM660_SLAVE_TCSR,
SDM660_SLAVE_A2NOC_SMMU_CFG,
SDM660_SLAVE_SNOC_CFG,
SDM660_SLAVE_TLMM_SOUTH,
SDM660_SLAVE_MPM,
SDM660_SLAVE_CNOC_MNOC_MMSS_CFG,
SDM660_SLAVE_SDCC_2,
SDM660_SLAVE_SDCC_1,
SDM660_SLAVE_SPDM,
SDM660_SLAVE_PMIC_ARB,
SDM660_SLAVE_PRNG,
SDM660_SLAVE_MSS_CFG,
SDM660_SLAVE_GPUSS_CFG,
SDM660_SLAVE_IMEM_CFG,
SDM660_SLAVE_USB3_0,
SDM660_SLAVE_A2NOC_CFG,
SDM660_SLAVE_TLMM_NORTH,
SDM660_SLAVE_USB_HS,
SDM660_SLAVE_PDM,
SDM660_SLAVE_TLMM_CENTER,
SDM660_SLAVE_AHB2PHY,
SDM660_SLAVE_BLSP_2,
SDM660_SLAVE_BLSP_1,
SDM660_SLAVE_PIMEM_CFG,
SDM660_SLAVE_GLM,
SDM660_SLAVE_MESSAGE_RAM,
SDM660_SLAVE_CNOC_A2NOC,
SDM660_SLAVE_BIMC_CFG,
SDM660_SLAVE_CNOC_MNOC_CFG
};
static struct qcom_icc_node mas_qdss_dap = {
.name = "mas_qdss_dap",
.id = SDM660_MASTER_QDSS_DAP,
.buswidth = 8,
.mas_rpm_id = 49,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mas_qdss_dap_links),
.links = mas_qdss_dap_links,
};
static const u16 mas_apss_proc_links[] = {
SDM660_SLAVE_GNOC_SNOC,
SDM660_SLAVE_GNOC_BIMC
};
static struct qcom_icc_node mas_apss_proc = {
.name = "mas_apss_proc",
.id = SDM660_MASTER_APPS_PROC,
.buswidth = 16,
.mas_rpm_id = 0,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mas_apss_proc_links),
.links = mas_apss_proc_links,
};
static const u16 mas_cnoc_mnoc_mmss_cfg_links[] = {
SDM660_SLAVE_VENUS_THROTTLE_CFG,
SDM660_SLAVE_VENUS_CFG,
SDM660_SLAVE_CAMERA_THROTTLE_CFG,
SDM660_SLAVE_SMMU_CFG,
SDM660_SLAVE_CAMERA_CFG,
SDM660_SLAVE_CSI_PHY_CFG,
SDM660_SLAVE_DISPLAY_THROTTLE_CFG,
SDM660_SLAVE_DISPLAY_CFG,
SDM660_SLAVE_MMSS_CLK_CFG,
SDM660_SLAVE_MNOC_MPU_CFG,
SDM660_SLAVE_MISC_CFG,
SDM660_SLAVE_MMSS_CLK_XPU_CFG
};
static struct qcom_icc_node mas_cnoc_mnoc_mmss_cfg = {
.name = "mas_cnoc_mnoc_mmss_cfg",
.id = SDM660_MASTER_CNOC_MNOC_MMSS_CFG,
.buswidth = 8,
.mas_rpm_id = 4,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mas_cnoc_mnoc_mmss_cfg_links),
.links = mas_cnoc_mnoc_mmss_cfg_links,
};
static const u16 mas_cnoc_mnoc_cfg_links[] = {
SDM660_SLAVE_SRVC_MNOC
};
static struct qcom_icc_node mas_cnoc_mnoc_cfg = {
.name = "mas_cnoc_mnoc_cfg",
.id = SDM660_MASTER_CNOC_MNOC_CFG,
.buswidth = 4,
.mas_rpm_id = 5,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mas_cnoc_mnoc_cfg_links),
.links = mas_cnoc_mnoc_cfg_links,
};
static const u16 mas_cpp_links[] = {
SDM660_SLAVE_MNOC_BIMC
};
static struct qcom_icc_node mas_cpp = {
.name = "mas_cpp",
.id = SDM660_MASTER_CPP,
.buswidth = 16,
.mas_rpm_id = 115,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 4,
.num_links = ARRAY_SIZE(mas_cpp_links),
.links = mas_cpp_links,
};
static const u16 mas_jpeg_links[] = {
SDM660_SLAVE_MNOC_BIMC
};
static struct qcom_icc_node mas_jpeg = {
.name = "mas_jpeg",
.id = SDM660_MASTER_JPEG,
.buswidth = 16,
.mas_rpm_id = 7,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 6,
.num_links = ARRAY_SIZE(mas_jpeg_links),
.links = mas_jpeg_links,
};
static const u16 mas_mdp_p0_links[] = {
SDM660_SLAVE_MNOC_BIMC
};
static struct qcom_icc_node mas_mdp_p0 = {
.name = "mas_mdp_p0",
.id = SDM660_MASTER_MDP_P0,
.buswidth = 16,
.mas_rpm_id = 8,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 0,
.num_links = ARRAY_SIZE(mas_mdp_p0_links),
.links = mas_mdp_p0_links,
};
static const u16 mas_mdp_p1_links[] = {
SDM660_SLAVE_MNOC_BIMC
};
static struct qcom_icc_node mas_mdp_p1 = {
.name = "mas_mdp_p1",
.id = SDM660_MASTER_MDP_P1,
.buswidth = 16,
.mas_rpm_id = 61,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 1,
.num_links = ARRAY_SIZE(mas_mdp_p1_links),
.links = mas_mdp_p1_links,
};
static const u16 mas_venus_links[] = {
SDM660_SLAVE_MNOC_BIMC
};
static struct qcom_icc_node mas_venus = {
.name = "mas_venus",
.id = SDM660_MASTER_VENUS,
.buswidth = 16,
.mas_rpm_id = 9,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 1,
.num_links = ARRAY_SIZE(mas_venus_links),
.links = mas_venus_links,
};
static const u16 mas_vfe_links[] = {
SDM660_SLAVE_MNOC_BIMC
};
static struct qcom_icc_node mas_vfe = {
.name = "mas_vfe",
.id = SDM660_MASTER_VFE,
.buswidth = 16,
.mas_rpm_id = 11,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 5,
.num_links = ARRAY_SIZE(mas_vfe_links),
.links = mas_vfe_links,
};
static const u16 mas_qdss_etr_links[] = {
SDM660_SLAVE_PIMEM,
SDM660_SLAVE_IMEM,
SDM660_SLAVE_SNOC_CNOC,
SDM660_SLAVE_SNOC_BIMC
};
static struct qcom_icc_node mas_qdss_etr = {
.name = "mas_qdss_etr",
.id = SDM660_MASTER_QDSS_ETR,
.buswidth = 8,
.mas_rpm_id = 31,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 1,
.qos.prio_level = 1,
.qos.qos_port = 1,
.num_links = ARRAY_SIZE(mas_qdss_etr_links),
.links = mas_qdss_etr_links,
};
static const u16 mas_qdss_bam_links[] = {
SDM660_SLAVE_PIMEM,
SDM660_SLAVE_IMEM,
SDM660_SLAVE_SNOC_CNOC,
SDM660_SLAVE_SNOC_BIMC
};
static struct qcom_icc_node mas_qdss_bam = {
.name = "mas_qdss_bam",
.id = SDM660_MASTER_QDSS_BAM,
.buswidth = 4,
.mas_rpm_id = 19,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 1,
.qos.prio_level = 1,
.qos.qos_port = 0,
.num_links = ARRAY_SIZE(mas_qdss_bam_links),
.links = mas_qdss_bam_links,
};
static const u16 mas_snoc_cfg_links[] = {
SDM660_SLAVE_SRVC_SNOC
};
static struct qcom_icc_node mas_snoc_cfg = {
.name = "mas_snoc_cfg",
.id = SDM660_MASTER_SNOC_CFG,
.buswidth = 4,
.mas_rpm_id = 20,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_snoc_cfg_links),
.links = mas_snoc_cfg_links,
};
static const u16 mas_bimc_snoc_links[] = {
SDM660_SLAVE_PIMEM,
SDM660_SLAVE_IPA,
SDM660_SLAVE_QDSS_STM,
SDM660_SLAVE_LPASS,
SDM660_SLAVE_HMSS,
SDM660_SLAVE_CDSP,
SDM660_SLAVE_SNOC_CNOC,
SDM660_SLAVE_WLAN,
SDM660_SLAVE_IMEM
};
static struct qcom_icc_node mas_bimc_snoc = {
.name = "mas_bimc_snoc",
.id = SDM660_MASTER_BIMC_SNOC,
.buswidth = 8,
.mas_rpm_id = 21,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_bimc_snoc_links),
.links = mas_bimc_snoc_links,
};
static const u16 mas_gnoc_snoc_links[] = {
SDM660_SLAVE_PIMEM,
SDM660_SLAVE_IPA,
SDM660_SLAVE_QDSS_STM,
SDM660_SLAVE_LPASS,
SDM660_SLAVE_HMSS,
SDM660_SLAVE_CDSP,
SDM660_SLAVE_SNOC_CNOC,
SDM660_SLAVE_WLAN,
SDM660_SLAVE_IMEM
};
static struct qcom_icc_node mas_gnoc_snoc = {
.name = "mas_gnoc_snoc",
.id = SDM660_MASTER_GNOC_SNOC,
.buswidth = 8,
.mas_rpm_id = 150,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_gnoc_snoc_links),
.links = mas_gnoc_snoc_links,
};
static const u16 mas_a2noc_snoc_links[] = {
SDM660_SLAVE_PIMEM,
SDM660_SLAVE_IPA,
SDM660_SLAVE_QDSS_STM,
SDM660_SLAVE_LPASS,
SDM660_SLAVE_HMSS,
SDM660_SLAVE_SNOC_BIMC,
SDM660_SLAVE_CDSP,
SDM660_SLAVE_SNOC_CNOC,
SDM660_SLAVE_WLAN,
SDM660_SLAVE_IMEM
};
static struct qcom_icc_node mas_a2noc_snoc = {
.name = "mas_a2noc_snoc",
.id = SDM660_MASTER_A2NOC_SNOC,
.buswidth = 16,
.mas_rpm_id = 112,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_a2noc_snoc_links),
.links = mas_a2noc_snoc_links,
};
static const u16 slv_a2noc_snoc_links[] = {
SDM660_MASTER_A2NOC_SNOC
};
static struct qcom_icc_node slv_a2noc_snoc = {
.name = "slv_a2noc_snoc",
.id = SDM660_SLAVE_A2NOC_SNOC,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 143,
.num_links = ARRAY_SIZE(slv_a2noc_snoc_links),
.links = slv_a2noc_snoc_links,
};
static struct qcom_icc_node slv_ebi = {
.name = "slv_ebi",
.id = SDM660_SLAVE_EBI,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 0,
};
static struct qcom_icc_node slv_hmss_l3 = {
.name = "slv_hmss_l3",
.id = SDM660_SLAVE_HMSS_L3,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 160,
};
static const u16 slv_bimc_snoc_links[] = {
SDM660_MASTER_BIMC_SNOC
};
static struct qcom_icc_node slv_bimc_snoc = {
.name = "slv_bimc_snoc",
.id = SDM660_SLAVE_BIMC_SNOC,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 2,
.num_links = ARRAY_SIZE(slv_bimc_snoc_links),
.links = slv_bimc_snoc_links,
};
static const u16 slv_cnoc_a2noc_links[] = {
SDM660_MASTER_CNOC_A2NOC
};
static struct qcom_icc_node slv_cnoc_a2noc = {
.name = "slv_cnoc_a2noc",
.id = SDM660_SLAVE_CNOC_A2NOC,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 208,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(slv_cnoc_a2noc_links),
.links = slv_cnoc_a2noc_links,
};
static struct qcom_icc_node slv_mpm = {
.name = "slv_mpm",
.id = SDM660_SLAVE_MPM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 62,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_pmic_arb = {
.name = "slv_pmic_arb",
.id = SDM660_SLAVE_PMIC_ARB,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 59,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_tlmm_north = {
.name = "slv_tlmm_north",
.id = SDM660_SLAVE_TLMM_NORTH,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 214,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_tcsr = {
.name = "slv_tcsr",
.id = SDM660_SLAVE_TCSR,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 50,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_pimem_cfg = {
.name = "slv_pimem_cfg",
.id = SDM660_SLAVE_PIMEM_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 167,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_imem_cfg = {
.name = "slv_imem_cfg",
.id = SDM660_SLAVE_IMEM_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 54,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_message_ram = {
.name = "slv_message_ram",
.id = SDM660_SLAVE_MESSAGE_RAM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 55,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_glm = {
.name = "slv_glm",
.id = SDM660_SLAVE_GLM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 209,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_bimc_cfg = {
.name = "slv_bimc_cfg",
.id = SDM660_SLAVE_BIMC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 56,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_prng = {
.name = "slv_prng",
.id = SDM660_SLAVE_PRNG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 44,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_spdm = {
.name = "slv_spdm",
.id = SDM660_SLAVE_SPDM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 60,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_qdss_cfg = {
.name = "slv_qdss_cfg",
.id = SDM660_SLAVE_QDSS_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 63,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static const u16 slv_cnoc_mnoc_cfg_links[] = {
SDM660_MASTER_CNOC_MNOC_CFG
};
static struct qcom_icc_node slv_cnoc_mnoc_cfg = {
.name = "slv_cnoc_mnoc_cfg",
.id = SDM660_SLAVE_CNOC_MNOC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 66,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(slv_cnoc_mnoc_cfg_links),
.links = slv_cnoc_mnoc_cfg_links,
};
static struct qcom_icc_node slv_snoc_cfg = {
.name = "slv_snoc_cfg",
.id = SDM660_SLAVE_SNOC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 70,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_qm_cfg = {
.name = "slv_qm_cfg",
.id = SDM660_SLAVE_QM_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 212,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_clk_ctl = {
.name = "slv_clk_ctl",
.id = SDM660_SLAVE_CLK_CTL,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 47,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_mss_cfg = {
.name = "slv_mss_cfg",
.id = SDM660_SLAVE_MSS_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 48,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_tlmm_south = {
.name = "slv_tlmm_south",
.id = SDM660_SLAVE_TLMM_SOUTH,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 217,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_ufs_cfg = {
.name = "slv_ufs_cfg",
.id = SDM660_SLAVE_UFS_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 92,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_a2noc_cfg = {
.name = "slv_a2noc_cfg",
.id = SDM660_SLAVE_A2NOC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 150,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_a2noc_smmu_cfg = {
.name = "slv_a2noc_smmu_cfg",
.id = SDM660_SLAVE_A2NOC_SMMU_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 152,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_gpuss_cfg = {
.name = "slv_gpuss_cfg",
.id = SDM660_SLAVE_GPUSS_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 11,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_ahb2phy = {
.name = "slv_ahb2phy",
.id = SDM660_SLAVE_AHB2PHY,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 163,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_blsp_1 = {
.name = "slv_blsp_1",
.id = SDM660_SLAVE_BLSP_1,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 39,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_sdcc_1 = {
.name = "slv_sdcc_1",
.id = SDM660_SLAVE_SDCC_1,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 31,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_sdcc_2 = {
.name = "slv_sdcc_2",
.id = SDM660_SLAVE_SDCC_2,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 33,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_tlmm_center = {
.name = "slv_tlmm_center",
.id = SDM660_SLAVE_TLMM_CENTER,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 218,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_blsp_2 = {
.name = "slv_blsp_2",
.id = SDM660_SLAVE_BLSP_2,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 37,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_pdm = {
.name = "slv_pdm",
.id = SDM660_SLAVE_PDM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 41,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static const u16 slv_cnoc_mnoc_mmss_cfg_links[] = {
SDM660_MASTER_CNOC_MNOC_MMSS_CFG
};
static struct qcom_icc_node slv_cnoc_mnoc_mmss_cfg = {
.name = "slv_cnoc_mnoc_mmss_cfg",
.id = SDM660_SLAVE_CNOC_MNOC_MMSS_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 58,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(slv_cnoc_mnoc_mmss_cfg_links),
.links = slv_cnoc_mnoc_mmss_cfg_links,
};
static struct qcom_icc_node slv_usb_hs = {
.name = "slv_usb_hs",
.id = SDM660_SLAVE_USB_HS,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 40,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_usb3_0 = {
.name = "slv_usb3_0",
.id = SDM660_SLAVE_USB3_0,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 22,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_srvc_cnoc = {
.name = "slv_srvc_cnoc",
.id = SDM660_SLAVE_SRVC_CNOC,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 76,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static const u16 slv_gnoc_bimc_links[] = {
SDM660_MASTER_GNOC_BIMC
};
static struct qcom_icc_node slv_gnoc_bimc = {
.name = "slv_gnoc_bimc",
.id = SDM660_SLAVE_GNOC_BIMC,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 210,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(slv_gnoc_bimc_links),
.links = slv_gnoc_bimc_links,
};
static const u16 slv_gnoc_snoc_links[] = {
SDM660_MASTER_GNOC_SNOC
};
static struct qcom_icc_node slv_gnoc_snoc = {
.name = "slv_gnoc_snoc",
.id = SDM660_SLAVE_GNOC_SNOC,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 211,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(slv_gnoc_snoc_links),
.links = slv_gnoc_snoc_links,
};
static struct qcom_icc_node slv_camera_cfg = {
.name = "slv_camera_cfg",
.id = SDM660_SLAVE_CAMERA_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 3,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_camera_throttle_cfg = {
.name = "slv_camera_throttle_cfg",
.id = SDM660_SLAVE_CAMERA_THROTTLE_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 154,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_misc_cfg = {
.name = "slv_misc_cfg",
.id = SDM660_SLAVE_MISC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 8,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_venus_throttle_cfg = {
.name = "slv_venus_throttle_cfg",
.id = SDM660_SLAVE_VENUS_THROTTLE_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 178,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_venus_cfg = {
.name = "slv_venus_cfg",
.id = SDM660_SLAVE_VENUS_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 10,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_mmss_clk_xpu_cfg = {
.name = "slv_mmss_clk_xpu_cfg",
.id = SDM660_SLAVE_MMSS_CLK_XPU_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 13,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_mmss_clk_cfg = {
.name = "slv_mmss_clk_cfg",
.id = SDM660_SLAVE_MMSS_CLK_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 12,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_mnoc_mpu_cfg = {
.name = "slv_mnoc_mpu_cfg",
.id = SDM660_SLAVE_MNOC_MPU_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 14,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_display_cfg = {
.name = "slv_display_cfg",
.id = SDM660_SLAVE_DISPLAY_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 4,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_csi_phy_cfg = {
.name = "slv_csi_phy_cfg",
.id = SDM660_SLAVE_CSI_PHY_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 224,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_display_throttle_cfg = {
.name = "slv_display_throttle_cfg",
.id = SDM660_SLAVE_DISPLAY_THROTTLE_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 156,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_smmu_cfg = {
.name = "slv_smmu_cfg",
.id = SDM660_SLAVE_SMMU_CFG,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 205,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static const u16 slv_mnoc_bimc_links[] = {
SDM660_MASTER_MNOC_BIMC
};
static struct qcom_icc_node slv_mnoc_bimc = {
.name = "slv_mnoc_bimc",
.id = SDM660_SLAVE_MNOC_BIMC,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 16,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(slv_mnoc_bimc_links),
.links = slv_mnoc_bimc_links,
};
static struct qcom_icc_node slv_srvc_mnoc = {
.name = "slv_srvc_mnoc",
.id = SDM660_SLAVE_SRVC_MNOC,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 17,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_hmss = {
.name = "slv_hmss",
.id = SDM660_SLAVE_HMSS,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 20,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_lpass = {
.name = "slv_lpass",
.id = SDM660_SLAVE_LPASS,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 21,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_wlan = {
.name = "slv_wlan",
.id = SDM660_SLAVE_WLAN,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 206,
};
static struct qcom_icc_node slv_cdsp = {
.name = "slv_cdsp",
.id = SDM660_SLAVE_CDSP,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 221,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static struct qcom_icc_node slv_ipa = {
.name = "slv_ipa",
.id = SDM660_SLAVE_IPA,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 183,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
};
static const u16 slv_snoc_bimc_links[] = {
SDM660_MASTER_SNOC_BIMC
};
static struct qcom_icc_node slv_snoc_bimc = {
.name = "slv_snoc_bimc",
.id = SDM660_SLAVE_SNOC_BIMC,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 24,
.num_links = ARRAY_SIZE(slv_snoc_bimc_links),
.links = slv_snoc_bimc_links,
};
static const u16 slv_snoc_cnoc_links[] = {
SDM660_MASTER_SNOC_CNOC
};
static struct qcom_icc_node slv_snoc_cnoc = {
.name = "slv_snoc_cnoc",
.id = SDM660_SLAVE_SNOC_CNOC,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 25,
.num_links = ARRAY_SIZE(slv_snoc_cnoc_links),
.links = slv_snoc_cnoc_links,
};
static struct qcom_icc_node slv_imem = {
.name = "slv_imem",
.id = SDM660_SLAVE_IMEM,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 26,
};
static struct qcom_icc_node slv_pimem = {
.name = "slv_pimem",
.id = SDM660_SLAVE_PIMEM,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 166,
};
static struct qcom_icc_node slv_qdss_stm = {
.name = "slv_qdss_stm",
.id = SDM660_SLAVE_QDSS_STM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 30,
};
static struct qcom_icc_node slv_srvc_snoc = {
.name = "slv_srvc_snoc",
.id = SDM660_SLAVE_SRVC_SNOC,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 29,
};
static struct qcom_icc_node * const sdm660_a2noc_nodes[] = {
[MASTER_IPA] = &mas_ipa,
[MASTER_CNOC_A2NOC] = &mas_cnoc_a2noc,
[MASTER_SDCC_1] = &mas_sdcc_1,
[MASTER_SDCC_2] = &mas_sdcc_2,
[MASTER_BLSP_1] = &mas_blsp_1,
[MASTER_BLSP_2] = &mas_blsp_2,
[MASTER_UFS] = &mas_ufs,
[MASTER_USB_HS] = &mas_usb_hs,
[MASTER_USB3] = &mas_usb3,
[MASTER_CRYPTO_C0] = &mas_crypto,
[SLAVE_A2NOC_SNOC] = &slv_a2noc_snoc,
};
static const struct regmap_config sdm660_a2noc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x20000,
.fast_io = true,
};
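/*
 * Each NoC exposes an MMIO window (32-bit registers, 4-byte stride)
 * through which the common icc-rpm code programs the QoS settings
 * declared in the nodes above.
 */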
static const struct qcom_icc_desc sdm660_a2noc = {
.type = QCOM_ICC_NOC,
.nodes = sdm660_a2noc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_a2noc_nodes),
.bus_clk_desc = &aggre2_clk,
.intf_clocks = a2noc_intf_clocks,
.num_intf_clocks = ARRAY_SIZE(a2noc_intf_clocks),
.regmap_cfg = &sdm660_a2noc_regmap_config,
};
static struct qcom_icc_node * const sdm660_bimc_nodes[] = {
[MASTER_GNOC_BIMC] = &mas_gnoc_bimc,
[MASTER_OXILI] = &mas_oxili,
[MASTER_MNOC_BIMC] = &mas_mnoc_bimc,
[MASTER_SNOC_BIMC] = &mas_snoc_bimc,
[MASTER_PIMEM] = &mas_pimem,
[SLAVE_EBI] = &slv_ebi,
[SLAVE_HMSS_L3] = &slv_hmss_l3,
[SLAVE_BIMC_SNOC] = &slv_bimc_snoc,
};
static const struct regmap_config sdm660_bimc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x80000,
.fast_io = true,
};
static const struct qcom_icc_desc sdm660_bimc = {
.type = QCOM_ICC_BIMC,
.nodes = sdm660_bimc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_bimc_nodes),
.bus_clk_desc = &bimc_clk,
.regmap_cfg = &sdm660_bimc_regmap_config,
};
static struct qcom_icc_node * const sdm660_cnoc_nodes[] = {
[MASTER_SNOC_CNOC] = &mas_snoc_cnoc,
[MASTER_QDSS_DAP] = &mas_qdss_dap,
[SLAVE_CNOC_A2NOC] = &slv_cnoc_a2noc,
[SLAVE_MPM] = &slv_mpm,
[SLAVE_PMIC_ARB] = &slv_pmic_arb,
[SLAVE_TLMM_NORTH] = &slv_tlmm_north,
[SLAVE_TCSR] = &slv_tcsr,
[SLAVE_PIMEM_CFG] = &slv_pimem_cfg,
[SLAVE_IMEM_CFG] = &slv_imem_cfg,
[SLAVE_MESSAGE_RAM] = &slv_message_ram,
[SLAVE_GLM] = &slv_glm,
[SLAVE_BIMC_CFG] = &slv_bimc_cfg,
[SLAVE_PRNG] = &slv_prng,
[SLAVE_SPDM] = &slv_spdm,
[SLAVE_QDSS_CFG] = &slv_qdss_cfg,
[SLAVE_CNOC_MNOC_CFG] = &slv_cnoc_mnoc_cfg,
[SLAVE_SNOC_CFG] = &slv_snoc_cfg,
[SLAVE_QM_CFG] = &slv_qm_cfg,
[SLAVE_CLK_CTL] = &slv_clk_ctl,
[SLAVE_MSS_CFG] = &slv_mss_cfg,
[SLAVE_TLMM_SOUTH] = &slv_tlmm_south,
[SLAVE_UFS_CFG] = &slv_ufs_cfg,
[SLAVE_A2NOC_CFG] = &slv_a2noc_cfg,
[SLAVE_A2NOC_SMMU_CFG] = &slv_a2noc_smmu_cfg,
[SLAVE_GPUSS_CFG] = &slv_gpuss_cfg,
[SLAVE_AHB2PHY] = &slv_ahb2phy,
[SLAVE_BLSP_1] = &slv_blsp_1,
[SLAVE_SDCC_1] = &slv_sdcc_1,
[SLAVE_SDCC_2] = &slv_sdcc_2,
[SLAVE_TLMM_CENTER] = &slv_tlmm_center,
[SLAVE_BLSP_2] = &slv_blsp_2,
[SLAVE_PDM] = &slv_pdm,
[SLAVE_CNOC_MNOC_MMSS_CFG] = &slv_cnoc_mnoc_mmss_cfg,
[SLAVE_USB_HS] = &slv_usb_hs,
[SLAVE_USB3_0] = &slv_usb3_0,
[SLAVE_SRVC_CNOC] = &slv_srvc_cnoc,
};
static const struct regmap_config sdm660_cnoc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x10000,
.fast_io = true,
};
static const struct qcom_icc_desc sdm660_cnoc = {
.type = QCOM_ICC_NOC,
.nodes = sdm660_cnoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_cnoc_nodes),
.bus_clk_desc = &bus_2_clk,
.regmap_cfg = &sdm660_cnoc_regmap_config,
};
static struct qcom_icc_node * const sdm660_gnoc_nodes[] = {
[MASTER_APSS_PROC] = &mas_apss_proc,
[SLAVE_GNOC_BIMC] = &slv_gnoc_bimc,
[SLAVE_GNOC_SNOC] = &slv_gnoc_snoc,
};
static const struct regmap_config sdm660_gnoc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0xe000,
.fast_io = true,
};
static const struct qcom_icc_desc sdm660_gnoc = {
.type = QCOM_ICC_NOC,
.nodes = sdm660_gnoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_gnoc_nodes),
.regmap_cfg = &sdm660_gnoc_regmap_config,
};
static struct qcom_icc_node * const sdm660_mnoc_nodes[] = {
[MASTER_CPP] = &mas_cpp,
[MASTER_JPEG] = &mas_jpeg,
[MASTER_MDP_P0] = &mas_mdp_p0,
[MASTER_MDP_P1] = &mas_mdp_p1,
[MASTER_VENUS] = &mas_venus,
[MASTER_VFE] = &mas_vfe,
[MASTER_CNOC_MNOC_MMSS_CFG] = &mas_cnoc_mnoc_mmss_cfg,
[MASTER_CNOC_MNOC_CFG] = &mas_cnoc_mnoc_cfg,
[SLAVE_CAMERA_CFG] = &slv_camera_cfg,
[SLAVE_CAMERA_THROTTLE_CFG] = &slv_camera_throttle_cfg,
[SLAVE_MISC_CFG] = &slv_misc_cfg,
[SLAVE_VENUS_THROTTLE_CFG] = &slv_venus_throttle_cfg,
[SLAVE_VENUS_CFG] = &slv_venus_cfg,
[SLAVE_MMSS_CLK_XPU_CFG] = &slv_mmss_clk_xpu_cfg,
[SLAVE_MMSS_CLK_CFG] = &slv_mmss_clk_cfg,
[SLAVE_MNOC_MPU_CFG] = &slv_mnoc_mpu_cfg,
[SLAVE_DISPLAY_CFG] = &slv_display_cfg,
[SLAVE_CSI_PHY_CFG] = &slv_csi_phy_cfg,
[SLAVE_DISPLAY_THROTTLE_CFG] = &slv_display_throttle_cfg,
[SLAVE_SMMU_CFG] = &slv_smmu_cfg,
[SLAVE_SRVC_MNOC] = &slv_srvc_mnoc,
[SLAVE_MNOC_BIMC] = &slv_mnoc_bimc,
};
static const struct regmap_config sdm660_mnoc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x10000,
.fast_io = true,
};
static const struct qcom_icc_desc sdm660_mnoc = {
.type = QCOM_ICC_NOC,
.nodes = sdm660_mnoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_mnoc_nodes),
.bus_clk_desc = &mmaxi_0_clk,
.intf_clocks = mm_intf_clocks,
.num_intf_clocks = ARRAY_SIZE(mm_intf_clocks),
.regmap_cfg = &sdm660_mnoc_regmap_config,
};
static struct qcom_icc_node * const sdm660_snoc_nodes[] = {
[MASTER_QDSS_ETR] = &mas_qdss_etr,
[MASTER_QDSS_BAM] = &mas_qdss_bam,
[MASTER_SNOC_CFG] = &mas_snoc_cfg,
[MASTER_BIMC_SNOC] = &mas_bimc_snoc,
[MASTER_A2NOC_SNOC] = &mas_a2noc_snoc,
[MASTER_GNOC_SNOC] = &mas_gnoc_snoc,
[SLAVE_HMSS] = &slv_hmss,
[SLAVE_LPASS] = &slv_lpass,
[SLAVE_WLAN] = &slv_wlan,
[SLAVE_CDSP] = &slv_cdsp,
[SLAVE_IPA] = &slv_ipa,
[SLAVE_SNOC_BIMC] = &slv_snoc_bimc,
[SLAVE_SNOC_CNOC] = &slv_snoc_cnoc,
[SLAVE_IMEM] = &slv_imem,
[SLAVE_PIMEM] = &slv_pimem,
[SLAVE_QDSS_STM] = &slv_qdss_stm,
[SLAVE_SRVC_SNOC] = &slv_srvc_snoc,
};
static const struct regmap_config sdm660_snoc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x20000,
.fast_io = true,
};
static const struct qcom_icc_desc sdm660_snoc = {
.type = QCOM_ICC_NOC,
.nodes = sdm660_snoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_snoc_nodes),
.bus_clk_desc = &bus_1_clk,
.regmap_cfg = &sdm660_snoc_regmap_config,
};
static const struct of_device_id sdm660_noc_of_match[] = {
{ .compatible = "qcom,sdm660-a2noc", .data = &sdm660_a2noc },
{ .compatible = "qcom,sdm660-bimc", .data = &sdm660_bimc },
{ .compatible = "qcom,sdm660-cnoc", .data = &sdm660_cnoc },
{ .compatible = "qcom,sdm660-gnoc", .data = &sdm660_gnoc },
{ .compatible = "qcom,sdm660-mnoc", .data = &sdm660_mnoc },
{ .compatible = "qcom,sdm660-snoc", .data = &sdm660_snoc },
{ },
};
MODULE_DEVICE_TABLE(of, sdm660_noc_of_match);
static struct platform_driver sdm660_noc_driver = {
.probe = qnoc_probe,
.remove = qnoc_remove,
.driver = {
.name = "qnoc-sdm660",
.of_match_table = sdm660_noc_of_match,
},
};
module_platform_driver(sdm660_noc_driver);
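/*
 * Illustrative (not from this file) devicetree instantiation of one of
 * the NoCs matched above; the unit address and register range are
 * placeholders and clock properties are omitted:
 *
 *	bimc: interconnect@1008000 {
 *		compatible = "qcom,sdm660-bimc";
 *		reg = <0x01008000 0x80000>;
 *		#interconnect-cells = <1>;
 *	};
 */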
MODULE_DESCRIPTION("Qualcomm sdm660 NoC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/interconnect/qcom/sdm660.c |
// SPDX-License-Identifier: GPL-2.0
/*
* RPM over SMD communication wrapper for interconnects
*
* Copyright (C) 2019 Linaro Ltd
* Author: Georgi Djakov <[email protected]>
*/
#include <linux/interconnect-provider.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/soc/qcom/smd-rpm.h>
#include "icc-rpm.h"
#define RPM_KEY_BW 0x00007762
#define QCOM_RPM_SMD_KEY_RATE 0x007a484b
static struct qcom_smd_rpm *icc_smd_rpm;
struct icc_rpm_smd_req {
__le32 key;
__le32 nbytes;
__le32 value;
};
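/*
 * A request is a single key/length/value triplet: an ASCII key ("bw" or
 * "KHz"), the payload size in bytes and the little-endian value itself.
 */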
bool qcom_icc_rpm_smd_available(void)
{
return !!icc_smd_rpm;
}
EXPORT_SYMBOL_GPL(qcom_icc_rpm_smd_available);
int qcom_icc_rpm_smd_send(int ctx, int rsc_type, int id, u32 val)
{
struct icc_rpm_smd_req req = {
.key = cpu_to_le32(RPM_KEY_BW),
.nbytes = cpu_to_le32(sizeof(u32)),
.value = cpu_to_le32(val),
};
return qcom_rpm_smd_write(icc_smd_rpm, ctx, rsc_type, id, &req,
sizeof(req));
}
EXPORT_SYMBOL_GPL(qcom_icc_rpm_smd_send);
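/*
 * A hedged usage sketch: callers in the common icc-rpm code are
 * expected to vote along the lines of
 *
 *	qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE, RPM_BUS_MASTER_REQ,
 *			      qn->mas_rpm_id, agg_avg);
 *
 * with the context selecting the active or sleep set and the id being
 * the node's RPM master/slave id (requests are skipped when it is -1).
 */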
int qcom_icc_rpm_set_bus_rate(const struct rpm_clk_resource *clk, int ctx, u32 rate)
{
struct clk_smd_rpm_req req = {
.key = cpu_to_le32(QCOM_RPM_SMD_KEY_RATE),
.nbytes = cpu_to_le32(sizeof(u32)),
};
/* Branch clocks are only on/off */
if (clk->branch)
rate = !!rate;
req.value = cpu_to_le32(rate);
	return qcom_rpm_smd_write(icc_smd_rpm, ctx, clk->resource_type,
				  clk->clock_id, &req, sizeof(req));
}
EXPORT_SYMBOL_GPL(qcom_icc_rpm_set_bus_rate);
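/*
 * The RPM handle is a file-scope singleton: this platform driver binds
 * to a child device created by the smd-rpm parent and merely caches the
 * parent's drvdata for the providers above, dropping it on remove.
 */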
static int qcom_icc_rpm_smd_remove(struct platform_device *pdev)
{
icc_smd_rpm = NULL;
return 0;
}
static int qcom_icc_rpm_smd_probe(struct platform_device *pdev)
{
icc_smd_rpm = dev_get_drvdata(pdev->dev.parent);
if (!icc_smd_rpm) {
dev_err(&pdev->dev, "unable to retrieve handle to RPM\n");
return -ENODEV;
}
return 0;
}
static struct platform_driver qcom_interconnect_rpm_smd_driver = {
.driver = {
.name = "icc_smd_rpm",
},
.probe = qcom_icc_rpm_smd_probe,
.remove = qcom_icc_rpm_smd_remove,
};
module_platform_driver(qcom_interconnect_rpm_smd_driver);
MODULE_AUTHOR("Georgi Djakov <[email protected]>");
MODULE_DESCRIPTION("Qualcomm SMD RPM interconnect proxy driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:icc_smd_rpm");
| linux-master | drivers/interconnect/qcom/smd-rpm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2021, Linaro Ltd.
*/
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sc8180x.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sc8180x.h"
static struct qcom_icc_node mas_qhm_a1noc_cfg = {
.name = "mas_qhm_a1noc_cfg",
.id = SC8180X_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8180X_SLAVE_SERVICE_A1NOC }
};
static struct qcom_icc_node mas_xm_ufs_card = {
.name = "mas_xm_ufs_card",
.id = SC8180X_MASTER_UFS_CARD,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8180X_A1NOC_SNOC_SLV }
};
static struct qcom_icc_node mas_xm_ufs_g4 = {
.name = "mas_xm_ufs_g4",
.id = SC8180X_MASTER_UFS_GEN4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8180X_A1NOC_SNOC_SLV }
};
static struct qcom_icc_node mas_xm_ufs_mem = {
.name = "mas_xm_ufs_mem",
.id = SC8180X_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8180X_A1NOC_SNOC_SLV }
};
static struct qcom_icc_node mas_xm_usb3_0 = {
.name = "mas_xm_usb3_0",
.id = SC8180X_MASTER_USB3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8180X_A1NOC_SNOC_SLV }
};
static struct qcom_icc_node mas_xm_usb3_1 = {
.name = "mas_xm_usb3_1",
.id = SC8180X_MASTER_USB3_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8180X_A1NOC_SNOC_SLV }
};
static struct qcom_icc_node mas_xm_usb3_2 = {
.name = "mas_xm_usb3_2",
.id = SC8180X_MASTER_USB3_2,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8180X_A1NOC_SNOC_SLV }
};
static struct qcom_icc_node mas_qhm_a2noc_cfg = {
.name = "mas_qhm_a2noc_cfg",
.id = SC8180X_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8180X_SLAVE_SERVICE_A2NOC }
};
static struct qcom_icc_node mas_qhm_qdss_bam = {
.name = "mas_qhm_qdss_bam",
.id = SC8180X_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8180X_A2NOC_SNOC_SLV }
};
static struct qcom_icc_node mas_qhm_qspi = {
.name = "mas_qhm_qspi",
.id = SC8180X_MASTER_QSPI_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8180X_A2NOC_SNOC_SLV }
};
static struct qcom_icc_node mas_qhm_qspi1 = {
.name = "mas_qhm_qspi1",
.id = SC8180X_MASTER_QSPI_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8180X_A2NOC_SNOC_SLV }
};
static struct qcom_icc_node mas_qhm_qup0 = {
.name = "mas_qhm_qup0",
.id = SC8180X_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8180X_A2NOC_SNOC_SLV }
};
static struct qcom_icc_node mas_qhm_qup1 = {
.name = "mas_qhm_qup1",
.id = SC8180X_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8180X_A2NOC_SNOC_SLV }
};
static struct qcom_icc_node mas_qhm_qup2 = {
.name = "mas_qhm_qup2",
.id = SC8180X_MASTER_QUP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8180X_A2NOC_SNOC_SLV }
};
static struct qcom_icc_node mas_qhm_sensorss_ahb = {
.name = "mas_qhm_sensorss_ahb",
.id = SC8180X_MASTER_SENSORS_AHB,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8180X_A2NOC_SNOC_SLV }
};
static struct qcom_icc_node mas_qxm_crypto = {
.name = "mas_qxm_crypto",
.id = SC8180X_MASTER_CRYPTO_CORE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8180X_A2NOC_SNOC_SLV }
};
static struct qcom_icc_node mas_qxm_ipa = {
.name = "mas_qxm_ipa",
.id = SC8180X_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8180X_A2NOC_SNOC_SLV }
};
static struct qcom_icc_node mas_xm_emac = {
.name = "mas_xm_emac",
.id = SC8180X_MASTER_EMAC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8180X_A2NOC_SNOC_SLV }
};
static struct qcom_icc_node mas_xm_pcie3_0 = {
.name = "mas_xm_pcie3_0",
.id = SC8180X_MASTER_PCIE,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC }
};
static struct qcom_icc_node mas_xm_pcie3_1 = {
.name = "mas_xm_pcie3_1",
.id = SC8180X_MASTER_PCIE_1,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC }
};
static struct qcom_icc_node mas_xm_pcie3_2 = {
.name = "mas_xm_pcie3_2",
.id = SC8180X_MASTER_PCIE_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC }
};
static struct qcom_icc_node mas_xm_pcie3_3 = {
.name = "mas_xm_pcie3_3",
.id = SC8180X_MASTER_PCIE_3,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC }
};
static struct qcom_icc_node mas_xm_qdss_etr = {
.name = "mas_xm_qdss_etr",
.id = SC8180X_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8180X_A2NOC_SNOC_SLV }
};
static struct qcom_icc_node mas_xm_sdc2 = {
.name = "mas_xm_sdc2",
.id = SC8180X_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8180X_A2NOC_SNOC_SLV }
};
static struct qcom_icc_node mas_xm_sdc4 = {
.name = "mas_xm_sdc4",
.id = SC8180X_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8180X_A2NOC_SNOC_SLV }
};
static struct qcom_icc_node mas_qxm_camnoc_hf0_uncomp = {
.name = "mas_qxm_camnoc_hf0_uncomp",
.id = SC8180X_MASTER_CAMNOC_HF0_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8180X_SLAVE_CAMNOC_UNCOMP }
};
static struct qcom_icc_node mas_qxm_camnoc_hf1_uncomp = {
.name = "mas_qxm_camnoc_hf1_uncomp",
.id = SC8180X_MASTER_CAMNOC_HF1_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8180X_SLAVE_CAMNOC_UNCOMP }
};
static struct qcom_icc_node mas_qxm_camnoc_sf_uncomp = {
.name = "mas_qxm_camnoc_sf_uncomp",
.id = SC8180X_MASTER_CAMNOC_SF_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8180X_SLAVE_CAMNOC_UNCOMP }
};
static struct qcom_icc_node mas_qnm_npu = {
.name = "mas_qnm_npu",
.id = SC8180X_MASTER_NPU,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8180X_SLAVE_CDSP_MEM_NOC }
};
static struct qcom_icc_node mas_qnm_snoc = {
.name = "mas_qnm_snoc",
.id = SC8180X_SNOC_CNOC_MAS,
.channels = 1,
.buswidth = 8,
.num_links = 56,
.links = { SC8180X_SLAVE_TLMM_SOUTH,
SC8180X_SLAVE_CDSP_CFG,
SC8180X_SLAVE_SPSS_CFG,
SC8180X_SLAVE_CAMERA_CFG,
SC8180X_SLAVE_SDCC_4,
SC8180X_SLAVE_AHB2PHY_CENTER,
SC8180X_SLAVE_SDCC_2,
SC8180X_SLAVE_PCIE_2_CFG,
SC8180X_SLAVE_CNOC_MNOC_CFG,
SC8180X_SLAVE_EMAC_CFG,
SC8180X_SLAVE_QSPI_0,
SC8180X_SLAVE_QSPI_1,
SC8180X_SLAVE_TLMM_EAST,
SC8180X_SLAVE_SNOC_CFG,
SC8180X_SLAVE_AHB2PHY_EAST,
SC8180X_SLAVE_GLM,
SC8180X_SLAVE_PDM,
SC8180X_SLAVE_PCIE_1_CFG,
SC8180X_SLAVE_A2NOC_CFG,
SC8180X_SLAVE_QDSS_CFG,
SC8180X_SLAVE_DISPLAY_CFG,
SC8180X_SLAVE_TCSR,
SC8180X_SLAVE_UFS_MEM_0_CFG,
SC8180X_SLAVE_CNOC_DDRSS,
SC8180X_SLAVE_PCIE_0_CFG,
SC8180X_SLAVE_QUP_1,
SC8180X_SLAVE_QUP_2,
SC8180X_SLAVE_NPU_CFG,
SC8180X_SLAVE_CRYPTO_0_CFG,
SC8180X_SLAVE_GRAPHICS_3D_CFG,
SC8180X_SLAVE_VENUS_CFG,
SC8180X_SLAVE_TSIF,
SC8180X_SLAVE_IPA_CFG,
SC8180X_SLAVE_CLK_CTL,
SC8180X_SLAVE_SECURITY,
SC8180X_SLAVE_AOP,
SC8180X_SLAVE_AHB2PHY_WEST,
SC8180X_SLAVE_AHB2PHY_SOUTH,
SC8180X_SLAVE_SERVICE_CNOC,
SC8180X_SLAVE_UFS_CARD_CFG,
SC8180X_SLAVE_USB3_1,
SC8180X_SLAVE_USB3_2,
SC8180X_SLAVE_PCIE_3_CFG,
SC8180X_SLAVE_RBCPR_CX_CFG,
SC8180X_SLAVE_TLMM_WEST,
SC8180X_SLAVE_A1NOC_CFG,
SC8180X_SLAVE_AOSS,
SC8180X_SLAVE_PRNG,
SC8180X_SLAVE_VSENSE_CTRL_CFG,
SC8180X_SLAVE_QUP_0,
SC8180X_SLAVE_USB3,
SC8180X_SLAVE_RBCPR_MMCX_CFG,
SC8180X_SLAVE_PIMEM_CFG,
SC8180X_SLAVE_UFS_MEM_1_CFG,
SC8180X_SLAVE_RBCPR_MX_CFG,
SC8180X_SLAVE_IMEM_CFG }
};
static struct qcom_icc_node mas_qhm_cnoc_dc_noc = {
.name = "mas_qhm_cnoc_dc_noc",
.id = SC8180X_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
.links = { SC8180X_SLAVE_LLCC_CFG,
SC8180X_SLAVE_GEM_NOC_CFG }
};
static struct qcom_icc_node mas_acm_apps = {
.name = "mas_acm_apps",
.id = SC8180X_MASTER_AMPSS_M0,
.channels = 4,
.buswidth = 64,
.num_links = 3,
.links = { SC8180X_SLAVE_ECC,
SC8180X_SLAVE_LLCC,
SC8180X_SLAVE_GEM_NOC_SNOC }
};
static struct qcom_icc_node mas_acm_gpu_tcu = {
.name = "mas_acm_gpu_tcu",
.id = SC8180X_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SC8180X_SLAVE_LLCC,
SC8180X_SLAVE_GEM_NOC_SNOC }
};
static struct qcom_icc_node mas_acm_sys_tcu = {
.name = "mas_acm_sys_tcu",
.id = SC8180X_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SC8180X_SLAVE_LLCC,
SC8180X_SLAVE_GEM_NOC_SNOC }
};
static struct qcom_icc_node mas_qhm_gemnoc_cfg = {
.name = "mas_qhm_gemnoc_cfg",
.id = SC8180X_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 3,
.links = { SC8180X_SLAVE_SERVICE_GEM_NOC_1,
SC8180X_SLAVE_SERVICE_GEM_NOC,
SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG }
};
static struct qcom_icc_node mas_qnm_cmpnoc = {
.name = "mas_qnm_cmpnoc",
.id = SC8180X_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 3,
.links = { SC8180X_SLAVE_ECC,
SC8180X_SLAVE_LLCC,
SC8180X_SLAVE_GEM_NOC_SNOC }
};
static struct qcom_icc_node mas_qnm_gpu = {
.name = "mas_qnm_gpu",
.id = SC8180X_MASTER_GRAPHICS_3D,
.channels = 4,
.buswidth = 32,
.num_links = 2,
.links = { SC8180X_SLAVE_LLCC,
SC8180X_SLAVE_GEM_NOC_SNOC }
};
static struct qcom_icc_node mas_qnm_mnoc_hf = {
.name = "mas_qnm_mnoc_hf",
.id = SC8180X_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SC8180X_SLAVE_LLCC }
};
static struct qcom_icc_node mas_qnm_mnoc_sf = {
.name = "mas_qnm_mnoc_sf",
.id = SC8180X_MASTER_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
.links = { SC8180X_SLAVE_LLCC,
SC8180X_SLAVE_GEM_NOC_SNOC }
};
static struct qcom_icc_node mas_qnm_pcie = {
.name = "mas_qnm_pcie",
.id = SC8180X_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
.links = { SC8180X_SLAVE_LLCC,
SC8180X_SLAVE_GEM_NOC_SNOC }
};
static struct qcom_icc_node mas_qnm_snoc_gc = {
.name = "mas_qnm_snoc_gc",
.id = SC8180X_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8180X_SLAVE_LLCC }
};
static struct qcom_icc_node mas_qnm_snoc_sf = {
.name = "mas_qnm_snoc_sf",
.id = SC8180X_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8180X_SLAVE_LLCC }
};
static struct qcom_icc_node mas_qxm_ecc = {
.name = "mas_qxm_ecc",
.id = SC8180X_MASTER_ECC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SC8180X_SLAVE_LLCC }
};
static struct qcom_icc_node mas_llcc_mc = {
.name = "mas_llcc_mc",
.id = SC8180X_MASTER_LLCC,
.channels = 8,
.buswidth = 4,
.num_links = 1,
.links = { SC8180X_SLAVE_EBI_CH0 }
};
static struct qcom_icc_node mas_qhm_mnoc_cfg = {
.name = "mas_qhm_mnoc_cfg",
.id = SC8180X_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8180X_SLAVE_SERVICE_MNOC }
};
static struct qcom_icc_node mas_qxm_camnoc_hf0 = {
.name = "mas_qxm_camnoc_hf0",
.id = SC8180X_MASTER_CAMNOC_HF0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC }
};
static struct qcom_icc_node mas_qxm_camnoc_hf1 = {
.name = "mas_qxm_camnoc_hf1",
.id = SC8180X_MASTER_CAMNOC_HF1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC }
};
static struct qcom_icc_node mas_qxm_camnoc_sf = {
.name = "mas_qxm_camnoc_sf",
.id = SC8180X_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
};
static struct qcom_icc_node mas_qxm_mdp0 = {
.name = "mas_qxm_mdp0",
.id = SC8180X_MASTER_MDP_PORT0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC }
};
static struct qcom_icc_node mas_qxm_mdp1 = {
.name = "mas_qxm_mdp1",
.id = SC8180X_MASTER_MDP_PORT1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC }
};
static struct qcom_icc_node mas_qxm_rot = {
.name = "mas_qxm_rot",
.id = SC8180X_MASTER_ROTATOR,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
};
static struct qcom_icc_node mas_qxm_venus0 = {
.name = "mas_qxm_venus0",
.id = SC8180X_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
};
static struct qcom_icc_node mas_qxm_venus1 = {
.name = "mas_qxm_venus1",
.id = SC8180X_MASTER_VIDEO_P1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
};
static struct qcom_icc_node mas_qxm_venus_arm9 = {
.name = "mas_qxm_venus_arm9",
.id = SC8180X_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
};
static struct qcom_icc_node mas_qhm_snoc_cfg = {
.name = "mas_qhm_snoc_cfg",
.id = SC8180X_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8180X_SLAVE_SERVICE_SNOC }
};
static struct qcom_icc_node mas_qnm_aggre1_noc = {
.name = "mas_qnm_aggre1_noc",
.id = SC8180X_A1NOC_SNOC_MAS,
.channels = 1,
.buswidth = 32,
.num_links = 6,
.links = { SC8180X_SLAVE_SNOC_GEM_NOC_SF,
SC8180X_SLAVE_PIMEM,
SC8180X_SLAVE_OCIMEM,
SC8180X_SLAVE_APPSS,
SC8180X_SNOC_CNOC_SLV,
SC8180X_SLAVE_QDSS_STM }
};
static struct qcom_icc_node mas_qnm_aggre2_noc = {
.name = "mas_qnm_aggre2_noc",
.id = SC8180X_A2NOC_SNOC_MAS,
.channels = 1,
.buswidth = 16,
.num_links = 11,
.links = { SC8180X_SLAVE_SNOC_GEM_NOC_SF,
SC8180X_SLAVE_PIMEM,
SC8180X_SLAVE_PCIE_3,
SC8180X_SLAVE_OCIMEM,
SC8180X_SLAVE_APPSS,
SC8180X_SLAVE_PCIE_2,
SC8180X_SNOC_CNOC_SLV,
SC8180X_SLAVE_PCIE_0,
SC8180X_SLAVE_PCIE_1,
SC8180X_SLAVE_TCU,
SC8180X_SLAVE_QDSS_STM }
};
static struct qcom_icc_node mas_qnm_gemnoc = {
.name = "mas_qnm_gemnoc",
.id = SC8180X_MASTER_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 6,
.links = { SC8180X_SLAVE_PIMEM,
SC8180X_SLAVE_OCIMEM,
SC8180X_SLAVE_APPSS,
SC8180X_SNOC_CNOC_SLV,
SC8180X_SLAVE_TCU,
SC8180X_SLAVE_QDSS_STM }
};
static struct qcom_icc_node mas_qxm_pimem = {
.name = "mas_qxm_pimem",
.id = SC8180X_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SC8180X_SLAVE_SNOC_GEM_NOC_GC,
SC8180X_SLAVE_OCIMEM }
};
static struct qcom_icc_node mas_xm_gic = {
.name = "mas_xm_gic",
.id = SC8180X_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SC8180X_SLAVE_SNOC_GEM_NOC_GC,
SC8180X_SLAVE_OCIMEM }
};
static struct qcom_icc_node mas_qup_core_0 = {
.name = "mas_qup_core_0",
.id = SC8180X_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8180X_SLAVE_QUP_CORE_0 }
};
static struct qcom_icc_node mas_qup_core_1 = {
.name = "mas_qup_core_1",
.id = SC8180X_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8180X_SLAVE_QUP_CORE_1 }
};
static struct qcom_icc_node mas_qup_core_2 = {
.name = "mas_qup_core_2",
.id = SC8180X_MASTER_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8180X_SLAVE_QUP_CORE_2 }
};
static struct qcom_icc_node slv_qns_a1noc_snoc = {
.name = "slv_qns_a1noc_snoc",
.id = SC8180X_A1NOC_SNOC_SLV,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8180X_A1NOC_SNOC_MAS }
};
static struct qcom_icc_node slv_srvc_aggre1_noc = {
.name = "slv_srvc_aggre1_noc",
.id = SC8180X_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qns_a2noc_snoc = {
.name = "slv_qns_a2noc_snoc",
.id = SC8180X_A2NOC_SNOC_SLV,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8180X_A2NOC_SNOC_MAS }
};
static struct qcom_icc_node slv_qns_pcie_mem_noc = {
.name = "slv_qns_pcie_mem_noc",
.id = SC8180X_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8180X_MASTER_GEM_NOC_PCIE_SNOC }
};
static struct qcom_icc_node slv_srvc_aggre2_noc = {
.name = "slv_srvc_aggre2_noc",
.id = SC8180X_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qns_camnoc_uncomp = {
.name = "slv_qns_camnoc_uncomp",
.id = SC8180X_SLAVE_CAMNOC_UNCOMP,
.channels = 1,
.buswidth = 32
};
static struct qcom_icc_node slv_qns_cdsp_mem_noc = {
.name = "slv_qns_cdsp_mem_noc",
.id = SC8180X_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SC8180X_MASTER_COMPUTE_NOC }
};
static struct qcom_icc_node slv_qhs_a1_noc_cfg = {
.name = "slv_qhs_a1_noc_cfg",
.id = SC8180X_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8180X_MASTER_A1NOC_CFG }
};
static struct qcom_icc_node slv_qhs_a2_noc_cfg = {
.name = "slv_qhs_a2_noc_cfg",
.id = SC8180X_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8180X_MASTER_A2NOC_CFG }
};
static struct qcom_icc_node slv_qhs_ahb2phy_refgen_center = {
.name = "slv_qhs_ahb2phy_refgen_center",
.id = SC8180X_SLAVE_AHB2PHY_CENTER,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_ahb2phy_refgen_east = {
.name = "slv_qhs_ahb2phy_refgen_east",
.id = SC8180X_SLAVE_AHB2PHY_EAST,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_ahb2phy_refgen_west = {
.name = "slv_qhs_ahb2phy_refgen_west",
.id = SC8180X_SLAVE_AHB2PHY_WEST,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_ahb2phy_south = {
.name = "slv_qhs_ahb2phy_south",
.id = SC8180X_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_aop = {
.name = "slv_qhs_aop",
.id = SC8180X_SLAVE_AOP,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_aoss = {
.name = "slv_qhs_aoss",
.id = SC8180X_SLAVE_AOSS,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_camera_cfg = {
.name = "slv_qhs_camera_cfg",
.id = SC8180X_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_clk_ctl = {
.name = "slv_qhs_clk_ctl",
.id = SC8180X_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_compute_dsp = {
.name = "slv_qhs_compute_dsp",
.id = SC8180X_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_cpr_cx = {
.name = "slv_qhs_cpr_cx",
.id = SC8180X_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_cpr_mmcx = {
.name = "slv_qhs_cpr_mmcx",
.id = SC8180X_SLAVE_RBCPR_MMCX_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_cpr_mx = {
.name = "slv_qhs_cpr_mx",
.id = SC8180X_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_crypto0_cfg = {
.name = "slv_qhs_crypto0_cfg",
.id = SC8180X_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_ddrss_cfg = {
.name = "slv_qhs_ddrss_cfg",
.id = SC8180X_SLAVE_CNOC_DDRSS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8180X_MASTER_CNOC_DC_NOC }
};
static struct qcom_icc_node slv_qhs_display_cfg = {
.name = "slv_qhs_display_cfg",
.id = SC8180X_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_emac_cfg = {
.name = "slv_qhs_emac_cfg",
.id = SC8180X_SLAVE_EMAC_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_glm = {
.name = "slv_qhs_glm",
.id = SC8180X_SLAVE_GLM,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_gpuss_cfg = {
.name = "slv_qhs_gpuss_cfg",
.id = SC8180X_SLAVE_GRAPHICS_3D_CFG,
.channels = 1,
.buswidth = 8
};
static struct qcom_icc_node slv_qhs_imem_cfg = {
.name = "slv_qhs_imem_cfg",
.id = SC8180X_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_ipa = {
.name = "slv_qhs_ipa",
.id = SC8180X_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_mnoc_cfg = {
.name = "slv_qhs_mnoc_cfg",
.id = SC8180X_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8180X_MASTER_CNOC_MNOC_CFG }
};
static struct qcom_icc_node slv_qhs_npu_cfg = {
.name = "slv_qhs_npu_cfg",
.id = SC8180X_SLAVE_NPU_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_pcie0_cfg = {
.name = "slv_qhs_pcie0_cfg",
.id = SC8180X_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_pcie1_cfg = {
.name = "slv_qhs_pcie1_cfg",
.id = SC8180X_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_pcie2_cfg = {
.name = "slv_qhs_pcie2_cfg",
.id = SC8180X_SLAVE_PCIE_2_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_pcie3_cfg = {
.name = "slv_qhs_pcie3_cfg",
.id = SC8180X_SLAVE_PCIE_3_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_pdm = {
.name = "slv_qhs_pdm",
.id = SC8180X_SLAVE_PDM,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_pimem_cfg = {
.name = "slv_qhs_pimem_cfg",
.id = SC8180X_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_prng = {
.name = "slv_qhs_prng",
.id = SC8180X_SLAVE_PRNG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_qdss_cfg = {
.name = "slv_qhs_qdss_cfg",
.id = SC8180X_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_qspi_0 = {
.name = "slv_qhs_qspi_0",
.id = SC8180X_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_qspi_1 = {
.name = "slv_qhs_qspi_1",
.id = SC8180X_SLAVE_QSPI_1,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_qupv3_east0 = {
.name = "slv_qhs_qupv3_east0",
.id = SC8180X_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_qupv3_east1 = {
.name = "slv_qhs_qupv3_east1",
.id = SC8180X_SLAVE_QUP_2,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_qupv3_west = {
.name = "slv_qhs_qupv3_west",
.id = SC8180X_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_sdc2 = {
.name = "slv_qhs_sdc2",
.id = SC8180X_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_sdc4 = {
.name = "slv_qhs_sdc4",
.id = SC8180X_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_security = {
.name = "slv_qhs_security",
.id = SC8180X_SLAVE_SECURITY,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_snoc_cfg = {
.name = "slv_qhs_snoc_cfg",
.id = SC8180X_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8180X_MASTER_SNOC_CFG }
};
static struct qcom_icc_node slv_qhs_spss_cfg = {
.name = "slv_qhs_spss_cfg",
.id = SC8180X_SLAVE_SPSS_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_tcsr = {
.name = "slv_qhs_tcsr",
.id = SC8180X_SLAVE_TCSR,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_tlmm_east = {
.name = "slv_qhs_tlmm_east",
.id = SC8180X_SLAVE_TLMM_EAST,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_tlmm_south = {
.name = "slv_qhs_tlmm_south",
.id = SC8180X_SLAVE_TLMM_SOUTH,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_tlmm_west = {
.name = "slv_qhs_tlmm_west",
.id = SC8180X_SLAVE_TLMM_WEST,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_tsif = {
.name = "slv_qhs_tsif",
.id = SC8180X_SLAVE_TSIF,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_ufs_card_cfg = {
.name = "slv_qhs_ufs_card_cfg",
.id = SC8180X_SLAVE_UFS_CARD_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_ufs_mem0_cfg = {
.name = "slv_qhs_ufs_mem0_cfg",
.id = SC8180X_SLAVE_UFS_MEM_0_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_ufs_mem1_cfg = {
.name = "slv_qhs_ufs_mem1_cfg",
.id = SC8180X_SLAVE_UFS_MEM_1_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_usb3_0 = {
.name = "slv_qhs_usb3_0",
.id = SC8180X_SLAVE_USB3,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_usb3_1 = {
.name = "slv_qhs_usb3_1",
.id = SC8180X_SLAVE_USB3_1,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_usb3_2 = {
.name = "slv_qhs_usb3_2",
.id = SC8180X_SLAVE_USB3_2,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_venus_cfg = {
.name = "slv_qhs_venus_cfg",
.id = SC8180X_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_vsense_ctrl_cfg = {
.name = "slv_qhs_vsense_ctrl_cfg",
.id = SC8180X_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_srvc_cnoc = {
.name = "slv_srvc_cnoc",
.id = SC8180X_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_gemnoc = {
.name = "slv_qhs_gemnoc",
.id = SC8180X_SLAVE_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8180X_MASTER_GEM_NOC_CFG }
};
static struct qcom_icc_node slv_qhs_llcc = {
.name = "slv_qhs_llcc",
.id = SC8180X_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_mdsp_ms_mpu_cfg = {
.name = "slv_qhs_mdsp_ms_mpu_cfg",
.id = SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qns_ecc = {
.name = "slv_qns_ecc",
.id = SC8180X_SLAVE_ECC,
.channels = 1,
.buswidth = 32
};
static struct qcom_icc_node slv_qns_gem_noc_snoc = {
.name = "slv_qns_gem_noc_snoc",
.id = SC8180X_SLAVE_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8180X_MASTER_GEM_NOC_SNOC }
};
static struct qcom_icc_node slv_qns_llcc = {
.name = "slv_qns_llcc",
.id = SC8180X_SLAVE_LLCC,
.channels = 8,
.buswidth = 16,
.num_links = 1,
.links = { SC8180X_MASTER_LLCC }
};
static struct qcom_icc_node slv_srvc_gemnoc = {
.name = "slv_srvc_gemnoc",
.id = SC8180X_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_srvc_gemnoc1 = {
.name = "slv_srvc_gemnoc1",
.id = SC8180X_SLAVE_SERVICE_GEM_NOC_1,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_ebi = {
.name = "slv_ebi",
.id = SC8180X_SLAVE_EBI_CH0,
.channels = 8,
.buswidth = 4
};
static struct qcom_icc_node slv_qns2_mem_noc = {
.name = "slv_qns2_mem_noc",
.id = SC8180X_SLAVE_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8180X_MASTER_MNOC_SF_MEM_NOC }
};
static struct qcom_icc_node slv_qns_mem_noc_hf = {
.name = "slv_qns_mem_noc_hf",
.id = SC8180X_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SC8180X_MASTER_MNOC_HF_MEM_NOC }
};
static struct qcom_icc_node slv_srvc_mnoc = {
.name = "slv_srvc_mnoc",
.id = SC8180X_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_apss = {
.name = "slv_qhs_apss",
.id = SC8180X_SLAVE_APPSS,
.channels = 1,
.buswidth = 8
};
static struct qcom_icc_node slv_qns_cnoc = {
.name = "slv_qns_cnoc",
.id = SC8180X_SNOC_CNOC_SLV,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8180X_SNOC_CNOC_MAS }
};
static struct qcom_icc_node slv_qns_gemnoc_gc = {
.name = "slv_qns_gemnoc_gc",
.id = SC8180X_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8180X_MASTER_SNOC_GC_MEM_NOC }
};
static struct qcom_icc_node slv_qns_gemnoc_sf = {
.name = "slv_qns_gemnoc_sf",
.id = SC8180X_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8180X_MASTER_SNOC_SF_MEM_NOC }
};
static struct qcom_icc_node slv_qxs_imem = {
.name = "slv_qxs_imem",
.id = SC8180X_SLAVE_OCIMEM,
.channels = 1,
.buswidth = 8
};
static struct qcom_icc_node slv_qxs_pimem = {
.name = "slv_qxs_pimem",
.id = SC8180X_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8
};
static struct qcom_icc_node slv_srvc_snoc = {
.name = "slv_srvc_snoc",
.id = SC8180X_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_xs_pcie_0 = {
.name = "slv_xs_pcie_0",
.id = SC8180X_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8
};
static struct qcom_icc_node slv_xs_pcie_1 = {
.name = "slv_xs_pcie_1",
.id = SC8180X_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 8
};
static struct qcom_icc_node slv_xs_pcie_2 = {
.name = "slv_xs_pcie_2",
.id = SC8180X_SLAVE_PCIE_2,
.channels = 1,
.buswidth = 8
};
static struct qcom_icc_node slv_xs_pcie_3 = {
.name = "slv_xs_pcie_3",
.id = SC8180X_SLAVE_PCIE_3,
.channels = 1,
.buswidth = 8
};
static struct qcom_icc_node slv_xs_qdss_stm = {
.name = "slv_xs_qdss_stm",
.id = SC8180X_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_xs_sys_tcu_cfg = {
.name = "slv_xs_sys_tcu_cfg",
.id = SC8180X_SLAVE_TCU,
.channels = 1,
.buswidth = 8
};
static struct qcom_icc_node slv_qup_core_0 = {
.name = "slv_qup_core_0",
.id = SC8180X_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qup_core_1 = {
.name = "slv_qup_core_1",
.id = SC8180X_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qup_core_2 = {
.name = "slv_qup_core_2",
.id = SC8180X_SLAVE_QUP_CORE_2,
.channels = 1,
.buswidth = 4
};
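/*
 * Bus Clock Manager (BCM) definitions. Each BCM is a shared bus resource
 * managed by RPMh; the framework aggregates the bandwidth votes of the
 * .num_nodes entries in .nodes into a single vote for the BCM. A BCM
 * marked .keepalive retains a minimum vote even when no consumer requests
 * bandwidth, so the bus is never clock-gated entirely.
 */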
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
.num_nodes = 1,
.nodes = { &slv_ebi }
};
static struct qcom_icc_bcm bcm_mc0 = {
.name = "MC0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &slv_ebi }
};
static struct qcom_icc_bcm bcm_sh0 = {
.name = "SH0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &slv_qns_llcc }
};
static struct qcom_icc_bcm bcm_mm0 = {
.name = "MM0",
.num_nodes = 1,
.nodes = { &slv_qns_mem_noc_hf }
};
static struct qcom_icc_bcm bcm_co0 = {
.name = "CO0",
.num_nodes = 1,
.nodes = { &slv_qns_cdsp_mem_noc }
};
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.num_nodes = 1,
.nodes = { &mas_qxm_crypto }
};
static struct qcom_icc_bcm bcm_cn0 = {
.name = "CN0",
.keepalive = true,
.num_nodes = 57,
.nodes = { &mas_qnm_snoc,
&slv_qhs_a1_noc_cfg,
&slv_qhs_a2_noc_cfg,
&slv_qhs_ahb2phy_refgen_center,
&slv_qhs_ahb2phy_refgen_east,
&slv_qhs_ahb2phy_refgen_west,
&slv_qhs_ahb2phy_south,
&slv_qhs_aop,
&slv_qhs_aoss,
&slv_qhs_camera_cfg,
&slv_qhs_clk_ctl,
&slv_qhs_compute_dsp,
&slv_qhs_cpr_cx,
&slv_qhs_cpr_mmcx,
&slv_qhs_cpr_mx,
&slv_qhs_crypto0_cfg,
&slv_qhs_ddrss_cfg,
&slv_qhs_display_cfg,
&slv_qhs_emac_cfg,
&slv_qhs_glm,
&slv_qhs_gpuss_cfg,
&slv_qhs_imem_cfg,
&slv_qhs_ipa,
&slv_qhs_mnoc_cfg,
&slv_qhs_npu_cfg,
&slv_qhs_pcie0_cfg,
&slv_qhs_pcie1_cfg,
&slv_qhs_pcie2_cfg,
&slv_qhs_pcie3_cfg,
&slv_qhs_pdm,
&slv_qhs_pimem_cfg,
&slv_qhs_prng,
&slv_qhs_qdss_cfg,
&slv_qhs_qspi_0,
&slv_qhs_qspi_1,
&slv_qhs_qupv3_east0,
&slv_qhs_qupv3_east1,
&slv_qhs_qupv3_west,
&slv_qhs_sdc2,
&slv_qhs_sdc4,
&slv_qhs_security,
&slv_qhs_snoc_cfg,
&slv_qhs_spss_cfg,
&slv_qhs_tcsr,
&slv_qhs_tlmm_east,
&slv_qhs_tlmm_south,
&slv_qhs_tlmm_west,
&slv_qhs_tsif,
&slv_qhs_ufs_card_cfg,
&slv_qhs_ufs_mem0_cfg,
&slv_qhs_ufs_mem1_cfg,
&slv_qhs_usb3_0,
&slv_qhs_usb3_1,
&slv_qhs_usb3_2,
&slv_qhs_venus_cfg,
&slv_qhs_vsense_ctrl_cfg,
&slv_srvc_cnoc }
};
static struct qcom_icc_bcm bcm_mm1 = {
.name = "MM1",
.num_nodes = 7,
.nodes = { &mas_qxm_camnoc_hf0_uncomp,
&mas_qxm_camnoc_hf1_uncomp,
&mas_qxm_camnoc_sf_uncomp,
&mas_qxm_camnoc_hf0,
&mas_qxm_camnoc_hf1,
&mas_qxm_mdp0,
&mas_qxm_mdp1 }
};
static struct qcom_icc_bcm bcm_qup0 = {
.name = "QUP0",
.num_nodes = 3,
.nodes = { &mas_qup_core_0,
&mas_qup_core_1,
&mas_qup_core_2 }
};
static struct qcom_icc_bcm bcm_sh2 = {
.name = "SH2",
.num_nodes = 1,
.nodes = { &slv_qns_gem_noc_snoc }
};
static struct qcom_icc_bcm bcm_mm2 = {
.name = "MM2",
.num_nodes = 6,
.nodes = { &mas_qxm_camnoc_sf,
&mas_qxm_rot,
&mas_qxm_venus0,
&mas_qxm_venus1,
&mas_qxm_venus_arm9,
&slv_qns2_mem_noc }
};
static struct qcom_icc_bcm bcm_sh3 = {
.name = "SH3",
.keepalive = true,
.num_nodes = 1,
.nodes = { &mas_acm_apps }
};
static struct qcom_icc_bcm bcm_sn0 = {
	.name = "SN0",
	.num_nodes = 1,
	.nodes = { &slv_qns_gemnoc_sf }
};
static struct qcom_icc_bcm bcm_sn1 = {
	.name = "SN1",
	.num_nodes = 1,
	.nodes = { &slv_qxs_imem }
};
static struct qcom_icc_bcm bcm_sn2 = {
	.name = "SN2",
	.keepalive = true,
	.num_nodes = 1,
	.nodes = { &slv_qns_gemnoc_gc }
};
static struct qcom_icc_bcm bcm_co2 = {
	.name = "CO2",
	.num_nodes = 1,
	.nodes = { &mas_qnm_npu }
};
static struct qcom_icc_bcm bcm_sn3 = {
	.name = "SN3",
	.keepalive = true,
	.num_nodes = 2,
	.nodes = { &slv_srvc_aggre1_noc,
		   &slv_qns_cnoc }
};
static struct qcom_icc_bcm bcm_sn4 = {
	.name = "SN4",
	.num_nodes = 1,
	.nodes = { &slv_qxs_pimem }
};
static struct qcom_icc_bcm bcm_sn8 = {
.name = "SN8",
.num_nodes = 4,
.nodes = { &slv_xs_pcie_0,
&slv_xs_pcie_1,
&slv_xs_pcie_2,
&slv_xs_pcie_3 }
};
static struct qcom_icc_bcm bcm_sn9 = {
.name = "SN9",
.num_nodes = 1,
.nodes = { &mas_qnm_aggre1_noc }
};
static struct qcom_icc_bcm bcm_sn11 = {
.name = "SN11",
.num_nodes = 1,
.nodes = { &mas_qnm_aggre2_noc }
};
static struct qcom_icc_bcm bcm_sn14 = {
.name = "SN14",
.num_nodes = 1,
.nodes = { &slv_qns_pcie_mem_noc }
};
static struct qcom_icc_bcm bcm_sn15 = {
.name = "SN15",
.keepalive = true,
.num_nodes = 1,
.nodes = { &mas_qnm_gemnoc }
};
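/*
 * Per-NoC BCM lists: each array names the BCMs that must be voted on when
 * bandwidth is requested through the corresponding NoC provider.
 */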
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_sn3,
&bcm_ce0,
};
static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_sn14,
&bcm_ce0,
};
static struct qcom_icc_bcm * const camnoc_virt_bcms[] = {
&bcm_mm1,
};
static struct qcom_icc_bcm * const compute_noc_bcms[] = {
&bcm_co0,
&bcm_co2,
};
static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
};
static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
&bcm_sh3,
};
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_mc0,
&bcm_acv,
};
static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm2,
};
static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn2,
&bcm_sn3,
&bcm_sn4,
&bcm_sn8,
&bcm_sn9,
&bcm_sn11,
&bcm_sn15,
};
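/*
 * Per-NoC node tables, indexed by the consumer-visible endpoint IDs from
 * the dt-bindings header so that devicetree references resolve to the
 * qcom_icc_node definitions above.
 */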
static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_A1NOC_CFG] = &mas_qhm_a1noc_cfg,
[MASTER_UFS_CARD] = &mas_xm_ufs_card,
[MASTER_UFS_GEN4] = &mas_xm_ufs_g4,
[MASTER_UFS_MEM] = &mas_xm_ufs_mem,
[MASTER_USB3] = &mas_xm_usb3_0,
[MASTER_USB3_1] = &mas_xm_usb3_1,
[MASTER_USB3_2] = &mas_xm_usb3_2,
[A1NOC_SNOC_SLV] = &slv_qns_a1noc_snoc,
[SLAVE_SERVICE_A1NOC] = &slv_srvc_aggre1_noc,
};
static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_A2NOC_CFG] = &mas_qhm_a2noc_cfg,
[MASTER_QDSS_BAM] = &mas_qhm_qdss_bam,
[MASTER_QSPI_0] = &mas_qhm_qspi,
[MASTER_QSPI_1] = &mas_qhm_qspi1,
[MASTER_QUP_0] = &mas_qhm_qup0,
[MASTER_QUP_1] = &mas_qhm_qup1,
[MASTER_QUP_2] = &mas_qhm_qup2,
[MASTER_SENSORS_AHB] = &mas_qhm_sensorss_ahb,
[MASTER_CRYPTO_CORE_0] = &mas_qxm_crypto,
[MASTER_IPA] = &mas_qxm_ipa,
[MASTER_EMAC] = &mas_xm_emac,
[MASTER_PCIE] = &mas_xm_pcie3_0,
[MASTER_PCIE_1] = &mas_xm_pcie3_1,
[MASTER_PCIE_2] = &mas_xm_pcie3_2,
[MASTER_PCIE_3] = &mas_xm_pcie3_3,
[MASTER_QDSS_ETR] = &mas_xm_qdss_etr,
[MASTER_SDCC_2] = &mas_xm_sdc2,
[MASTER_SDCC_4] = &mas_xm_sdc4,
[A2NOC_SNOC_SLV] = &slv_qns_a2noc_snoc,
[SLAVE_ANOC_PCIE_GEM_NOC] = &slv_qns_pcie_mem_noc,
[SLAVE_SERVICE_A2NOC] = &slv_srvc_aggre2_noc,
};
static struct qcom_icc_node * const camnoc_virt_nodes[] = {
[MASTER_CAMNOC_HF0_UNCOMP] = &mas_qxm_camnoc_hf0_uncomp,
[MASTER_CAMNOC_HF1_UNCOMP] = &mas_qxm_camnoc_hf1_uncomp,
[MASTER_CAMNOC_SF_UNCOMP] = &mas_qxm_camnoc_sf_uncomp,
[SLAVE_CAMNOC_UNCOMP] = &slv_qns_camnoc_uncomp,
};
static struct qcom_icc_node * const compute_noc_nodes[] = {
[MASTER_NPU] = &mas_qnm_npu,
[SLAVE_CDSP_MEM_NOC] = &slv_qns_cdsp_mem_noc,
};
static struct qcom_icc_node * const config_noc_nodes[] = {
[SNOC_CNOC_MAS] = &mas_qnm_snoc,
[SLAVE_A1NOC_CFG] = &slv_qhs_a1_noc_cfg,
[SLAVE_A2NOC_CFG] = &slv_qhs_a2_noc_cfg,
[SLAVE_AHB2PHY_CENTER] = &slv_qhs_ahb2phy_refgen_center,
[SLAVE_AHB2PHY_EAST] = &slv_qhs_ahb2phy_refgen_east,
[SLAVE_AHB2PHY_WEST] = &slv_qhs_ahb2phy_refgen_west,
[SLAVE_AHB2PHY_SOUTH] = &slv_qhs_ahb2phy_south,
[SLAVE_AOP] = &slv_qhs_aop,
[SLAVE_AOSS] = &slv_qhs_aoss,
[SLAVE_CAMERA_CFG] = &slv_qhs_camera_cfg,
[SLAVE_CLK_CTL] = &slv_qhs_clk_ctl,
[SLAVE_CDSP_CFG] = &slv_qhs_compute_dsp,
[SLAVE_RBCPR_CX_CFG] = &slv_qhs_cpr_cx,
[SLAVE_RBCPR_MMCX_CFG] = &slv_qhs_cpr_mmcx,
[SLAVE_RBCPR_MX_CFG] = &slv_qhs_cpr_mx,
[SLAVE_CRYPTO_0_CFG] = &slv_qhs_crypto0_cfg,
[SLAVE_CNOC_DDRSS] = &slv_qhs_ddrss_cfg,
[SLAVE_DISPLAY_CFG] = &slv_qhs_display_cfg,
[SLAVE_EMAC_CFG] = &slv_qhs_emac_cfg,
[SLAVE_GLM] = &slv_qhs_glm,
[SLAVE_GRAPHICS_3D_CFG] = &slv_qhs_gpuss_cfg,
[SLAVE_IMEM_CFG] = &slv_qhs_imem_cfg,
[SLAVE_IPA_CFG] = &slv_qhs_ipa,
[SLAVE_CNOC_MNOC_CFG] = &slv_qhs_mnoc_cfg,
[SLAVE_NPU_CFG] = &slv_qhs_npu_cfg,
[SLAVE_PCIE_0_CFG] = &slv_qhs_pcie0_cfg,
[SLAVE_PCIE_1_CFG] = &slv_qhs_pcie1_cfg,
[SLAVE_PCIE_2_CFG] = &slv_qhs_pcie2_cfg,
[SLAVE_PCIE_3_CFG] = &slv_qhs_pcie3_cfg,
[SLAVE_PDM] = &slv_qhs_pdm,
[SLAVE_PIMEM_CFG] = &slv_qhs_pimem_cfg,
[SLAVE_PRNG] = &slv_qhs_prng,
[SLAVE_QDSS_CFG] = &slv_qhs_qdss_cfg,
[SLAVE_QSPI_0] = &slv_qhs_qspi_0,
[SLAVE_QSPI_1] = &slv_qhs_qspi_1,
[SLAVE_QUP_1] = &slv_qhs_qupv3_east0,
[SLAVE_QUP_2] = &slv_qhs_qupv3_east1,
[SLAVE_QUP_0] = &slv_qhs_qupv3_west,
[SLAVE_SDCC_2] = &slv_qhs_sdc2,
[SLAVE_SDCC_4] = &slv_qhs_sdc4,
[SLAVE_SECURITY] = &slv_qhs_security,
[SLAVE_SNOC_CFG] = &slv_qhs_snoc_cfg,
[SLAVE_SPSS_CFG] = &slv_qhs_spss_cfg,
[SLAVE_TCSR] = &slv_qhs_tcsr,
[SLAVE_TLMM_EAST] = &slv_qhs_tlmm_east,
[SLAVE_TLMM_SOUTH] = &slv_qhs_tlmm_south,
[SLAVE_TLMM_WEST] = &slv_qhs_tlmm_west,
[SLAVE_TSIF] = &slv_qhs_tsif,
[SLAVE_UFS_CARD_CFG] = &slv_qhs_ufs_card_cfg,
[SLAVE_UFS_MEM_0_CFG] = &slv_qhs_ufs_mem0_cfg,
[SLAVE_UFS_MEM_1_CFG] = &slv_qhs_ufs_mem1_cfg,
[SLAVE_USB3] = &slv_qhs_usb3_0,
[SLAVE_USB3_1] = &slv_qhs_usb3_1,
[SLAVE_USB3_2] = &slv_qhs_usb3_2,
[SLAVE_VENUS_CFG] = &slv_qhs_venus_cfg,
[SLAVE_VSENSE_CTRL_CFG] = &slv_qhs_vsense_ctrl_cfg,
[SLAVE_SERVICE_CNOC] = &slv_srvc_cnoc,
};
static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &mas_qhm_cnoc_dc_noc,
[SLAVE_GEM_NOC_CFG] = &slv_qhs_gemnoc,
[SLAVE_LLCC_CFG] = &slv_qhs_llcc,
};
static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_AMPSS_M0] = &mas_acm_apps,
[MASTER_GPU_TCU] = &mas_acm_gpu_tcu,
[MASTER_SYS_TCU] = &mas_acm_sys_tcu,
[MASTER_GEM_NOC_CFG] = &mas_qhm_gemnoc_cfg,
[MASTER_COMPUTE_NOC] = &mas_qnm_cmpnoc,
[MASTER_GRAPHICS_3D] = &mas_qnm_gpu,
[MASTER_MNOC_HF_MEM_NOC] = &mas_qnm_mnoc_hf,
[MASTER_MNOC_SF_MEM_NOC] = &mas_qnm_mnoc_sf,
[MASTER_GEM_NOC_PCIE_SNOC] = &mas_qnm_pcie,
[MASTER_SNOC_GC_MEM_NOC] = &mas_qnm_snoc_gc,
[MASTER_SNOC_SF_MEM_NOC] = &mas_qnm_snoc_sf,
[MASTER_ECC] = &mas_qxm_ecc,
[SLAVE_MSS_PROC_MS_MPU_CFG] = &slv_qhs_mdsp_ms_mpu_cfg,
[SLAVE_ECC] = &slv_qns_ecc,
[SLAVE_GEM_NOC_SNOC] = &slv_qns_gem_noc_snoc,
[SLAVE_LLCC] = &slv_qns_llcc,
[SLAVE_SERVICE_GEM_NOC] = &slv_srvc_gemnoc,
[SLAVE_SERVICE_GEM_NOC_1] = &slv_srvc_gemnoc1,
};
static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &mas_llcc_mc,
[SLAVE_EBI_CH0] = &slv_ebi,
};
static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &mas_qhm_mnoc_cfg,
[MASTER_CAMNOC_HF0] = &mas_qxm_camnoc_hf0,
[MASTER_CAMNOC_HF1] = &mas_qxm_camnoc_hf1,
[MASTER_CAMNOC_SF] = &mas_qxm_camnoc_sf,
[MASTER_MDP_PORT0] = &mas_qxm_mdp0,
[MASTER_MDP_PORT1] = &mas_qxm_mdp1,
[MASTER_ROTATOR] = &mas_qxm_rot,
[MASTER_VIDEO_P0] = &mas_qxm_venus0,
[MASTER_VIDEO_P1] = &mas_qxm_venus1,
[MASTER_VIDEO_PROC] = &mas_qxm_venus_arm9,
[SLAVE_MNOC_SF_MEM_NOC] = &slv_qns2_mem_noc,
[SLAVE_MNOC_HF_MEM_NOC] = &slv_qns_mem_noc_hf,
[SLAVE_SERVICE_MNOC] = &slv_srvc_mnoc,
};
static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_SNOC_CFG] = &mas_qhm_snoc_cfg,
[A1NOC_SNOC_MAS] = &mas_qnm_aggre1_noc,
[A2NOC_SNOC_MAS] = &mas_qnm_aggre2_noc,
[MASTER_GEM_NOC_SNOC] = &mas_qnm_gemnoc,
[MASTER_PIMEM] = &mas_qxm_pimem,
[MASTER_GIC] = &mas_xm_gic,
[SLAVE_APPSS] = &slv_qhs_apss,
[SNOC_CNOC_SLV] = &slv_qns_cnoc,
[SLAVE_SNOC_GEM_NOC_GC] = &slv_qns_gemnoc_gc,
[SLAVE_SNOC_GEM_NOC_SF] = &slv_qns_gemnoc_sf,
[SLAVE_OCIMEM] = &slv_qxs_imem,
[SLAVE_PIMEM] = &slv_qxs_pimem,
[SLAVE_SERVICE_SNOC] = &slv_srvc_snoc,
[SLAVE_QDSS_STM] = &slv_xs_qdss_stm,
[SLAVE_TCU] = &slv_xs_sys_tcu_cfg,
};
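/*
 * Provider descriptors: one qcom_icc_desc per NoC, tying its node table
 * to its BCM list for registration by the common icc-rpmh probe code.
 */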
static const struct qcom_icc_desc sc8180x_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
static const struct qcom_icc_desc sc8180x_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
static const struct qcom_icc_desc sc8180x_camnoc_virt = {
.nodes = camnoc_virt_nodes,
.num_nodes = ARRAY_SIZE(camnoc_virt_nodes),
.bcms = camnoc_virt_bcms,
.num_bcms = ARRAY_SIZE(camnoc_virt_bcms),
};
static const struct qcom_icc_desc sc8180x_compute_noc = {
.nodes = compute_noc_nodes,
.num_nodes = ARRAY_SIZE(compute_noc_nodes),
.bcms = compute_noc_bcms,
.num_bcms = ARRAY_SIZE(compute_noc_bcms),
};
static const struct qcom_icc_desc sc8180x_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
static const struct qcom_icc_desc sc8180x_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
};
static const struct qcom_icc_desc sc8180x_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
static const struct qcom_icc_desc sc8180x_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
static const struct qcom_icc_desc sc8180x_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
static const struct qcom_icc_desc sc8180x_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
static struct qcom_icc_bcm * const qup_virt_bcms[] = {
&bcm_qup0,
};
static struct qcom_icc_node * const qup_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &mas_qup_core_0,
[MASTER_QUP_CORE_1] = &mas_qup_core_1,
[MASTER_QUP_CORE_2] = &mas_qup_core_2,
[SLAVE_QUP_CORE_0] = &slv_qup_core_0,
[SLAVE_QUP_CORE_1] = &slv_qup_core_1,
[SLAVE_QUP_CORE_2] = &slv_qup_core_2,
};
static const struct qcom_icc_desc sc8180x_qup_virt = {
.nodes = qup_virt_nodes,
.num_nodes = ARRAY_SIZE(qup_virt_nodes),
.bcms = qup_virt_bcms,
.num_bcms = ARRAY_SIZE(qup_virt_bcms),
};
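/* Map each NoC's devicetree compatible to its provider descriptor. */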
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sc8180x-aggre1-noc", .data = &sc8180x_aggre1_noc },
{ .compatible = "qcom,sc8180x-aggre2-noc", .data = &sc8180x_aggre2_noc },
{ .compatible = "qcom,sc8180x-camnoc-virt", .data = &sc8180x_camnoc_virt },
{ .compatible = "qcom,sc8180x-compute-noc", .data = &sc8180x_compute_noc, },
{ .compatible = "qcom,sc8180x-config-noc", .data = &sc8180x_config_noc },
{ .compatible = "qcom,sc8180x-dc-noc", .data = &sc8180x_dc_noc },
{ .compatible = "qcom,sc8180x-gem-noc", .data = &sc8180x_gem_noc },
{ .compatible = "qcom,sc8180x-mc-virt", .data = &sc8180x_mc_virt },
{ .compatible = "qcom,sc8180x-mmss-noc", .data = &sc8180x_mmss_noc },
{ .compatible = "qcom,sc8180x-qup-virt", .data = &sc8180x_qup_virt },
{ .compatible = "qcom,sc8180x-system-noc", .data = &sc8180x_system_noc },
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
.remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sc8180x",
.of_match_table = qnoc_of_match,
.sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
MODULE_DESCRIPTION("Qualcomm sc8180x NoC driver");
MODULE_LICENSE("GPL v2");
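/*
 * Illustrative consumer-side sketch; not part of this driver. The path
 * name "memory" and the bandwidth numbers below are hypothetical. A
 * device with "interconnects"/"interconnect-names" properties in
 * devicetree votes for bandwidth across these NoCs roughly like so:
 *
 *	#include <linux/interconnect.h>
 *
 *	static int example_vote_bandwidth(struct device *dev)
 *	{
 *		struct icc_path *path;
 *		int ret;
 *
 *		path = of_icc_get(dev, "memory");
 *		if (IS_ERR(path))
 *			return PTR_ERR(path);
 *
 *		ret = icc_set_bw(path, 800000, 1600000);
 *		if (ret) {
 *			icc_put(path);
 *			return ret;
 *		}
 *		return 0;
 *	}
 *
 * icc_set_bw() takes average and peak bandwidth in kBps; icc_put()
 * releases the path and drops its vote when the consumer is done.
 */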
| linux-master | drivers/interconnect/qcom/sc8180x.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Linaro Ltd
*/
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sc8280xp.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sc8280xp.h"
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
.id = SC8280XP_MASTER_QSPI_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
.id = SC8280XP_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
.id = SC8280XP_MASTER_QUP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qnm_a1noc_cfg = {
	.name = "qnm_a1noc_cfg",
	.id = SC8280XP_MASTER_A1NOC_CFG,
	.channels = 1,
	.buswidth = 4,
	.num_links = 1,
	.links = { SC8280XP_SLAVE_SERVICE_A1NOC },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
.id = SC8280XP_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8280XP_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_emac_1 = {
.name = "xm_emac_1",
.id = SC8280XP_MASTER_EMAC_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8280XP_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
.id = SC8280XP_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8280XP_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
.id = SC8280XP_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8280XP_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
.id = SC8280XP_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8280XP_SLAVE_USB_NOC_SNOC },
};
static struct qcom_icc_node xm_usb3_1 = {
.name = "xm_usb3_1",
.id = SC8280XP_MASTER_USB3_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8280XP_SLAVE_USB_NOC_SNOC },
};
static struct qcom_icc_node xm_usb3_mp = {
.name = "xm_usb3_mp",
.id = SC8280XP_MASTER_USB3_MP,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8280XP_SLAVE_USB_NOC_SNOC },
};
static struct qcom_icc_node xm_usb4_host0 = {
.name = "xm_usb4_host0",
.id = SC8280XP_MASTER_USB4_0,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8280XP_SLAVE_USB_NOC_SNOC },
};
static struct qcom_icc_node xm_usb4_host1 = {
.name = "xm_usb4_host1",
.id = SC8280XP_MASTER_USB4_1,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8280XP_SLAVE_USB_NOC_SNOC },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
.id = SC8280XP_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
.id = SC8280XP_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qnm_a2noc_cfg = {
.name = "qnm_a2noc_cfg",
.id = SC8280XP_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_SLAVE_SERVICE_A2NOC },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
.id = SC8280XP_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8280XP_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_sensorss_q6 = {
.name = "qxm_sensorss_q6",
.id = SC8280XP_MASTER_SENSORS_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8280XP_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_sp = {
.name = "qxm_sp",
.id = SC8280XP_MASTER_SP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8280XP_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_emac_0 = {
.name = "xm_emac_0",
.id = SC8280XP_MASTER_EMAC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8280XP_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
.id = SC8280XP_MASTER_PCIE_0,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
.id = SC8280XP_MASTER_PCIE_1,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node xm_pcie3_2a = {
.name = "xm_pcie3_2a",
.id = SC8280XP_MASTER_PCIE_2A,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node xm_pcie3_2b = {
.name = "xm_pcie3_2b",
.id = SC8280XP_MASTER_PCIE_2B,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node xm_pcie3_3a = {
.name = "xm_pcie3_3a",
.id = SC8280XP_MASTER_PCIE_3A,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node xm_pcie3_3b = {
.name = "xm_pcie3_3b",
.id = SC8280XP_MASTER_PCIE_3B,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node xm_pcie3_4 = {
.name = "xm_pcie3_4",
.id = SC8280XP_MASTER_PCIE_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
.id = SC8280XP_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8280XP_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
.id = SC8280XP_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8280XP_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_ufs_card = {
.name = "xm_ufs_card",
.id = SC8280XP_MASTER_UFS_CARD,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8280XP_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
.id = SC8280XP_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_SLAVE_QUP_CORE_0 },
};
static struct qcom_icc_node qup1_core_master = {
.name = "qup1_core_master",
.id = SC8280XP_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_SLAVE_QUP_CORE_1 },
};
static struct qcom_icc_node qup2_core_master = {
.name = "qup2_core_master",
.id = SC8280XP_MASTER_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_SLAVE_QUP_CORE_2 },
};
static struct qcom_icc_node qnm_gemnoc_cnoc = {
.name = "qnm_gemnoc_cnoc",
.id = SC8280XP_MASTER_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 76,
.links = { SC8280XP_SLAVE_AHB2PHY_0,
SC8280XP_SLAVE_AHB2PHY_1,
SC8280XP_SLAVE_AHB2PHY_2,
SC8280XP_SLAVE_AOSS,
SC8280XP_SLAVE_APPSS,
SC8280XP_SLAVE_CAMERA_CFG,
SC8280XP_SLAVE_CLK_CTL,
SC8280XP_SLAVE_CDSP_CFG,
SC8280XP_SLAVE_CDSP1_CFG,
SC8280XP_SLAVE_RBCPR_CX_CFG,
SC8280XP_SLAVE_RBCPR_MMCX_CFG,
SC8280XP_SLAVE_RBCPR_MX_CFG,
SC8280XP_SLAVE_CPR_NSPCX,
SC8280XP_SLAVE_CRYPTO_0_CFG,
SC8280XP_SLAVE_CX_RDPM,
SC8280XP_SLAVE_DCC_CFG,
SC8280XP_SLAVE_DISPLAY_CFG,
SC8280XP_SLAVE_DISPLAY1_CFG,
SC8280XP_SLAVE_EMAC_CFG,
SC8280XP_SLAVE_EMAC1_CFG,
SC8280XP_SLAVE_GFX3D_CFG,
SC8280XP_SLAVE_HWKM,
SC8280XP_SLAVE_IMEM_CFG,
SC8280XP_SLAVE_IPA_CFG,
SC8280XP_SLAVE_IPC_ROUTER_CFG,
SC8280XP_SLAVE_LPASS,
SC8280XP_SLAVE_MX_RDPM,
SC8280XP_SLAVE_MXC_RDPM,
SC8280XP_SLAVE_PCIE_0_CFG,
SC8280XP_SLAVE_PCIE_1_CFG,
SC8280XP_SLAVE_PCIE_2A_CFG,
SC8280XP_SLAVE_PCIE_2B_CFG,
SC8280XP_SLAVE_PCIE_3A_CFG,
SC8280XP_SLAVE_PCIE_3B_CFG,
SC8280XP_SLAVE_PCIE_4_CFG,
SC8280XP_SLAVE_PCIE_RSC_CFG,
SC8280XP_SLAVE_PDM,
SC8280XP_SLAVE_PIMEM_CFG,
SC8280XP_SLAVE_PKA_WRAPPER_CFG,
SC8280XP_SLAVE_PMU_WRAPPER_CFG,
SC8280XP_SLAVE_QDSS_CFG,
SC8280XP_SLAVE_QSPI_0,
SC8280XP_SLAVE_QUP_0,
SC8280XP_SLAVE_QUP_1,
SC8280XP_SLAVE_QUP_2,
SC8280XP_SLAVE_SDCC_2,
SC8280XP_SLAVE_SDCC_4,
SC8280XP_SLAVE_SECURITY,
SC8280XP_SLAVE_SMMUV3_CFG,
SC8280XP_SLAVE_SMSS_CFG,
SC8280XP_SLAVE_SPSS_CFG,
SC8280XP_SLAVE_TCSR,
SC8280XP_SLAVE_TLMM,
SC8280XP_SLAVE_UFS_CARD_CFG,
SC8280XP_SLAVE_UFS_MEM_CFG,
SC8280XP_SLAVE_USB3_0,
SC8280XP_SLAVE_USB3_1,
SC8280XP_SLAVE_USB3_MP,
SC8280XP_SLAVE_USB4_0,
SC8280XP_SLAVE_USB4_1,
SC8280XP_SLAVE_VENUS_CFG,
SC8280XP_SLAVE_VSENSE_CTRL_CFG,
SC8280XP_SLAVE_VSENSE_CTRL_R_CFG,
SC8280XP_SLAVE_A1NOC_CFG,
SC8280XP_SLAVE_A2NOC_CFG,
SC8280XP_SLAVE_ANOC_PCIE_BRIDGE_CFG,
SC8280XP_SLAVE_DDRSS_CFG,
SC8280XP_SLAVE_CNOC_MNOC_CFG,
SC8280XP_SLAVE_SNOC_CFG,
SC8280XP_SLAVE_SNOC_SF_BRIDGE_CFG,
SC8280XP_SLAVE_IMEM,
SC8280XP_SLAVE_PIMEM,
SC8280XP_SLAVE_SERVICE_CNOC,
SC8280XP_SLAVE_QDSS_STM,
SC8280XP_SLAVE_SMSS,
SC8280XP_SLAVE_TCU
},
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
.id = SC8280XP_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 7,
.links = { SC8280XP_SLAVE_PCIE_0,
SC8280XP_SLAVE_PCIE_1,
SC8280XP_SLAVE_PCIE_2A,
SC8280XP_SLAVE_PCIE_2B,
SC8280XP_SLAVE_PCIE_3A,
SC8280XP_SLAVE_PCIE_3B,
SC8280XP_SLAVE_PCIE_4
},
};
static struct qcom_icc_node qnm_cnoc_dc_noc = {
.name = "qnm_cnoc_dc_noc",
.id = SC8280XP_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
.links = { SC8280XP_SLAVE_LLCC_CFG,
SC8280XP_SLAVE_GEM_NOC_CFG
},
};
static struct qcom_icc_node alm_gpu_tcu = {
.name = "alm_gpu_tcu",
.id = SC8280XP_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
SC8280XP_SLAVE_LLCC
},
};
static struct qcom_icc_node alm_pcie_tcu = {
.name = "alm_pcie_tcu",
.id = SC8280XP_MASTER_PCIE_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
SC8280XP_SLAVE_LLCC
},
};
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
.id = SC8280XP_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
SC8280XP_SLAVE_LLCC
},
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
.id = SC8280XP_MASTER_APPSS_PROC,
.channels = 2,
.buswidth = 32,
.num_links = 3,
.links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
SC8280XP_SLAVE_LLCC,
SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC
},
};
static struct qcom_icc_node qnm_cmpnoc0 = {
.name = "qnm_cmpnoc0",
.id = SC8280XP_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
SC8280XP_SLAVE_LLCC
},
};
static struct qcom_icc_node qnm_cmpnoc1 = {
.name = "qnm_cmpnoc1",
.id = SC8280XP_MASTER_COMPUTE_NOC_1,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
SC8280XP_SLAVE_LLCC
},
};
static struct qcom_icc_node qnm_gemnoc_cfg = {
.name = "qnm_gemnoc_cfg",
.id = SC8280XP_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 3,
.links = { SC8280XP_SLAVE_SERVICE_GEM_NOC_1,
SC8280XP_SLAVE_SERVICE_GEM_NOC_2,
SC8280XP_SLAVE_SERVICE_GEM_NOC
},
};
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
.id = SC8280XP_MASTER_GFX3D,
.channels = 4,
.buswidth = 32,
.num_links = 2,
.links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
SC8280XP_SLAVE_LLCC
},
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
.id = SC8280XP_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SC8280XP_SLAVE_LLCC,
SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC
},
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
.id = SC8280XP_MASTER_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
SC8280XP_SLAVE_LLCC
},
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
.id = SC8280XP_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
.links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
SC8280XP_SLAVE_LLCC
},
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
.id = SC8280XP_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8280XP_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
.id = SC8280XP_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
.links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
SC8280XP_SLAVE_LLCC,
SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC },
};
static struct qcom_icc_node qhm_config_noc = {
.name = "qhm_config_noc",
.id = SC8280XP_MASTER_CNOC_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 6,
.links = { SC8280XP_SLAVE_LPASS_CORE_CFG,
SC8280XP_SLAVE_LPASS_LPI_CFG,
SC8280XP_SLAVE_LPASS_MPU_CFG,
SC8280XP_SLAVE_LPASS_TOP_CFG,
SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC,
SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC
},
};
static struct qcom_icc_node qxm_lpass_dsp = {
.name = "qxm_lpass_dsp",
.id = SC8280XP_MASTER_LPASS_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 4,
.links = { SC8280XP_SLAVE_LPASS_TOP_CFG,
SC8280XP_SLAVE_LPASS_SNOC,
SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC,
SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC
},
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
.id = SC8280XP_MASTER_LLCC,
.channels = 8,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_SLAVE_EBI1 },
};
static struct qcom_icc_node qnm_camnoc_hf = {
.name = "qnm_camnoc_hf",
.id = SC8280XP_MASTER_CAMNOC_HF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qnm_mdp0_0 = {
.name = "qnm_mdp0_0",
.id = SC8280XP_MASTER_MDP0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qnm_mdp0_1 = {
.name = "qnm_mdp0_1",
.id = SC8280XP_MASTER_MDP1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qnm_mdp1_0 = {
.name = "qnm_mdp1_0",
.id = SC8280XP_MASTER_MDP_CORE1_0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qnm_mdp1_1 = {
.name = "qnm_mdp1_1",
.id = SC8280XP_MASTER_MDP_CORE1_1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qnm_mnoc_cfg = {
.name = "qnm_mnoc_cfg",
.id = SC8280XP_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_SLAVE_SERVICE_MNOC },
};
static struct qcom_icc_node qnm_rot_0 = {
.name = "qnm_rot_0",
.id = SC8280XP_MASTER_ROTATOR,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_rot_1 = {
.name = "qnm_rot_1",
.id = SC8280XP_MASTER_ROTATOR_1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_video0 = {
.name = "qnm_video0",
.id = SC8280XP_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_video1 = {
.name = "qnm_video1",
.id = SC8280XP_MASTER_VIDEO_P1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_video_cvp = {
.name = "qnm_video_cvp",
.id = SC8280XP_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_camnoc_icp = {
.name = "qxm_camnoc_icp",
.id = SC8280XP_MASTER_CAMNOC_ICP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_camnoc_sf = {
.name = "qxm_camnoc_sf",
.id = SC8280XP_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qhm_nsp_noc_config = {
.name = "qhm_nsp_noc_config",
.id = SC8280XP_MASTER_CDSP_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_SLAVE_SERVICE_NSP_NOC },
};
static struct qcom_icc_node qxm_nsp = {
.name = "qxm_nsp",
.id = SC8280XP_MASTER_CDSP_PROC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SC8280XP_SLAVE_CDSP_MEM_NOC,
SC8280XP_SLAVE_NSP_XFR
},
};
static struct qcom_icc_node qhm_nspb_noc_config = {
.name = "qhm_nspb_noc_config",
.id = SC8280XP_MASTER_CDSPB_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_SLAVE_SERVICE_NSPB_NOC },
};
static struct qcom_icc_node qxm_nspb = {
.name = "qxm_nspb",
.id = SC8280XP_MASTER_CDSP_PROC_B,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SC8280XP_SLAVE_CDSPB_MEM_NOC,
SC8280XP_SLAVE_NSPB_XFR
},
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
.id = SC8280XP_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
.id = SC8280XP_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qnm_aggre_usb_noc = {
.name = "qnm_aggre_usb_noc",
.id = SC8280XP_MASTER_USB_NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qnm_lpass_noc = {
.name = "qnm_lpass_noc",
.id = SC8280XP_MASTER_LPASS_ANOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qnm_snoc_cfg = {
.name = "qnm_snoc_cfg",
.id = SC8280XP_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_SLAVE_SERVICE_SNOC },
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
.id = SC8280XP_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8280XP_SLAVE_SNOC_GEM_NOC_GC },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
.id = SC8280XP_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8280XP_SLAVE_SNOC_GEM_NOC_GC },
};
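/*
 * Slave-side nodes from here on: qns_* gateways forward traffic to the
 * master side of the next NoC via .links, while the qhs_*, xs_* and
 * srvc_* register/service endpoints are leaves with no onward links.
 */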
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
.id = SC8280XP_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8280XP_MASTER_A1NOC_SNOC },
};
static struct qcom_icc_node qns_aggre_usb_snoc = {
.name = "qns_aggre_usb_snoc",
.id = SC8280XP_SLAVE_USB_NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8280XP_MASTER_USB_NOC_SNOC },
};
static struct qcom_icc_node srvc_aggre1_noc = {
.name = "srvc_aggre1_noc",
.id = SC8280XP_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
.id = SC8280XP_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8280XP_MASTER_A2NOC_SNOC },
};
static struct qcom_icc_node qns_pcie_gem_noc = {
.name = "qns_pcie_gem_noc",
.id = SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SC8280XP_MASTER_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
.id = SC8280XP_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
.id = SC8280XP_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup1_core_slave = {
.name = "qup1_core_slave",
.id = SC8280XP_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup2_core_slave = {
.name = "qup2_core_slave",
.id = SC8280XP_SLAVE_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
.id = SC8280XP_SLAVE_AHB2PHY_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy1 = {
.name = "qhs_ahb2phy1",
.id = SC8280XP_SLAVE_AHB2PHY_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy2 = {
.name = "qhs_ahb2phy2",
.id = SC8280XP_SLAVE_AHB2PHY_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
.id = SC8280XP_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
.id = SC8280XP_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
.id = SC8280XP_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
.id = SC8280XP_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_compute0_cfg = {
.name = "qhs_compute0_cfg",
.id = SC8280XP_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_MASTER_CDSP_NOC_CFG },
};
static struct qcom_icc_node qhs_compute1_cfg = {
.name = "qhs_compute1_cfg",
.id = SC8280XP_SLAVE_CDSP1_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_MASTER_CDSPB_NOC_CFG },
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
.id = SC8280XP_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mmcx = {
.name = "qhs_cpr_mmcx",
.id = SC8280XP_SLAVE_RBCPR_MMCX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mx = {
.name = "qhs_cpr_mx",
.id = SC8280XP_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_nspcx = {
.name = "qhs_cpr_nspcx",
.id = SC8280XP_SLAVE_CPR_NSPCX,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
.id = SC8280XP_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cx_rdpm = {
.name = "qhs_cx_rdpm",
.id = SC8280XP_SLAVE_CX_RDPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dcc_cfg = {
.name = "qhs_dcc_cfg",
.id = SC8280XP_SLAVE_DCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display0_cfg = {
.name = "qhs_display0_cfg",
.id = SC8280XP_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display1_cfg = {
.name = "qhs_display1_cfg",
.id = SC8280XP_SLAVE_DISPLAY1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_emac0_cfg = {
.name = "qhs_emac0_cfg",
.id = SC8280XP_SLAVE_EMAC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_emac1_cfg = {
.name = "qhs_emac1_cfg",
.id = SC8280XP_SLAVE_EMAC1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
.id = SC8280XP_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_hwkm = {
.name = "qhs_hwkm",
.id = SC8280XP_SLAVE_HWKM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
.id = SC8280XP_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
.id = SC8280XP_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
.id = SC8280XP_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_cfg = {
.name = "qhs_lpass_cfg",
.id = SC8280XP_SLAVE_LPASS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_MASTER_CNOC_LPASS_AG_NOC },
};
static struct qcom_icc_node qhs_mx_rdpm = {
.name = "qhs_mx_rdpm",
.id = SC8280XP_SLAVE_MX_RDPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mxc_rdpm = {
.name = "qhs_mxc_rdpm",
.id = SC8280XP_SLAVE_MXC_RDPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
.id = SC8280XP_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie1_cfg = {
.name = "qhs_pcie1_cfg",
.id = SC8280XP_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie2a_cfg = {
.name = "qhs_pcie2a_cfg",
.id = SC8280XP_SLAVE_PCIE_2A_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie2b_cfg = {
.name = "qhs_pcie2b_cfg",
.id = SC8280XP_SLAVE_PCIE_2B_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie3a_cfg = {
.name = "qhs_pcie3a_cfg",
.id = SC8280XP_SLAVE_PCIE_3A_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie3b_cfg = {
.name = "qhs_pcie3b_cfg",
.id = SC8280XP_SLAVE_PCIE_3B_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie4_cfg = {
.name = "qhs_pcie4_cfg",
.id = SC8280XP_SLAVE_PCIE_4_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie_rsc_cfg = {
.name = "qhs_pcie_rsc_cfg",
.id = SC8280XP_SLAVE_PCIE_RSC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
.id = SC8280XP_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
.id = SC8280XP_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pka_wrapper_cfg = {
.name = "qhs_pka_wrapper_cfg",
.id = SC8280XP_SLAVE_PKA_WRAPPER_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pmu_wrapper_cfg = {
.name = "qhs_pmu_wrapper_cfg",
.id = SC8280XP_SLAVE_PMU_WRAPPER_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
.id = SC8280XP_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
.id = SC8280XP_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
.id = SC8280XP_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
.id = SC8280XP_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup2 = {
.name = "qhs_qup2",
.id = SC8280XP_SLAVE_QUP_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
.id = SC8280XP_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
.id = SC8280XP_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_security = {
.name = "qhs_security",
.id = SC8280XP_SLAVE_SECURITY,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_smmuv3_cfg = {
.name = "qhs_smmuv3_cfg",
.id = SC8280XP_SLAVE_SMMUV3_CFG,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_smss_cfg = {
.name = "qhs_smss_cfg",
.id = SC8280XP_SLAVE_SMSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_spss_cfg = {
.name = "qhs_spss_cfg",
.id = SC8280XP_SLAVE_SPSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
.id = SC8280XP_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
.id = SC8280XP_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_card_cfg = {
.name = "qhs_ufs_card_cfg",
.id = SC8280XP_SLAVE_UFS_CARD_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
.id = SC8280XP_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
.id = SC8280XP_SLAVE_USB3_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_1 = {
.name = "qhs_usb3_1",
.id = SC8280XP_SLAVE_USB3_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_mp = {
.name = "qhs_usb3_mp",
.id = SC8280XP_SLAVE_USB3_MP,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb4_host_0 = {
.name = "qhs_usb4_host_0",
.id = SC8280XP_SLAVE_USB4_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb4_host_1 = {
.name = "qhs_usb4_host_1",
.id = SC8280XP_SLAVE_USB4_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
.id = SC8280XP_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
.id = SC8280XP_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_vsense_ctrl_r_cfg = {
.name = "qhs_vsense_ctrl_r_cfg",
.id = SC8280XP_SLAVE_VSENSE_CTRL_R_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_a1_noc_cfg = {
.name = "qns_a1_noc_cfg",
.id = SC8280XP_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_MASTER_A1NOC_CFG },
};
static struct qcom_icc_node qns_a2_noc_cfg = {
.name = "qns_a2_noc_cfg",
.id = SC8280XP_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_MASTER_A2NOC_CFG },
};
static struct qcom_icc_node qns_anoc_pcie_bridge_cfg = {
.name = "qns_anoc_pcie_bridge_cfg",
.id = SC8280XP_SLAVE_ANOC_PCIE_BRIDGE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_ddrss_cfg = {
.name = "qns_ddrss_cfg",
.id = SC8280XP_SLAVE_DDRSS_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_MASTER_CNOC_DC_NOC },
};
static struct qcom_icc_node qns_mnoc_cfg = {
.name = "qns_mnoc_cfg",
.id = SC8280XP_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_MASTER_CNOC_MNOC_CFG },
};
static struct qcom_icc_node qns_snoc_cfg = {
.name = "qns_snoc_cfg",
.id = SC8280XP_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_MASTER_SNOC_CFG },
};
static struct qcom_icc_node qns_snoc_sf_bridge_cfg = {
.name = "qns_snoc_sf_bridge_cfg",
.id = SC8280XP_SLAVE_SNOC_SF_BRIDGE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
.id = SC8280XP_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
.id = SC8280XP_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_cnoc = {
.name = "srvc_cnoc",
.id = SC8280XP_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_pcie_0 = {
.name = "xs_pcie_0",
.id = SC8280XP_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 16,
};
static struct qcom_icc_node xs_pcie_1 = {
.name = "xs_pcie_1",
.id = SC8280XP_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 16,
};
static struct qcom_icc_node xs_pcie_2a = {
.name = "xs_pcie_2a",
.id = SC8280XP_SLAVE_PCIE_2A,
.channels = 1,
.buswidth = 16,
};
static struct qcom_icc_node xs_pcie_2b = {
.name = "xs_pcie_2b",
.id = SC8280XP_SLAVE_PCIE_2B,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_pcie_3a = {
.name = "xs_pcie_3a",
.id = SC8280XP_SLAVE_PCIE_3A,
.channels = 1,
.buswidth = 16,
};
static struct qcom_icc_node xs_pcie_3b = {
.name = "xs_pcie_3b",
.id = SC8280XP_SLAVE_PCIE_3B,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_pcie_4 = {
.name = "xs_pcie_4",
.id = SC8280XP_SLAVE_PCIE_4,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
.id = SC8280XP_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_smss = {
.name = "xs_smss",
.id = SC8280XP_SLAVE_SMSS,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
.id = SC8280XP_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
.id = SC8280XP_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_gemnoc = {
.name = "qns_gemnoc",
.id = SC8280XP_SLAVE_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SC8280XP_MASTER_GEM_NOC_CFG },
};
static struct qcom_icc_node qns_gem_noc_cnoc = {
.name = "qns_gem_noc_cnoc",
.id = SC8280XP_SLAVE_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8280XP_MASTER_GEM_NOC_CNOC },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
.id = SC8280XP_SLAVE_LLCC,
.channels = 8,
.buswidth = 16,
.num_links = 1,
.links = { SC8280XP_MASTER_LLCC },
};
static struct qcom_icc_node qns_pcie = {
.name = "qns_pcie",
.id = SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8280XP_MASTER_GEM_NOC_PCIE_SNOC },
};
static struct qcom_icc_node srvc_even_gemnoc = {
.name = "srvc_even_gemnoc",
.id = SC8280XP_SLAVE_SERVICE_GEM_NOC_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_odd_gemnoc = {
.name = "srvc_odd_gemnoc",
.id = SC8280XP_SLAVE_SERVICE_GEM_NOC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_sys_gemnoc = {
.name = "srvc_sys_gemnoc",
.id = SC8280XP_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_core = {
.name = "qhs_lpass_core",
.id = SC8280XP_SLAVE_LPASS_CORE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_lpi = {
.name = "qhs_lpass_lpi",
.id = SC8280XP_SLAVE_LPASS_LPI_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_mpu = {
.name = "qhs_lpass_mpu",
.id = SC8280XP_SLAVE_LPASS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_top = {
.name = "qhs_lpass_top",
.id = SC8280XP_SLAVE_LPASS_TOP_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_sysnoc = {
.name = "qns_sysnoc",
.id = SC8280XP_SLAVE_LPASS_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8280XP_MASTER_LPASS_ANOC },
};
static struct qcom_icc_node srvc_niu_aml_noc = {
.name = "srvc_niu_aml_noc",
.id = SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_niu_lpass_agnoc = {
.name = "srvc_niu_lpass_agnoc",
.id = SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
.id = SC8280XP_SLAVE_EBI1,
.channels = 8,
.buswidth = 4,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
.id = SC8280XP_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SC8280XP_MASTER_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
.id = SC8280XP_SLAVE_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SC8280XP_MASTER_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
.id = SC8280XP_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_nsp_gemnoc = {
.name = "qns_nsp_gemnoc",
.id = SC8280XP_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SC8280XP_MASTER_COMPUTE_NOC },
};
static struct qcom_icc_node qxs_nsp_xfr = {
.name = "qxs_nsp_xfr",
.id = SC8280XP_SLAVE_NSP_XFR,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node service_nsp_noc = {
.name = "service_nsp_noc",
.id = SC8280XP_SLAVE_SERVICE_NSP_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_nspb_gemnoc = {
.name = "qns_nspb_gemnoc",
.id = SC8280XP_SLAVE_CDSPB_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SC8280XP_MASTER_COMPUTE_NOC_1 },
};
static struct qcom_icc_node qxs_nspb_xfr = {
.name = "qxs_nspb_xfr",
.id = SC8280XP_SLAVE_NSPB_XFR,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node service_nspb_noc = {
.name = "service_nspb_noc",
.id = SC8280XP_SLAVE_SERVICE_NSPB_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
.id = SC8280XP_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SC8280XP_MASTER_SNOC_GC_MEM_NOC },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
.id = SC8280XP_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SC8280XP_MASTER_SNOC_SF_MEM_NOC },
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
.id = SC8280XP_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
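/*
 * Bus Clock Manager (BCM) resources, voted through the RPMh BCM voter.
 * "keepalive" keeps a minimum vote in place even when no consumer is
 * active; the QUP BCMs further down use "vote_scale" so their core-clock
 * votes are passed through unscaled.
 */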
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.num_nodes = 1,
.nodes = { &qxm_crypto },
};
static struct qcom_icc_bcm bcm_cn0 = {
.name = "CN0",
.keepalive = true,
.num_nodes = 9,
.nodes = { &qnm_gemnoc_cnoc,
&qnm_gemnoc_pcie,
&xs_pcie_0,
&xs_pcie_1,
&xs_pcie_2a,
&xs_pcie_2b,
&xs_pcie_3a,
&xs_pcie_3b,
&xs_pcie_4
},
};
static struct qcom_icc_bcm bcm_cn1 = {
.name = "CN1",
.num_nodes = 67,
.nodes = { &qhs_ahb2phy0,
&qhs_ahb2phy1,
&qhs_ahb2phy2,
&qhs_aoss,
&qhs_apss,
&qhs_camera_cfg,
&qhs_clk_ctl,
&qhs_compute0_cfg,
&qhs_compute1_cfg,
&qhs_cpr_cx,
&qhs_cpr_mmcx,
&qhs_cpr_mx,
&qhs_cpr_nspcx,
&qhs_crypto0_cfg,
&qhs_cx_rdpm,
&qhs_dcc_cfg,
&qhs_display0_cfg,
&qhs_display1_cfg,
&qhs_emac0_cfg,
&qhs_emac1_cfg,
&qhs_gpuss_cfg,
&qhs_hwkm,
&qhs_imem_cfg,
&qhs_ipa,
&qhs_ipc_router,
&qhs_lpass_cfg,
&qhs_mx_rdpm,
&qhs_mxc_rdpm,
&qhs_pcie0_cfg,
&qhs_pcie1_cfg,
&qhs_pcie2a_cfg,
&qhs_pcie2b_cfg,
&qhs_pcie3a_cfg,
&qhs_pcie3b_cfg,
&qhs_pcie4_cfg,
&qhs_pcie_rsc_cfg,
&qhs_pdm,
&qhs_pimem_cfg,
&qhs_pka_wrapper_cfg,
&qhs_pmu_wrapper_cfg,
&qhs_qdss_cfg,
&qhs_sdc2,
&qhs_sdc4,
&qhs_security,
&qhs_smmuv3_cfg,
&qhs_smss_cfg,
&qhs_spss_cfg,
&qhs_tcsr,
&qhs_tlmm,
&qhs_ufs_card_cfg,
&qhs_ufs_mem_cfg,
&qhs_usb3_0,
&qhs_usb3_1,
&qhs_usb3_mp,
&qhs_usb4_host_0,
&qhs_usb4_host_1,
&qhs_venus_cfg,
&qhs_vsense_ctrl_cfg,
&qhs_vsense_ctrl_r_cfg,
&qns_a1_noc_cfg,
&qns_a2_noc_cfg,
&qns_anoc_pcie_bridge_cfg,
&qns_ddrss_cfg,
&qns_mnoc_cfg,
&qns_snoc_cfg,
&qns_snoc_sf_bridge_cfg,
&srvc_cnoc
},
};
static struct qcom_icc_bcm bcm_cn2 = {
.name = "CN2",
.num_nodes = 4,
.nodes = { &qhs_qspi,
&qhs_qup0,
&qhs_qup1,
&qhs_qup2
},
};
static struct qcom_icc_bcm bcm_cn3 = {
.name = "CN3",
.num_nodes = 3,
.nodes = { &qxs_imem,
&xs_smss,
&xs_sys_tcu_cfg
},
};
static struct qcom_icc_bcm bcm_mc0 = {
.name = "MC0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_mm0 = {
.name = "MM0",
.keepalive = true,
.num_nodes = 5,
.nodes = { &qnm_camnoc_hf,
&qnm_mdp0_0,
&qnm_mdp0_1,
&qnm_mdp1_0,
&qns_mem_noc_hf
},
};
static struct qcom_icc_bcm bcm_mm1 = {
.name = "MM1",
.num_nodes = 8,
.nodes = { &qnm_rot_0,
&qnm_rot_1,
&qnm_video0,
&qnm_video1,
&qnm_video_cvp,
&qxm_camnoc_icp,
&qxm_camnoc_sf,
&qns_mem_noc_sf
},
};
static struct qcom_icc_bcm bcm_nsa0 = {
.name = "NSA0",
.num_nodes = 2,
.nodes = { &qns_nsp_gemnoc,
&qxs_nsp_xfr
},
};
static struct qcom_icc_bcm bcm_nsa1 = {
.name = "NSA1",
.num_nodes = 1,
.nodes = { &qxm_nsp },
};
static struct qcom_icc_bcm bcm_nsb0 = {
.name = "NSB0",
.num_nodes = 2,
.nodes = { &qns_nspb_gemnoc,
&qxs_nspb_xfr
},
};
static struct qcom_icc_bcm bcm_nsb1 = {
.name = "NSB1",
.num_nodes = 1,
.nodes = { &qxm_nspb },
};
static struct qcom_icc_bcm bcm_pci0 = {
.name = "PCI0",
.num_nodes = 1,
.nodes = { &qns_pcie_gem_noc },
};
static struct qcom_icc_bcm bcm_qup0 = {
.name = "QUP0",
.vote_scale = 1,
.num_nodes = 1,
.nodes = { &qup0_core_slave },
};
static struct qcom_icc_bcm bcm_qup1 = {
.name = "QUP1",
.vote_scale = 1,
.num_nodes = 1,
.nodes = { &qup1_core_slave },
};
static struct qcom_icc_bcm bcm_qup2 = {
.name = "QUP2",
.vote_scale = 1,
.num_nodes = 1,
.nodes = { &qup2_core_slave },
};
static struct qcom_icc_bcm bcm_sh0 = {
.name = "SH0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_llcc },
};
static struct qcom_icc_bcm bcm_sh2 = {
.name = "SH2",
.num_nodes = 1,
.nodes = { &chm_apps },
};
static struct qcom_icc_bcm bcm_sn0 = {
.name = "SN0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_bcm bcm_sn1 = {
.name = "SN1",
.num_nodes = 1,
.nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_bcm bcm_sn2 = {
.name = "SN2",
.num_nodes = 1,
.nodes = { &qxs_pimem },
};
static struct qcom_icc_bcm bcm_sn3 = {
.name = "SN3",
.num_nodes = 2,
.nodes = { &qns_a1noc_snoc,
&qnm_aggre1_noc
},
};
static struct qcom_icc_bcm bcm_sn4 = {
.name = "SN4",
.num_nodes = 2,
.nodes = { &qns_a2noc_snoc,
&qnm_aggre2_noc
},
};
static struct qcom_icc_bcm bcm_sn5 = {
.name = "SN5",
.num_nodes = 2,
.nodes = { &qns_aggre_usb_snoc,
&qnm_aggre_usb_noc
},
};
static struct qcom_icc_bcm bcm_sn9 = {
.name = "SN9",
.num_nodes = 2,
.nodes = { &qns_sysnoc,
&qnm_lpass_noc
},
};
static struct qcom_icc_bcm bcm_sn10 = {
.name = "SN10",
.num_nodes = 1,
.nodes = { &xs_qdss_stm },
};
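/*
 * Provider descriptions: one nodes[]/bcms[] pair per NoC instance, with
 * the node arrays indexed by the IDs from the SC8280XP dt-binding header.
 */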
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_sn3,
&bcm_sn5,
};
static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_QSPI_0] = &qhm_qspi,
[MASTER_QUP_1] = &qhm_qup1,
[MASTER_QUP_2] = &qhm_qup2,
[MASTER_A1NOC_CFG] = &qnm_a1noc_cfg,
[MASTER_IPA] = &qxm_ipa,
[MASTER_EMAC_1] = &xm_emac_1,
[MASTER_SDCC_4] = &xm_sdc4,
[MASTER_UFS_MEM] = &xm_ufs_mem,
[MASTER_USB3_0] = &xm_usb3_0,
[MASTER_USB3_1] = &xm_usb3_1,
[MASTER_USB3_MP] = &xm_usb3_mp,
[MASTER_USB4_0] = &xm_usb4_host0,
[MASTER_USB4_1] = &xm_usb4_host1,
[SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
[SLAVE_USB_NOC_SNOC] = &qns_aggre_usb_snoc,
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
static const struct qcom_icc_desc sc8280xp_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
&bcm_pci0,
&bcm_sn4,
};
static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QUP_0] = &qhm_qup0,
[MASTER_A2NOC_CFG] = &qnm_a2noc_cfg,
[MASTER_CRYPTO] = &qxm_crypto,
[MASTER_SENSORS_PROC] = &qxm_sensorss_q6,
[MASTER_SP] = &qxm_sp,
[MASTER_EMAC] = &xm_emac_0,
[MASTER_PCIE_0] = &xm_pcie3_0,
[MASTER_PCIE_1] = &xm_pcie3_1,
[MASTER_PCIE_2A] = &xm_pcie3_2a,
[MASTER_PCIE_2B] = &xm_pcie3_2b,
[MASTER_PCIE_3A] = &xm_pcie3_3a,
[MASTER_PCIE_3B] = &xm_pcie3_3b,
[MASTER_PCIE_4] = &xm_pcie3_4,
[MASTER_QDSS_ETR] = &xm_qdss_etr,
[MASTER_SDCC_2] = &xm_sdc2,
[MASTER_UFS_CARD] = &xm_ufs_card,
[SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
[SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_gem_noc,
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
static const struct qcom_icc_desc sc8280xp_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
static struct qcom_icc_bcm * const clk_virt_bcms[] = {
&bcm_qup0,
&bcm_qup1,
&bcm_qup2,
};
static struct qcom_icc_node * const clk_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[MASTER_QUP_CORE_2] = &qup2_core_master,
[SLAVE_QUP_CORE_0] = &qup0_core_slave,
[SLAVE_QUP_CORE_1] = &qup1_core_slave,
[SLAVE_QUP_CORE_2] = &qup2_core_slave,
};
static const struct qcom_icc_desc sc8280xp_clk_virt = {
.nodes = clk_virt_nodes,
.num_nodes = ARRAY_SIZE(clk_virt_nodes),
.bcms = clk_virt_bcms,
.num_bcms = ARRAY_SIZE(clk_virt_bcms),
};
static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
&bcm_cn1,
&bcm_cn2,
&bcm_cn3,
&bcm_sn2,
&bcm_sn10,
};
static struct qcom_icc_node * const config_noc_nodes[] = {
[MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
[MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
[SLAVE_AHB2PHY_0] = &qhs_ahb2phy0,
[SLAVE_AHB2PHY_1] = &qhs_ahb2phy1,
[SLAVE_AHB2PHY_2] = &qhs_ahb2phy2,
[SLAVE_AOSS] = &qhs_aoss,
[SLAVE_APPSS] = &qhs_apss,
[SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
[SLAVE_CLK_CTL] = &qhs_clk_ctl,
[SLAVE_CDSP_CFG] = &qhs_compute0_cfg,
[SLAVE_CDSP1_CFG] = &qhs_compute1_cfg,
[SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
[SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx,
[SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
[SLAVE_CPR_NSPCX] = &qhs_cpr_nspcx,
[SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
[SLAVE_CX_RDPM] = &qhs_cx_rdpm,
[SLAVE_DCC_CFG] = &qhs_dcc_cfg,
[SLAVE_DISPLAY_CFG] = &qhs_display0_cfg,
[SLAVE_DISPLAY1_CFG] = &qhs_display1_cfg,
[SLAVE_EMAC_CFG] = &qhs_emac0_cfg,
[SLAVE_EMAC1_CFG] = &qhs_emac1_cfg,
[SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
[SLAVE_HWKM] = &qhs_hwkm,
[SLAVE_IMEM_CFG] = &qhs_imem_cfg,
[SLAVE_IPA_CFG] = &qhs_ipa,
[SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
[SLAVE_LPASS] = &qhs_lpass_cfg,
[SLAVE_MX_RDPM] = &qhs_mx_rdpm,
[SLAVE_MXC_RDPM] = &qhs_mxc_rdpm,
[SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
[SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
[SLAVE_PCIE_2A_CFG] = &qhs_pcie2a_cfg,
[SLAVE_PCIE_2B_CFG] = &qhs_pcie2b_cfg,
[SLAVE_PCIE_3A_CFG] = &qhs_pcie3a_cfg,
[SLAVE_PCIE_3B_CFG] = &qhs_pcie3b_cfg,
[SLAVE_PCIE_4_CFG] = &qhs_pcie4_cfg,
[SLAVE_PCIE_RSC_CFG] = &qhs_pcie_rsc_cfg,
[SLAVE_PDM] = &qhs_pdm,
[SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
[SLAVE_PKA_WRAPPER_CFG] = &qhs_pka_wrapper_cfg,
[SLAVE_PMU_WRAPPER_CFG] = &qhs_pmu_wrapper_cfg,
[SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
[SLAVE_QSPI_0] = &qhs_qspi,
[SLAVE_QUP_0] = &qhs_qup0,
[SLAVE_QUP_1] = &qhs_qup1,
[SLAVE_QUP_2] = &qhs_qup2,
[SLAVE_SDCC_2] = &qhs_sdc2,
[SLAVE_SDCC_4] = &qhs_sdc4,
[SLAVE_SECURITY] = &qhs_security,
[SLAVE_SMMUV3_CFG] = &qhs_smmuv3_cfg,
[SLAVE_SMSS_CFG] = &qhs_smss_cfg,
[SLAVE_SPSS_CFG] = &qhs_spss_cfg,
[SLAVE_TCSR] = &qhs_tcsr,
[SLAVE_TLMM] = &qhs_tlmm,
[SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg,
[SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
[SLAVE_USB3_0] = &qhs_usb3_0,
[SLAVE_USB3_1] = &qhs_usb3_1,
[SLAVE_USB3_MP] = &qhs_usb3_mp,
[SLAVE_USB4_0] = &qhs_usb4_host_0,
[SLAVE_USB4_1] = &qhs_usb4_host_1,
[SLAVE_VENUS_CFG] = &qhs_venus_cfg,
[SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
[SLAVE_VSENSE_CTRL_R_CFG] = &qhs_vsense_ctrl_r_cfg,
[SLAVE_A1NOC_CFG] = &qns_a1_noc_cfg,
[SLAVE_A2NOC_CFG] = &qns_a2_noc_cfg,
[SLAVE_ANOC_PCIE_BRIDGE_CFG] = &qns_anoc_pcie_bridge_cfg,
[SLAVE_DDRSS_CFG] = &qns_ddrss_cfg,
[SLAVE_CNOC_MNOC_CFG] = &qns_mnoc_cfg,
[SLAVE_SNOC_CFG] = &qns_snoc_cfg,
[SLAVE_SNOC_SF_BRIDGE_CFG] = &qns_snoc_sf_bridge_cfg,
[SLAVE_IMEM] = &qxs_imem,
[SLAVE_PIMEM] = &qxs_pimem,
[SLAVE_SERVICE_CNOC] = &srvc_cnoc,
[SLAVE_PCIE_0] = &xs_pcie_0,
[SLAVE_PCIE_1] = &xs_pcie_1,
[SLAVE_PCIE_2A] = &xs_pcie_2a,
[SLAVE_PCIE_2B] = &xs_pcie_2b,
[SLAVE_PCIE_3A] = &xs_pcie_3a,
[SLAVE_PCIE_3B] = &xs_pcie_3b,
[SLAVE_PCIE_4] = &xs_pcie_4,
[SLAVE_QDSS_STM] = &xs_qdss_stm,
[SLAVE_SMSS] = &xs_smss,
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
static const struct qcom_icc_desc sc8280xp_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
static struct qcom_icc_bcm * const dc_noc_bcms[] = {
};
static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
[SLAVE_GEM_NOC_CFG] = &qns_gemnoc,
};
static const struct qcom_icc_desc sc8280xp_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
.bcms = dc_noc_bcms,
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
};
static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_GPU_TCU] = &alm_gpu_tcu,
[MASTER_PCIE_TCU] = &alm_pcie_tcu,
[MASTER_SYS_TCU] = &alm_sys_tcu,
[MASTER_APPSS_PROC] = &chm_apps,
[MASTER_COMPUTE_NOC] = &qnm_cmpnoc0,
[MASTER_COMPUTE_NOC_1] = &qnm_cmpnoc1,
[MASTER_GEM_NOC_CFG] = &qnm_gemnoc_cfg,
[MASTER_GFX3D] = &qnm_gpu,
[MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
[MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
[MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
[MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
[MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
[SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
[SLAVE_LLCC] = &qns_llcc,
[SLAVE_GEM_NOC_PCIE_CNOC] = &qns_pcie,
[SLAVE_SERVICE_GEM_NOC_1] = &srvc_even_gemnoc,
[SLAVE_SERVICE_GEM_NOC_2] = &srvc_odd_gemnoc,
[SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc,
};
static const struct qcom_icc_desc sc8280xp_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
&bcm_sn9,
};
static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
[MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
[MASTER_LPASS_PROC] = &qxm_lpass_dsp,
[SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
[SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi,
[SLAVE_LPASS_MPU_CFG] = &qhs_lpass_mpu,
[SLAVE_LPASS_TOP_CFG] = &qhs_lpass_top,
[SLAVE_LPASS_SNOC] = &qns_sysnoc,
[SLAVE_SERVICES_LPASS_AML_NOC] = &srvc_niu_aml_noc,
[SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
};
static const struct qcom_icc_desc sc8280xp_lpass_ag_noc = {
.nodes = lpass_ag_noc_nodes,
.num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
.bcms = lpass_ag_noc_bcms,
.num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
};
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
};
static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
};
static const struct qcom_icc_desc sc8280xp_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
};
static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
[MASTER_MDP0] = &qnm_mdp0_0,
[MASTER_MDP1] = &qnm_mdp0_1,
[MASTER_MDP_CORE1_0] = &qnm_mdp1_0,
[MASTER_MDP_CORE1_1] = &qnm_mdp1_1,
[MASTER_CNOC_MNOC_CFG] = &qnm_mnoc_cfg,
[MASTER_ROTATOR] = &qnm_rot_0,
[MASTER_ROTATOR_1] = &qnm_rot_1,
[MASTER_VIDEO_P0] = &qnm_video0,
[MASTER_VIDEO_P1] = &qnm_video1,
[MASTER_VIDEO_PROC] = &qnm_video_cvp,
[MASTER_CAMNOC_ICP] = &qxm_camnoc_icp,
[MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
[SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
[SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
};
static const struct qcom_icc_desc sc8280xp_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
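/*
 * SC8280XP carries two NSP (compute DSP) subsystems; the nspa/nspb NoCs
 * below mirror each other.
 */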
static struct qcom_icc_bcm * const nspa_noc_bcms[] = {
&bcm_nsa0,
&bcm_nsa1,
};
static struct qcom_icc_node * const nspa_noc_nodes[] = {
[MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
[MASTER_CDSP_PROC] = &qxm_nsp,
[SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
[SLAVE_NSP_XFR] = &qxs_nsp_xfr,
[SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
};
static const struct qcom_icc_desc sc8280xp_nspa_noc = {
.nodes = nspa_noc_nodes,
.num_nodes = ARRAY_SIZE(nspa_noc_nodes),
.bcms = nspa_noc_bcms,
.num_bcms = ARRAY_SIZE(nspa_noc_bcms),
};
static struct qcom_icc_bcm * const nspb_noc_bcms[] = {
&bcm_nsb0,
&bcm_nsb1,
};
static struct qcom_icc_node * const nspb_noc_nodes[] = {
[MASTER_CDSPB_NOC_CFG] = &qhm_nspb_noc_config,
[MASTER_CDSP_PROC_B] = &qxm_nspb,
[SLAVE_CDSPB_MEM_NOC] = &qns_nspb_gemnoc,
[SLAVE_NSPB_XFR] = &qxs_nspb_xfr,
[SLAVE_SERVICE_NSPB_NOC] = &service_nspb_noc,
};
static const struct qcom_icc_desc sc8280xp_nspb_noc = {
.nodes = nspb_noc_nodes,
.num_nodes = ARRAY_SIZE(nspb_noc_nodes),
.bcms = nspb_noc_bcms,
.num_bcms = ARRAY_SIZE(nspb_noc_bcms),
};
static struct qcom_icc_bcm * const system_noc_main_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn3,
&bcm_sn4,
&bcm_sn5,
&bcm_sn9,
};
static struct qcom_icc_node * const system_noc_main_nodes[] = {
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
[MASTER_USB_NOC_SNOC] = &qnm_aggre_usb_noc,
[MASTER_LPASS_ANOC] = &qnm_lpass_noc,
[MASTER_SNOC_CFG] = &qnm_snoc_cfg,
[MASTER_PIMEM] = &qxm_pimem,
[MASTER_GIC] = &xm_gic,
[SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
[SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
[SLAVE_SERVICE_SNOC] = &srvc_snoc,
};
static const struct qcom_icc_desc sc8280xp_system_noc_main = {
.nodes = system_noc_main_nodes,
.num_nodes = ARRAY_SIZE(system_noc_main_nodes),
.bcms = system_noc_main_bcms,
.num_bcms = ARRAY_SIZE(system_noc_main_bcms),
};
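/* Each NoC probes as its own platform device, matched by compatible. */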
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sc8280xp-aggre1-noc", .data = &sc8280xp_aggre1_noc, },
{ .compatible = "qcom,sc8280xp-aggre2-noc", .data = &sc8280xp_aggre2_noc, },
{ .compatible = "qcom,sc8280xp-clk-virt", .data = &sc8280xp_clk_virt, },
{ .compatible = "qcom,sc8280xp-config-noc", .data = &sc8280xp_config_noc, },
{ .compatible = "qcom,sc8280xp-dc-noc", .data = &sc8280xp_dc_noc, },
{ .compatible = "qcom,sc8280xp-gem-noc", .data = &sc8280xp_gem_noc, },
{ .compatible = "qcom,sc8280xp-lpass-ag-noc", .data = &sc8280xp_lpass_ag_noc, },
{ .compatible = "qcom,sc8280xp-mc-virt", .data = &sc8280xp_mc_virt, },
{ .compatible = "qcom,sc8280xp-mmss-noc", .data = &sc8280xp_mmss_noc, },
{ .compatible = "qcom,sc8280xp-nspa-noc", .data = &sc8280xp_nspa_noc, },
{ .compatible = "qcom,sc8280xp-nspb-noc", .data = &sc8280xp_nspb_noc, },
{ .compatible = "qcom,sc8280xp-system-noc", .data = &sc8280xp_system_noc_main, },
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
.remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sc8280xp",
.of_match_table = qnoc_of_match,
.sync_state = icc_sync_state,
},
};
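/*
 * Register at core_initcall time so the interconnect providers are
 * available before their consumer devices start probing.
 */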
static int __init qnoc_driver_init(void)
{
return platform_driver_register(&qnoc_driver);
}
core_initcall(qnoc_driver_init);
static void __exit qnoc_driver_exit(void)
{
platform_driver_unregister(&qnoc_driver);
}
module_exit(qnoc_driver_exit);
MODULE_DESCRIPTION("Qualcomm SC8280XP NoC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/interconnect/qcom/sc8280xp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Qualcomm SDX55 interconnect driver
* Author: Manivannan Sadhasivam <[email protected]>
*
* Copyright (c) 2021, Linaro Ltd.
*
*/
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sdx55.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sdx55.h"
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
.id = SDX55_MASTER_LLCC,
.channels = 4,
.buswidth = 4,
.num_links = 1,
.links = { SDX55_SLAVE_EBI_CH0 },
};
static struct qcom_icc_node acm_tcu = {
.name = "acm_tcu",
.id = SDX55_MASTER_TCU_0,
.channels = 1,
.buswidth = 8,
.num_links = 3,
.links = { SDX55_SLAVE_LLCC,
SDX55_SLAVE_MEM_NOC_SNOC,
SDX55_SLAVE_MEM_NOC_PCIE_SNOC
},
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
.id = SDX55_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDX55_SLAVE_LLCC },
};
static struct qcom_icc_node xm_apps_rdwr = {
.name = "xm_apps_rdwr",
.id = SDX55_MASTER_AMPSS_M0,
.channels = 1,
.buswidth = 16,
.num_links = 3,
.links = { SDX55_SLAVE_LLCC,
SDX55_SLAVE_MEM_NOC_SNOC,
SDX55_SLAVE_MEM_NOC_PCIE_SNOC
},
};
static struct qcom_icc_node qhm_audio = {
.name = "qhm_audio",
.id = SDX55_MASTER_AUDIO,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDX55_SLAVE_ANOC_SNOC },
};
static struct qcom_icc_node qhm_blsp1 = {
.name = "qhm_blsp1",
.id = SDX55_MASTER_BLSP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDX55_SLAVE_ANOC_SNOC },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
.id = SDX55_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 28,
.links = { SDX55_SLAVE_SNOC_CFG,
SDX55_SLAVE_EMAC_CFG,
SDX55_SLAVE_USB3,
SDX55_SLAVE_TLMM,
SDX55_SLAVE_SPMI_FETCHER,
SDX55_SLAVE_QDSS_CFG,
SDX55_SLAVE_PDM,
SDX55_SLAVE_SNOC_MEM_NOC_GC,
SDX55_SLAVE_TCSR,
SDX55_SLAVE_CNOC_DDRSS,
SDX55_SLAVE_SPMI_VGI_COEX,
SDX55_SLAVE_QPIC,
SDX55_SLAVE_OCIMEM,
SDX55_SLAVE_IPA_CFG,
SDX55_SLAVE_USB3_PHY_CFG,
SDX55_SLAVE_AOP,
SDX55_SLAVE_BLSP_1,
SDX55_SLAVE_SDCC_1,
SDX55_SLAVE_CNOC_MSS,
SDX55_SLAVE_PCIE_PARF,
SDX55_SLAVE_ECC_CFG,
SDX55_SLAVE_AUDIO,
SDX55_SLAVE_AOSS,
SDX55_SLAVE_PRNG,
SDX55_SLAVE_CRYPTO_0_CFG,
SDX55_SLAVE_TCU,
SDX55_SLAVE_CLK_CTL,
SDX55_SLAVE_IMEM_CFG
},
};
static struct qcom_icc_node qhm_qpic = {
.name = "qhm_qpic",
.id = SDX55_MASTER_QPIC,
.channels = 1,
.buswidth = 4,
.num_links = 5,
.links = { SDX55_SLAVE_AOSS,
SDX55_SLAVE_IPA_CFG,
SDX55_SLAVE_ANOC_SNOC,
SDX55_SLAVE_AOP,
SDX55_SLAVE_AUDIO
},
};
static struct qcom_icc_node qhm_snoc_cfg = {
.name = "qhm_snoc_cfg",
.id = SDX55_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDX55_SLAVE_SERVICE_SNOC },
};
static struct qcom_icc_node qhm_spmi_fetcher1 = {
.name = "qhm_spmi_fetcher1",
.id = SDX55_MASTER_SPMI_FETCHER,
.channels = 1,
.buswidth = 4,
.num_links = 3,
.links = { SDX55_SLAVE_AOSS,
SDX55_SLAVE_ANOC_SNOC,
SDX55_SLAVE_AOP
},
};
static struct qcom_icc_node qnm_aggre_noc = {
.name = "qnm_aggre_noc",
.id = SDX55_MASTER_ANOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 30,
.links = { SDX55_SLAVE_PCIE_0,
SDX55_SLAVE_SNOC_CFG,
SDX55_SLAVE_SDCC_1,
SDX55_SLAVE_TLMM,
SDX55_SLAVE_SPMI_FETCHER,
SDX55_SLAVE_QDSS_CFG,
SDX55_SLAVE_PDM,
SDX55_SLAVE_SNOC_MEM_NOC_GC,
SDX55_SLAVE_TCSR,
SDX55_SLAVE_CNOC_DDRSS,
SDX55_SLAVE_SPMI_VGI_COEX,
SDX55_SLAVE_QDSS_STM,
SDX55_SLAVE_QPIC,
SDX55_SLAVE_OCIMEM,
SDX55_SLAVE_IPA_CFG,
SDX55_SLAVE_USB3_PHY_CFG,
SDX55_SLAVE_AOP,
SDX55_SLAVE_BLSP_1,
SDX55_SLAVE_USB3,
SDX55_SLAVE_CNOC_MSS,
SDX55_SLAVE_PCIE_PARF,
SDX55_SLAVE_ECC_CFG,
SDX55_SLAVE_APPSS,
SDX55_SLAVE_AUDIO,
SDX55_SLAVE_AOSS,
SDX55_SLAVE_PRNG,
SDX55_SLAVE_CRYPTO_0_CFG,
SDX55_SLAVE_TCU,
SDX55_SLAVE_CLK_CTL,
SDX55_SLAVE_IMEM_CFG
},
};
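/*
 * SDX55 has no dedicated config NoC: register targets sit on the system
 * NoC, which is why bus masters such as qnm_aggre_noc carry long .links
 * lists enumerating every reachable config endpoint.
 */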
static struct qcom_icc_node qnm_ipa = {
.name = "qnm_ipa",
.id = SDX55_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 27,
.links = { SDX55_SLAVE_SNOC_CFG,
SDX55_SLAVE_EMAC_CFG,
SDX55_SLAVE_USB3,
SDX55_SLAVE_AOSS,
SDX55_SLAVE_SPMI_FETCHER,
SDX55_SLAVE_QDSS_CFG,
SDX55_SLAVE_PDM,
SDX55_SLAVE_SNOC_MEM_NOC_GC,
SDX55_SLAVE_TCSR,
SDX55_SLAVE_CNOC_DDRSS,
SDX55_SLAVE_QDSS_STM,
SDX55_SLAVE_QPIC,
SDX55_SLAVE_OCIMEM,
SDX55_SLAVE_IPA_CFG,
SDX55_SLAVE_USB3_PHY_CFG,
SDX55_SLAVE_AOP,
SDX55_SLAVE_BLSP_1,
SDX55_SLAVE_SDCC_1,
SDX55_SLAVE_CNOC_MSS,
SDX55_SLAVE_PCIE_PARF,
SDX55_SLAVE_ECC_CFG,
SDX55_SLAVE_AUDIO,
SDX55_SLAVE_TLMM,
SDX55_SLAVE_PRNG,
SDX55_SLAVE_CRYPTO_0_CFG,
SDX55_SLAVE_CLK_CTL,
SDX55_SLAVE_IMEM_CFG
},
};
static struct qcom_icc_node qnm_memnoc = {
.name = "qnm_memnoc",
.id = SDX55_MASTER_MEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 29,
.links = { SDX55_SLAVE_SNOC_CFG,
SDX55_SLAVE_EMAC_CFG,
SDX55_SLAVE_USB3,
SDX55_SLAVE_TLMM,
SDX55_SLAVE_SPMI_FETCHER,
SDX55_SLAVE_QDSS_CFG,
SDX55_SLAVE_PDM,
SDX55_SLAVE_TCSR,
SDX55_SLAVE_CNOC_DDRSS,
SDX55_SLAVE_SPMI_VGI_COEX,
SDX55_SLAVE_QDSS_STM,
SDX55_SLAVE_QPIC,
SDX55_SLAVE_OCIMEM,
SDX55_SLAVE_IPA_CFG,
SDX55_SLAVE_USB3_PHY_CFG,
SDX55_SLAVE_AOP,
SDX55_SLAVE_BLSP_1,
SDX55_SLAVE_SDCC_1,
SDX55_SLAVE_CNOC_MSS,
SDX55_SLAVE_PCIE_PARF,
SDX55_SLAVE_ECC_CFG,
SDX55_SLAVE_APPSS,
SDX55_SLAVE_AUDIO,
SDX55_SLAVE_AOSS,
SDX55_SLAVE_PRNG,
SDX55_SLAVE_CRYPTO_0_CFG,
SDX55_SLAVE_TCU,
SDX55_SLAVE_CLK_CTL,
SDX55_SLAVE_IMEM_CFG
},
};
static struct qcom_icc_node qnm_memnoc_pcie = {
.name = "qnm_memnoc_pcie",
.id = SDX55_MASTER_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDX55_SLAVE_PCIE_0 },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
.id = SDX55_MASTER_CRYPTO_CORE_0,
.channels = 1,
.buswidth = 8,
.num_links = 3,
.links = { SDX55_SLAVE_AOSS,
SDX55_SLAVE_ANOC_SNOC,
SDX55_SLAVE_AOP
},
};
static struct qcom_icc_node xm_emac = {
.name = "xm_emac",
.id = SDX55_MASTER_EMAC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDX55_SLAVE_ANOC_SNOC },
};
static struct qcom_icc_node xm_ipa2pcie_slv = {
.name = "xm_ipa2pcie_slv",
.id = SDX55_MASTER_IPA_PCIE,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDX55_SLAVE_PCIE_0 },
};
static struct qcom_icc_node xm_pcie = {
.name = "xm_pcie",
.id = SDX55_MASTER_PCIE,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDX55_SLAVE_ANOC_SNOC },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
.id = SDX55_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 28,
.links = { SDX55_SLAVE_SNOC_CFG,
SDX55_SLAVE_EMAC_CFG,
SDX55_SLAVE_USB3,
SDX55_SLAVE_AOSS,
SDX55_SLAVE_SPMI_FETCHER,
SDX55_SLAVE_QDSS_CFG,
SDX55_SLAVE_PDM,
SDX55_SLAVE_SNOC_MEM_NOC_GC,
SDX55_SLAVE_TCSR,
SDX55_SLAVE_CNOC_DDRSS,
SDX55_SLAVE_SPMI_VGI_COEX,
SDX55_SLAVE_QPIC,
SDX55_SLAVE_OCIMEM,
SDX55_SLAVE_IPA_CFG,
SDX55_SLAVE_USB3_PHY_CFG,
SDX55_SLAVE_AOP,
SDX55_SLAVE_BLSP_1,
SDX55_SLAVE_SDCC_1,
SDX55_SLAVE_CNOC_MSS,
SDX55_SLAVE_PCIE_PARF,
SDX55_SLAVE_ECC_CFG,
SDX55_SLAVE_AUDIO,
SDX55_SLAVE_AOSS,
SDX55_SLAVE_PRNG,
SDX55_SLAVE_CRYPTO_0_CFG,
SDX55_SLAVE_TCU,
SDX55_SLAVE_CLK_CTL,
SDX55_SLAVE_IMEM_CFG
},
};
static struct qcom_icc_node xm_sdc1 = {
.name = "xm_sdc1",
.id = SDX55_MASTER_SDCC_1,
.channels = 1,
.buswidth = 8,
.num_links = 5,
.links = { SDX55_SLAVE_AOSS,
SDX55_SLAVE_IPA_CFG,
SDX55_SLAVE_ANOC_SNOC,
SDX55_SLAVE_AOP,
SDX55_SLAVE_AUDIO
},
};
static struct qcom_icc_node xm_usb3 = {
.name = "xm_usb3",
.id = SDX55_MASTER_USB3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDX55_SLAVE_ANOC_SNOC },
};
static struct qcom_icc_node ebi = {
.name = "ebi",
.id = SDX55_SLAVE_EBI_CH0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
.id = SDX55_SLAVE_LLCC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SDX55_SLAVE_EBI_CH0 },
};
static struct qcom_icc_node qns_memnoc_snoc = {
.name = "qns_memnoc_snoc",
.id = SDX55_SLAVE_MEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDX55_MASTER_MEM_NOC_SNOC },
};
static struct qcom_icc_node qns_sys_pcie = {
.name = "qns_sys_pcie",
.id = SDX55_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDX55_MASTER_MEM_NOC_PCIE_SNOC },
};
static struct qcom_icc_node qhs_aop = {
.name = "qhs_aop",
.id = SDX55_SLAVE_AOP,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
.id = SDX55_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
.id = SDX55_SLAVE_APPSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_audio = {
.name = "qhs_audio",
.id = SDX55_SLAVE_AUDIO,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_blsp1 = {
.name = "qhs_blsp1",
.id = SDX55_SLAVE_BLSP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
.id = SDX55_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
.id = SDX55_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ddrss_cfg = {
.name = "qhs_ddrss_cfg",
.id = SDX55_SLAVE_CNOC_DDRSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ecc_cfg = {
.name = "qhs_ecc_cfg",
.id = SDX55_SLAVE_ECC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_emac_cfg = {
.name = "qhs_emac_cfg",
.id = SDX55_SLAVE_EMAC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
.id = SDX55_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
.id = SDX55_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mss_cfg = {
.name = "qhs_mss_cfg",
.id = SDX55_SLAVE_CNOC_MSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie_parf = {
.name = "qhs_pcie_parf",
.id = SDX55_SLAVE_PCIE_PARF,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
.id = SDX55_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
.id = SDX55_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
.id = SDX55_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qpic = {
.name = "qhs_qpic",
.id = SDX55_SLAVE_QPIC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc1 = {
.name = "qhs_sdc1",
.id = SDX55_SLAVE_SDCC_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_snoc_cfg = {
.name = "qhs_snoc_cfg",
.id = SDX55_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDX55_MASTER_SNOC_CFG },
};
static struct qcom_icc_node qhs_spmi_fetcher = {
.name = "qhs_spmi_fetcher",
.id = SDX55_SLAVE_SPMI_FETCHER,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_spmi_vgi_coex = {
.name = "qhs_spmi_vgi_coex",
.id = SDX55_SLAVE_SPMI_VGI_COEX,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
.id = SDX55_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
.id = SDX55_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3 = {
.name = "qhs_usb3",
.id = SDX55_SLAVE_USB3,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_phy = {
.name = "qhs_usb3_phy",
.id = SDX55_SLAVE_USB3_PHY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_aggre_noc = {
.name = "qns_aggre_noc",
.id = SDX55_SLAVE_ANOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDX55_MASTER_ANOC_SNOC },
};
static struct qcom_icc_node qns_snoc_memnoc = {
.name = "qns_snoc_memnoc",
.id = SDX55_SLAVE_SNOC_MEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDX55_MASTER_SNOC_GC_MEM_NOC },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
.id = SDX55_SLAVE_OCIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
.id = SDX55_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_pcie = {
.name = "xs_pcie",
.id = SDX55_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
.id = SDX55_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
.id = SDX55_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
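/*
 * BCM resources for SDX55. The names roughly follow the RPMh convention:
 * MC = memory controller, SH = system hub (mem_noc), SN = system NoC,
 * PN = peripheral masters, CE = crypto engine.
 */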
static struct qcom_icc_bcm bcm_mc0 = {
.name = "MC0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_sh0 = {
.name = "SH0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_llcc },
};
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxm_crypto },
};
static struct qcom_icc_bcm bcm_pn0 = {
.name = "PN0",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qhm_snoc_cfg },
};
static struct qcom_icc_bcm bcm_sh3 = {
.name = "SH3",
.keepalive = false,
.num_nodes = 1,
.nodes = { &xm_apps_rdwr },
};
static struct qcom_icc_bcm bcm_sh4 = {
.name = "SH4",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qns_memnoc_snoc, &qns_sys_pcie },
};
static struct qcom_icc_bcm bcm_sn0 = {
.name = "SN0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_snoc_memnoc },
};
static struct qcom_icc_bcm bcm_sn1 = {
.name = "SN1",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxs_imem },
};
static struct qcom_icc_bcm bcm_pn1 = {
.name = "PN1",
.keepalive = false,
.num_nodes = 1,
.nodes = { &xm_sdc1 },
};
static struct qcom_icc_bcm bcm_pn2 = {
.name = "PN2",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qhm_audio, &qhm_spmi_fetcher1 },
};
static struct qcom_icc_bcm bcm_sn3 = {
.name = "SN3",
.keepalive = false,
.num_nodes = 1,
.nodes = { &xs_qdss_stm },
};
static struct qcom_icc_bcm bcm_pn3 = {
.name = "PN3",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qhm_blsp1, &qhm_qpic },
};
static struct qcom_icc_bcm bcm_sn4 = {
.name = "SN4",
.keepalive = false,
.num_nodes = 1,
.nodes = { &xs_sys_tcu_cfg },
};
static struct qcom_icc_bcm bcm_pn5 = {
.name = "PN5",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxm_crypto },
};
static struct qcom_icc_bcm bcm_sn6 = {
.name = "SN6",
.keepalive = false,
.num_nodes = 1,
.nodes = { &xs_pcie },
};
static struct qcom_icc_bcm bcm_sn7 = {
.name = "SN7",
.keepalive = false,
.num_nodes = 5,
.nodes = { &qnm_aggre_noc, &xm_emac, &xm_emac, &xm_usb3, &qns_aggre_noc },
};
static struct qcom_icc_bcm bcm_sn8 = {
.name = "SN8",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qhm_qdss_bam, &xm_qdss_etr },
};
static struct qcom_icc_bcm bcm_sn9 = {
.name = "SN9",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_memnoc },
};
static struct qcom_icc_bcm bcm_sn10 = {
.name = "SN10",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_memnoc_pcie },
};
static struct qcom_icc_bcm bcm_sn11 = {
.name = "SN11",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qnm_ipa, &xm_ipa2pcie_slv },
};
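/*
 * mc_virt is a virtual provider: the LLCC-to-DDR (EBI) path has no NoC
 * registers of its own but still needs bandwidth votes.
 */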
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_mc0,
};
static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI_CH0] = &ebi,
};
static const struct qcom_icc_desc sdx55_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
static struct qcom_icc_bcm * const mem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh3,
&bcm_sh4,
};
static struct qcom_icc_node * const mem_noc_nodes[] = {
[MASTER_TCU_0] = &acm_tcu,
[MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
[MASTER_AMPSS_M0] = &xm_apps_rdwr,
[SLAVE_LLCC] = &qns_llcc,
[SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc,
[SLAVE_MEM_NOC_PCIE_SNOC] = &qns_sys_pcie,
};
static const struct qcom_icc_desc sdx55_mem_noc = {
.nodes = mem_noc_nodes,
.num_nodes = ARRAY_SIZE(mem_noc_nodes),
.bcms = mem_noc_bcms,
.num_bcms = ARRAY_SIZE(mem_noc_bcms),
};
static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_ce0,
&bcm_pn0,
&bcm_pn1,
&bcm_pn2,
&bcm_pn3,
&bcm_pn5,
&bcm_sn0,
&bcm_sn1,
&bcm_sn3,
&bcm_sn4,
&bcm_sn6,
&bcm_sn7,
&bcm_sn8,
&bcm_sn9,
&bcm_sn10,
&bcm_sn11,
};
static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_AUDIO] = &qhm_audio,
[MASTER_BLSP_1] = &qhm_blsp1,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QPIC] = &qhm_qpic,
[MASTER_SNOC_CFG] = &qhm_snoc_cfg,
[MASTER_SPMI_FETCHER] = &qhm_spmi_fetcher1,
[MASTER_ANOC_SNOC] = &qnm_aggre_noc,
[MASTER_IPA] = &qnm_ipa,
[MASTER_MEM_NOC_SNOC] = &qnm_memnoc,
[MASTER_MEM_NOC_PCIE_SNOC] = &qnm_memnoc_pcie,
[MASTER_CRYPTO_CORE_0] = &qxm_crypto,
[MASTER_EMAC] = &xm_emac,
[MASTER_IPA_PCIE] = &xm_ipa2pcie_slv,
[MASTER_PCIE] = &xm_pcie,
[MASTER_QDSS_ETR] = &xm_qdss_etr,
[MASTER_SDCC_1] = &xm_sdc1,
[MASTER_USB3] = &xm_usb3,
[SLAVE_AOP] = &qhs_aop,
[SLAVE_AOSS] = &qhs_aoss,
[SLAVE_APPSS] = &qhs_apss,
[SLAVE_AUDIO] = &qhs_audio,
[SLAVE_BLSP_1] = &qhs_blsp1,
[SLAVE_CLK_CTL] = &qhs_clk_ctl,
[SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
[SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
[SLAVE_ECC_CFG] = &qhs_ecc_cfg,
[SLAVE_EMAC_CFG] = &qhs_emac_cfg,
[SLAVE_IMEM_CFG] = &qhs_imem_cfg,
[SLAVE_IPA_CFG] = &qhs_ipa,
[SLAVE_CNOC_MSS] = &qhs_mss_cfg,
[SLAVE_PCIE_PARF] = &qhs_pcie_parf,
[SLAVE_PDM] = &qhs_pdm,
[SLAVE_PRNG] = &qhs_prng,
[SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
[SLAVE_QPIC] = &qhs_qpic,
[SLAVE_SDCC_1] = &qhs_sdc1,
[SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
[SLAVE_SPMI_FETCHER] = &qhs_spmi_fetcher,
[SLAVE_SPMI_VGI_COEX] = &qhs_spmi_vgi_coex,
[SLAVE_TCSR] = &qhs_tcsr,
[SLAVE_TLMM] = &qhs_tlmm,
[SLAVE_USB3] = &qhs_usb3,
[SLAVE_USB3_PHY_CFG] = &qhs_usb3_phy,
[SLAVE_ANOC_SNOC] = &qns_aggre_noc,
[SLAVE_SNOC_MEM_NOC_GC] = &qns_snoc_memnoc,
[SLAVE_OCIMEM] = &qxs_imem,
[SLAVE_SERVICE_SNOC] = &srvc_snoc,
[SLAVE_PCIE_0] = &xs_pcie,
[SLAVE_QDSS_STM] = &xs_qdss_stm,
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
static const struct qcom_icc_desc sdx55_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sdx55-mc-virt",
.data = &sdx55_mc_virt},
{ .compatible = "qcom,sdx55-mem-noc",
.data = &sdx55_mem_noc},
{ .compatible = "qcom,sdx55-system-noc",
.data = &sdx55_system_noc},
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
.remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sdx55",
.of_match_table = qnoc_of_match,
.sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
MODULE_DESCRIPTION("Qualcomm SDX55 NoC driver");
MODULE_AUTHOR("Manivannan Sadhasivam <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/interconnect/qcom/sdx55.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sdm845.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sdm845.h"
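/*
 * SDM845 topology: master nodes per NoC come first, then the slave-side
 * nodes; each master's .links array lists every endpoint it can reach.
 */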
static struct qcom_icc_node qhm_a1noc_cfg = {
.name = "qhm_a1noc_cfg",
.id = SDM845_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM845_SLAVE_SERVICE_A1NOC },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
.id = SDM845_MASTER_BLSP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM845_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_tsif = {
.name = "qhm_tsif",
.id = SDM845_MASTER_TSIF,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM845_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
.id = SDM845_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM845_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
.id = SDM845_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM845_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_ufs_card = {
.name = "xm_ufs_card",
.id = SDM845_MASTER_UFS_CARD,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM845_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
.id = SDM845_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM845_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_pcie_0 = {
.name = "xm_pcie_0",
.id = SDM845_MASTER_PCIE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM845_SLAVE_ANOC_PCIE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_a2noc_cfg = {
.name = "qhm_a2noc_cfg",
.id = SDM845_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM845_SLAVE_SERVICE_A2NOC },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
.id = SDM845_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM845_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
.id = SDM845_MASTER_BLSP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM845_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qnm_cnoc = {
.name = "qnm_cnoc",
.id = SDM845_MASTER_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM845_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
.id = SDM845_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM845_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
.id = SDM845_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM845_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
.id = SDM845_MASTER_PCIE_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM845_SLAVE_ANOC_PCIE_SNOC },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
.id = SDM845_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM845_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
.id = SDM845_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM845_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_usb3_1 = {
.name = "xm_usb3_1",
.id = SDM845_MASTER_USB3_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM845_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_camnoc_hf0_uncomp = {
.name = "qxm_camnoc_hf0_uncomp",
.id = SDM845_MASTER_CAMNOC_HF0_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM845_SLAVE_CAMNOC_UNCOMP },
};
static struct qcom_icc_node qxm_camnoc_hf1_uncomp = {
.name = "qxm_camnoc_hf1_uncomp",
.id = SDM845_MASTER_CAMNOC_HF1_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM845_SLAVE_CAMNOC_UNCOMP },
};
static struct qcom_icc_node qxm_camnoc_sf_uncomp = {
.name = "qxm_camnoc_sf_uncomp",
.id = SDM845_MASTER_CAMNOC_SF_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM845_SLAVE_CAMNOC_UNCOMP },
};
static struct qcom_icc_node qhm_spdm = {
.name = "qhm_spdm",
.id = SDM845_MASTER_SPDM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM845_SLAVE_CNOC_A2NOC },
};
static struct qcom_icc_node qhm_tic = {
.name = "qhm_tic",
.id = SDM845_MASTER_TIC,
.channels = 1,
.buswidth = 4,
.num_links = 43,
.links = { SDM845_SLAVE_A1NOC_CFG,
SDM845_SLAVE_A2NOC_CFG,
SDM845_SLAVE_AOP,
SDM845_SLAVE_AOSS,
SDM845_SLAVE_CAMERA_CFG,
SDM845_SLAVE_CLK_CTL,
SDM845_SLAVE_CDSP_CFG,
SDM845_SLAVE_RBCPR_CX_CFG,
SDM845_SLAVE_CRYPTO_0_CFG,
SDM845_SLAVE_DCC_CFG,
SDM845_SLAVE_CNOC_DDRSS,
SDM845_SLAVE_DISPLAY_CFG,
SDM845_SLAVE_GLM,
SDM845_SLAVE_GFX3D_CFG,
SDM845_SLAVE_IMEM_CFG,
SDM845_SLAVE_IPA_CFG,
SDM845_SLAVE_CNOC_MNOC_CFG,
SDM845_SLAVE_PCIE_0_CFG,
SDM845_SLAVE_PCIE_1_CFG,
SDM845_SLAVE_PDM,
SDM845_SLAVE_SOUTH_PHY_CFG,
SDM845_SLAVE_PIMEM_CFG,
SDM845_SLAVE_PRNG,
SDM845_SLAVE_QDSS_CFG,
SDM845_SLAVE_BLSP_2,
SDM845_SLAVE_BLSP_1,
SDM845_SLAVE_SDCC_2,
SDM845_SLAVE_SDCC_4,
SDM845_SLAVE_SNOC_CFG,
SDM845_SLAVE_SPDM_WRAPPER,
SDM845_SLAVE_SPSS_CFG,
SDM845_SLAVE_TCSR,
SDM845_SLAVE_TLMM_NORTH,
SDM845_SLAVE_TLMM_SOUTH,
SDM845_SLAVE_TSIF,
SDM845_SLAVE_UFS_CARD_CFG,
SDM845_SLAVE_UFS_MEM_CFG,
SDM845_SLAVE_USB3_0,
SDM845_SLAVE_USB3_1,
SDM845_SLAVE_VENUS_CFG,
SDM845_SLAVE_VSENSE_CTRL_CFG,
SDM845_SLAVE_CNOC_A2NOC,
SDM845_SLAVE_SERVICE_CNOC
},
};
static struct qcom_icc_node qnm_snoc = {
.name = "qnm_snoc",
.id = SDM845_MASTER_SNOC_CNOC,
.channels = 1,
.buswidth = 8,
.num_links = 42,
.links = { SDM845_SLAVE_A1NOC_CFG,
SDM845_SLAVE_A2NOC_CFG,
SDM845_SLAVE_AOP,
SDM845_SLAVE_AOSS,
SDM845_SLAVE_CAMERA_CFG,
SDM845_SLAVE_CLK_CTL,
SDM845_SLAVE_CDSP_CFG,
SDM845_SLAVE_RBCPR_CX_CFG,
SDM845_SLAVE_CRYPTO_0_CFG,
SDM845_SLAVE_DCC_CFG,
SDM845_SLAVE_CNOC_DDRSS,
SDM845_SLAVE_DISPLAY_CFG,
SDM845_SLAVE_GLM,
SDM845_SLAVE_GFX3D_CFG,
SDM845_SLAVE_IMEM_CFG,
SDM845_SLAVE_IPA_CFG,
SDM845_SLAVE_CNOC_MNOC_CFG,
SDM845_SLAVE_PCIE_0_CFG,
SDM845_SLAVE_PCIE_1_CFG,
SDM845_SLAVE_PDM,
SDM845_SLAVE_SOUTH_PHY_CFG,
SDM845_SLAVE_PIMEM_CFG,
SDM845_SLAVE_PRNG,
SDM845_SLAVE_QDSS_CFG,
SDM845_SLAVE_BLSP_2,
SDM845_SLAVE_BLSP_1,
SDM845_SLAVE_SDCC_2,
SDM845_SLAVE_SDCC_4,
SDM845_SLAVE_SNOC_CFG,
SDM845_SLAVE_SPDM_WRAPPER,
SDM845_SLAVE_SPSS_CFG,
SDM845_SLAVE_TCSR,
SDM845_SLAVE_TLMM_NORTH,
SDM845_SLAVE_TLMM_SOUTH,
SDM845_SLAVE_TSIF,
SDM845_SLAVE_UFS_CARD_CFG,
SDM845_SLAVE_UFS_MEM_CFG,
SDM845_SLAVE_USB3_0,
SDM845_SLAVE_USB3_1,
SDM845_SLAVE_VENUS_CFG,
SDM845_SLAVE_VSENSE_CTRL_CFG,
SDM845_SLAVE_SERVICE_CNOC
},
};
static struct qcom_icc_node xm_qdss_dap = {
.name = "xm_qdss_dap",
.id = SDM845_MASTER_QDSS_DAP,
.channels = 1,
.buswidth = 8,
.num_links = 43,
.links = { SDM845_SLAVE_A1NOC_CFG,
SDM845_SLAVE_A2NOC_CFG,
SDM845_SLAVE_AOP,
SDM845_SLAVE_AOSS,
SDM845_SLAVE_CAMERA_CFG,
SDM845_SLAVE_CLK_CTL,
SDM845_SLAVE_CDSP_CFG,
SDM845_SLAVE_RBCPR_CX_CFG,
SDM845_SLAVE_CRYPTO_0_CFG,
SDM845_SLAVE_DCC_CFG,
SDM845_SLAVE_CNOC_DDRSS,
SDM845_SLAVE_DISPLAY_CFG,
SDM845_SLAVE_GLM,
SDM845_SLAVE_GFX3D_CFG,
SDM845_SLAVE_IMEM_CFG,
SDM845_SLAVE_IPA_CFG,
SDM845_SLAVE_CNOC_MNOC_CFG,
SDM845_SLAVE_PCIE_0_CFG,
SDM845_SLAVE_PCIE_1_CFG,
SDM845_SLAVE_PDM,
SDM845_SLAVE_SOUTH_PHY_CFG,
SDM845_SLAVE_PIMEM_CFG,
SDM845_SLAVE_PRNG,
SDM845_SLAVE_QDSS_CFG,
SDM845_SLAVE_BLSP_2,
SDM845_SLAVE_BLSP_1,
SDM845_SLAVE_SDCC_2,
SDM845_SLAVE_SDCC_4,
SDM845_SLAVE_SNOC_CFG,
SDM845_SLAVE_SPDM_WRAPPER,
SDM845_SLAVE_SPSS_CFG,
SDM845_SLAVE_TCSR,
SDM845_SLAVE_TLMM_NORTH,
SDM845_SLAVE_TLMM_SOUTH,
SDM845_SLAVE_TSIF,
SDM845_SLAVE_UFS_CARD_CFG,
SDM845_SLAVE_UFS_MEM_CFG,
SDM845_SLAVE_USB3_0,
SDM845_SLAVE_USB3_1,
SDM845_SLAVE_VENUS_CFG,
SDM845_SLAVE_VSENSE_CTRL_CFG,
SDM845_SLAVE_CNOC_A2NOC,
SDM845_SLAVE_SERVICE_CNOC
},
};
static struct qcom_icc_node qhm_cnoc = {
.name = "qhm_cnoc",
.id = SDM845_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
.links = { SDM845_SLAVE_LLCC_CFG,
SDM845_SLAVE_MEM_NOC_CFG
},
};
static struct qcom_icc_node acm_l3 = {
.name = "acm_l3",
.id = SDM845_MASTER_APPSS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
.links = { SDM845_SLAVE_GNOC_SNOC,
SDM845_SLAVE_GNOC_MEM_NOC,
SDM845_SLAVE_SERVICE_GNOC
},
};
static struct qcom_icc_node pm_gnoc_cfg = {
.name = "pm_gnoc_cfg",
.id = SDM845_MASTER_GNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM845_SLAVE_SERVICE_GNOC },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
.id = SDM845_MASTER_LLCC,
.channels = 4,
.buswidth = 4,
.num_links = 1,
.links = { SDM845_SLAVE_EBI1 },
};
static struct qcom_icc_node acm_tcu = {
.name = "acm_tcu",
.id = SDM845_MASTER_TCU_0,
.channels = 1,
.buswidth = 8,
.num_links = 3,
.links = { SDM845_SLAVE_MEM_NOC_GNOC,
SDM845_SLAVE_LLCC,
SDM845_SLAVE_MEM_NOC_SNOC
},
};
static struct qcom_icc_node qhm_memnoc_cfg = {
.name = "qhm_memnoc_cfg",
.id = SDM845_MASTER_MEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 2,
.links = { SDM845_SLAVE_MSS_PROC_MS_MPU_CFG,
SDM845_SLAVE_SERVICE_MEM_NOC
},
};
static struct qcom_icc_node qnm_apps = {
.name = "qnm_apps",
.id = SDM845_MASTER_GNOC_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SDM845_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
.id = SDM845_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SDM845_SLAVE_MEM_NOC_GNOC,
SDM845_SLAVE_LLCC
},
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
.id = SDM845_MASTER_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 3,
.links = { SDM845_SLAVE_MEM_NOC_GNOC,
SDM845_SLAVE_LLCC,
SDM845_SLAVE_MEM_NOC_SNOC
},
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
.id = SDM845_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM845_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
.id = SDM845_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
.links = { SDM845_SLAVE_MEM_NOC_GNOC,
SDM845_SLAVE_LLCC
},
};
static struct qcom_icc_node qxm_gpu = {
.name = "qxm_gpu",
.id = SDM845_MASTER_GFX3D,
.channels = 2,
.buswidth = 32,
.num_links = 3,
.links = { SDM845_SLAVE_MEM_NOC_GNOC,
SDM845_SLAVE_LLCC,
SDM845_SLAVE_MEM_NOC_SNOC
},
};
static struct qcom_icc_node qhm_mnoc_cfg = {
.name = "qhm_mnoc_cfg",
.id = SDM845_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM845_SLAVE_SERVICE_MNOC },
};
static struct qcom_icc_node qxm_camnoc_hf0 = {
.name = "qxm_camnoc_hf0",
.id = SDM845_MASTER_CAMNOC_HF0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM845_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qxm_camnoc_hf1 = {
.name = "qxm_camnoc_hf1",
.id = SDM845_MASTER_CAMNOC_HF1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM845_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qxm_camnoc_sf = {
.name = "qxm_camnoc_sf",
.id = SDM845_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM845_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_mdp0 = {
.name = "qxm_mdp0",
.id = SDM845_MASTER_MDP0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM845_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qxm_mdp1 = {
.name = "qxm_mdp1",
.id = SDM845_MASTER_MDP1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM845_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qxm_rot = {
.name = "qxm_rot",
.id = SDM845_MASTER_ROTATOR,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM845_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_venus0 = {
.name = "qxm_venus0",
.id = SDM845_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM845_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_venus1 = {
.name = "qxm_venus1",
.id = SDM845_MASTER_VIDEO_P1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM845_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_venus_arm9 = {
.name = "qxm_venus_arm9",
.id = SDM845_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM845_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qhm_snoc_cfg = {
.name = "qhm_snoc_cfg",
.id = SDM845_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM845_SLAVE_SERVICE_SNOC },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
.id = SDM845_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 6,
.links = { SDM845_SLAVE_APPSS,
SDM845_SLAVE_SNOC_CNOC,
SDM845_SLAVE_SNOC_MEM_NOC_SF,
SDM845_SLAVE_IMEM,
SDM845_SLAVE_PIMEM,
SDM845_SLAVE_QDSS_STM
},
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
.id = SDM845_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 9,
.links = { SDM845_SLAVE_APPSS,
SDM845_SLAVE_SNOC_CNOC,
SDM845_SLAVE_SNOC_MEM_NOC_SF,
SDM845_SLAVE_IMEM,
SDM845_SLAVE_PCIE_0,
SDM845_SLAVE_PCIE_1,
SDM845_SLAVE_PIMEM,
SDM845_SLAVE_QDSS_STM,
SDM845_SLAVE_TCU
},
};
static struct qcom_icc_node qnm_gladiator_sodv = {
.name = "qnm_gladiator_sodv",
.id = SDM845_MASTER_GNOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 8,
.links = { SDM845_SLAVE_APPSS,
SDM845_SLAVE_SNOC_CNOC,
SDM845_SLAVE_IMEM,
SDM845_SLAVE_PCIE_0,
SDM845_SLAVE_PCIE_1,
SDM845_SLAVE_PIMEM,
SDM845_SLAVE_QDSS_STM,
SDM845_SLAVE_TCU
},
};
static struct qcom_icc_node qnm_memnoc = {
.name = "qnm_memnoc",
.id = SDM845_MASTER_MEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 5,
.links = { SDM845_SLAVE_APPSS,
SDM845_SLAVE_SNOC_CNOC,
SDM845_SLAVE_IMEM,
SDM845_SLAVE_PIMEM,
SDM845_SLAVE_QDSS_STM
},
};
static struct qcom_icc_node qnm_pcie_anoc = {
.name = "qnm_pcie_anoc",
.id = SDM845_MASTER_ANOC_PCIE_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 5,
.links = { SDM845_SLAVE_APPSS,
SDM845_SLAVE_SNOC_CNOC,
SDM845_SLAVE_SNOC_MEM_NOC_SF,
SDM845_SLAVE_IMEM,
SDM845_SLAVE_QDSS_STM
},
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
.id = SDM845_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SDM845_SLAVE_SNOC_MEM_NOC_GC,
SDM845_SLAVE_IMEM
},
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
.id = SDM845_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SDM845_SLAVE_SNOC_MEM_NOC_GC,
SDM845_SLAVE_IMEM
},
};
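/*
 * Slave-side nodes. By convention, qns_* bridge onward to another NoC,
 * qhs_*/qxs_* are register or memory endpoints, xs_* leave the SoC
 * fabric, and srvc_* are the NoCs' own service ports.
 */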
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
.id = SDM845_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SDM845_MASTER_A1NOC_SNOC },
};
static struct qcom_icc_node srvc_aggre1_noc = {
.name = "srvc_aggre1_noc",
.id = SDM845_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { 0 },
};
static struct qcom_icc_node qns_pcie_a1noc_snoc = {
.name = "qns_pcie_a1noc_snoc",
.id = SDM845_SLAVE_ANOC_PCIE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SDM845_MASTER_ANOC_PCIE_SNOC },
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
.id = SDM845_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SDM845_MASTER_A2NOC_SNOC },
};
static struct qcom_icc_node qns_pcie_snoc = {
.name = "qns_pcie_snoc",
.id = SDM845_SLAVE_ANOC_PCIE_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SDM845_MASTER_ANOC_PCIE_SNOC },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
.id = SDM845_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_camnoc_uncomp = {
.name = "qns_camnoc_uncomp",
.id = SDM845_SLAVE_CAMNOC_UNCOMP,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node qhs_a1_noc_cfg = {
.name = "qhs_a1_noc_cfg",
.id = SDM845_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM845_MASTER_A1NOC_CFG },
};
static struct qcom_icc_node qhs_a2_noc_cfg = {
.name = "qhs_a2_noc_cfg",
.id = SDM845_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM845_MASTER_A2NOC_CFG },
};
static struct qcom_icc_node qhs_aop = {
.name = "qhs_aop",
.id = SDM845_SLAVE_AOP,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
.id = SDM845_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
.id = SDM845_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
.id = SDM845_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_compute_dsp_cfg = {
.name = "qhs_compute_dsp_cfg",
.id = SDM845_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
.id = SDM845_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
.id = SDM845_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dcc_cfg = {
.name = "qhs_dcc_cfg",
.id = SDM845_SLAVE_DCC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM845_MASTER_CNOC_DC_NOC },
};
static struct qcom_icc_node qhs_ddrss_cfg = {
.name = "qhs_ddrss_cfg",
.id = SDM845_SLAVE_CNOC_DDRSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
.id = SDM845_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_glm = {
.name = "qhs_glm",
.id = SDM845_SLAVE_GLM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
.id = SDM845_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
.id = SDM845_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
.id = SDM845_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mnoc_cfg = {
.name = "qhs_mnoc_cfg",
.id = SDM845_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM845_MASTER_CNOC_MNOC_CFG },
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
.id = SDM845_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie_gen3_cfg = {
.name = "qhs_pcie_gen3_cfg",
.id = SDM845_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
.id = SDM845_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_phy_refgen_south = {
.name = "qhs_phy_refgen_south",
.id = SDM845_SLAVE_SOUTH_PHY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
.id = SDM845_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
.id = SDM845_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
.id = SDM845_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qupv3_north = {
.name = "qhs_qupv3_north",
.id = SDM845_SLAVE_BLSP_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qupv3_south = {
.name = "qhs_qupv3_south",
.id = SDM845_SLAVE_BLSP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
.id = SDM845_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
.id = SDM845_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_snoc_cfg = {
.name = "qhs_snoc_cfg",
.id = SDM845_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM845_MASTER_SNOC_CFG },
};
static struct qcom_icc_node qhs_spdm = {
.name = "qhs_spdm",
.id = SDM845_SLAVE_SPDM_WRAPPER,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_spss_cfg = {
.name = "qhs_spss_cfg",
.id = SDM845_SLAVE_SPSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
.id = SDM845_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_north = {
.name = "qhs_tlmm_north",
.id = SDM845_SLAVE_TLMM_NORTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_south = {
.name = "qhs_tlmm_south",
.id = SDM845_SLAVE_TLMM_SOUTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tsif = {
.name = "qhs_tsif",
.id = SDM845_SLAVE_TSIF,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_card_cfg = {
.name = "qhs_ufs_card_cfg",
.id = SDM845_SLAVE_UFS_CARD_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
.id = SDM845_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
.id = SDM845_SLAVE_USB3_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_1 = {
.name = "qhs_usb3_1",
.id = SDM845_SLAVE_USB3_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
.id = SDM845_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
.id = SDM845_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_cnoc_a2noc = {
.name = "qns_cnoc_a2noc",
.id = SDM845_SLAVE_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM845_MASTER_CNOC_A2NOC },
};
static struct qcom_icc_node srvc_cnoc = {
.name = "srvc_cnoc",
.id = SDM845_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
.id = SDM845_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_memnoc = {
.name = "qhs_memnoc",
.id = SDM845_SLAVE_MEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDM845_MASTER_MEM_NOC_CFG },
};
static struct qcom_icc_node qns_gladiator_sodv = {
.name = "qns_gladiator_sodv",
.id = SDM845_SLAVE_GNOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM845_MASTER_GNOC_SNOC },
};
static struct qcom_icc_node qns_gnoc_memnoc = {
.name = "qns_gnoc_memnoc",
.id = SDM845_SLAVE_GNOC_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SDM845_MASTER_GNOC_MEM_NOC },
};
static struct qcom_icc_node srvc_gnoc = {
.name = "srvc_gnoc",
.id = SDM845_SLAVE_SERVICE_GNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
.id = SDM845_SLAVE_EBI1,
.channels = 4,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
.name = "qhs_mdsp_ms_mpu_cfg",
.id = SDM845_SLAVE_MSS_PROC_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_apps_io = {
.name = "qns_apps_io",
.id = SDM845_SLAVE_MEM_NOC_GNOC,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
.id = SDM845_SLAVE_LLCC,
.channels = 4,
.buswidth = 16,
.num_links = 1,
.links = { SDM845_MASTER_LLCC },
};
static struct qcom_icc_node qns_memnoc_snoc = {
.name = "qns_memnoc_snoc",
.id = SDM845_SLAVE_MEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM845_MASTER_MEM_NOC_SNOC },
};
static struct qcom_icc_node srvc_memnoc = {
.name = "srvc_memnoc",
.id = SDM845_SLAVE_SERVICE_MEM_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns2_mem_noc = {
.name = "qns2_mem_noc",
.id = SDM845_SLAVE_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SDM845_MASTER_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
.id = SDM845_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SDM845_MASTER_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
.id = SDM845_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
.id = SDM845_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qns_cnoc = {
.name = "qns_cnoc",
.id = SDM845_SLAVE_SNOC_CNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM845_MASTER_SNOC_CNOC },
};
static struct qcom_icc_node qns_memnoc_gc = {
.name = "qns_memnoc_gc",
.id = SDM845_SLAVE_SNOC_MEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDM845_MASTER_SNOC_GC_MEM_NOC },
};
static struct qcom_icc_node qns_memnoc_sf = {
.name = "qns_memnoc_sf",
.id = SDM845_SLAVE_SNOC_MEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SDM845_MASTER_SNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
.id = SDM845_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pcie = {
.name = "qxs_pcie",
.id = SDM845_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pcie_gen3 = {
.name = "qxs_pcie_gen3",
.id = SDM845_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
.id = SDM845_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
.id = SDM845_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
.id = SDM845_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
.id = SDM845_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
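/*
 * Bus Clock Manager (BCM) resources, voted through RPMh. Each BCM
 * aggregates the bandwidth requests of the nodes it lists; setting
 * .keepalive keeps a minimum vote in place so the path is never
 * clock-gated completely.
 */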
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
.keepalive = false,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_mc0 = {
.name = "MC0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_sh0 = {
.name = "SH0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_llcc },
};
static struct qcom_icc_bcm bcm_mm0 = {
.name = "MM0",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_bcm bcm_sh1 = {
.name = "SH1",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_apps_io },
};
static struct qcom_icc_bcm bcm_mm1 = {
.name = "MM1",
.keepalive = true,
.num_nodes = 7,
.nodes = { &qxm_camnoc_hf0_uncomp,
&qxm_camnoc_hf1_uncomp,
&qxm_camnoc_sf_uncomp,
&qxm_camnoc_hf0,
&qxm_camnoc_hf1,
&qxm_mdp0,
&qxm_mdp1
},
};
static struct qcom_icc_bcm bcm_sh2 = {
.name = "SH2",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_memnoc_snoc },
};
static struct qcom_icc_bcm bcm_mm2 = {
.name = "MM2",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns2_mem_noc },
};
static struct qcom_icc_bcm bcm_sh3 = {
.name = "SH3",
.keepalive = false,
.num_nodes = 1,
.nodes = { &acm_tcu },
};
static struct qcom_icc_bcm bcm_mm3 = {
.name = "MM3",
.keepalive = false,
.num_nodes = 5,
.nodes = { &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9 },
};
static struct qcom_icc_bcm bcm_sh5 = {
.name = "SH5",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_apps },
};
static struct qcom_icc_bcm bcm_sn0 = {
.name = "SN0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_memnoc_sf },
};
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxm_crypto },
};
static struct qcom_icc_bcm bcm_cn0 = {
.name = "CN0",
.keepalive = false,
.num_nodes = 47,
.nodes = { &qhm_spdm,
&qhm_tic,
&qnm_snoc,
&xm_qdss_dap,
&qhs_a1_noc_cfg,
&qhs_a2_noc_cfg,
&qhs_aop,
&qhs_aoss,
&qhs_camera_cfg,
&qhs_clk_ctl,
&qhs_compute_dsp_cfg,
&qhs_cpr_cx,
&qhs_crypto0_cfg,
&qhs_dcc_cfg,
&qhs_ddrss_cfg,
&qhs_display_cfg,
&qhs_glm,
&qhs_gpuss_cfg,
&qhs_imem_cfg,
&qhs_ipa,
&qhs_mnoc_cfg,
&qhs_pcie0_cfg,
&qhs_pcie_gen3_cfg,
&qhs_pdm,
&qhs_phy_refgen_south,
&qhs_pimem_cfg,
&qhs_prng,
&qhs_qdss_cfg,
&qhs_qupv3_north,
&qhs_qupv3_south,
&qhs_sdc2,
&qhs_sdc4,
&qhs_snoc_cfg,
&qhs_spdm,
&qhs_spss_cfg,
&qhs_tcsr,
&qhs_tlmm_north,
&qhs_tlmm_south,
&qhs_tsif,
&qhs_ufs_card_cfg,
&qhs_ufs_mem_cfg,
&qhs_usb3_0,
&qhs_usb3_1,
&qhs_venus_cfg,
&qhs_vsense_ctrl_cfg,
&qns_cnoc_a2noc,
&srvc_cnoc
},
};
static struct qcom_icc_bcm bcm_qup0 = {
.name = "QUP0",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qhm_qup1, &qhm_qup2 },
};
static struct qcom_icc_bcm bcm_sn1 = {
.name = "SN1",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxs_imem },
};
static struct qcom_icc_bcm bcm_sn2 = {
.name = "SN2",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_memnoc_gc },
};
static struct qcom_icc_bcm bcm_sn3 = {
.name = "SN3",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_cnoc },
};
static struct qcom_icc_bcm bcm_sn4 = {
.name = "SN4",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxm_pimem },
};
static struct qcom_icc_bcm bcm_sn5 = {
.name = "SN5",
.keepalive = false,
.num_nodes = 1,
.nodes = { &xs_qdss_stm },
};
static struct qcom_icc_bcm bcm_sn6 = {
.name = "SN6",
.keepalive = false,
.num_nodes = 3,
.nodes = { &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg },
};
static struct qcom_icc_bcm bcm_sn7 = {
.name = "SN7",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxs_pcie },
};
static struct qcom_icc_bcm bcm_sn8 = {
.name = "SN8",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxs_pcie_gen3 },
};
static struct qcom_icc_bcm bcm_sn9 = {
.name = "SN9",
.keepalive = false,
.num_nodes = 2,
.nodes = { &srvc_aggre1_noc, &qnm_aggre1_noc },
};
static struct qcom_icc_bcm bcm_sn11 = {
.name = "SN11",
.keepalive = false,
.num_nodes = 2,
.nodes = { &srvc_aggre2_noc, &qnm_aggre2_noc },
};
static struct qcom_icc_bcm bcm_sn12 = {
.name = "SN12",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qnm_gladiator_sodv, &xm_gic },
};
static struct qcom_icc_bcm bcm_sn14 = {
.name = "SN14",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_pcie_anoc },
};
static struct qcom_icc_bcm bcm_sn15 = {
.name = "SN15",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_memnoc },
};
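/*
 * Per-NoC provider tables. Each node array is indexed by the consumer-
 * visible ids from dt-bindings/interconnect/qcom,sdm845.h, and each
 * descriptor ties those nodes to the BCMs that scale them.
 */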
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_sn9,
&bcm_qup0,
};
static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
[MASTER_TSIF] = &qhm_tsif,
[MASTER_SDCC_2] = &xm_sdc2,
[MASTER_SDCC_4] = &xm_sdc4,
[MASTER_UFS_CARD] = &xm_ufs_card,
[MASTER_UFS_MEM] = &xm_ufs_mem,
[MASTER_PCIE_0] = &xm_pcie_0,
[SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
[SLAVE_ANOC_PCIE_A1NOC_SNOC] = &qns_pcie_a1noc_snoc,
[MASTER_QUP_1] = &qhm_qup1,
};
static const struct qcom_icc_desc sdm845_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
&bcm_sn11,
&bcm_qup0,
};
static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_CNOC_A2NOC] = &qnm_cnoc,
[MASTER_CRYPTO] = &qxm_crypto,
[MASTER_IPA] = &qxm_ipa,
[MASTER_PCIE_1] = &xm_pcie3_1,
[MASTER_QDSS_ETR] = &xm_qdss_etr,
[MASTER_USB3_0] = &xm_usb3_0,
[MASTER_USB3_1] = &xm_usb3_1,
[SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
[SLAVE_ANOC_PCIE_SNOC] = &qns_pcie_snoc,
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
[MASTER_QUP_2] = &qhm_qup2,
};
static const struct qcom_icc_desc sdm845_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
};
static struct qcom_icc_node * const config_noc_nodes[] = {
[MASTER_SPDM] = &qhm_spdm,
[MASTER_TIC] = &qhm_tic,
[MASTER_SNOC_CNOC] = &qnm_snoc,
[MASTER_QDSS_DAP] = &xm_qdss_dap,
[SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
[SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
[SLAVE_AOP] = &qhs_aop,
[SLAVE_AOSS] = &qhs_aoss,
[SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
[SLAVE_CLK_CTL] = &qhs_clk_ctl,
[SLAVE_CDSP_CFG] = &qhs_compute_dsp_cfg,
[SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
[SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
[SLAVE_DCC_CFG] = &qhs_dcc_cfg,
[SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
[SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
[SLAVE_GLM] = &qhs_glm,
[SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
[SLAVE_IMEM_CFG] = &qhs_imem_cfg,
[SLAVE_IPA_CFG] = &qhs_ipa,
[SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
[SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
[SLAVE_PCIE_1_CFG] = &qhs_pcie_gen3_cfg,
[SLAVE_PDM] = &qhs_pdm,
[SLAVE_SOUTH_PHY_CFG] = &qhs_phy_refgen_south,
[SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
[SLAVE_PRNG] = &qhs_prng,
[SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
[SLAVE_BLSP_2] = &qhs_qupv3_north,
[SLAVE_BLSP_1] = &qhs_qupv3_south,
[SLAVE_SDCC_2] = &qhs_sdc2,
[SLAVE_SDCC_4] = &qhs_sdc4,
[SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
[SLAVE_SPDM_WRAPPER] = &qhs_spdm,
[SLAVE_SPSS_CFG] = &qhs_spss_cfg,
[SLAVE_TCSR] = &qhs_tcsr,
[SLAVE_TLMM_NORTH] = &qhs_tlmm_north,
[SLAVE_TLMM_SOUTH] = &qhs_tlmm_south,
[SLAVE_TSIF] = &qhs_tsif,
[SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg,
[SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
[SLAVE_USB3_0] = &qhs_usb3_0,
[SLAVE_USB3_1] = &qhs_usb3_1,
[SLAVE_VENUS_CFG] = &qhs_venus_cfg,
[SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
[SLAVE_CNOC_A2NOC] = &qns_cnoc_a2noc,
[SLAVE_SERVICE_CNOC] = &srvc_cnoc,
};
static const struct qcom_icc_desc sdm845_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
static struct qcom_icc_bcm * const dc_noc_bcms[] = {
};
static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qhm_cnoc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
[SLAVE_MEM_NOC_CFG] = &qhs_memnoc,
};
static const struct qcom_icc_desc sdm845_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
.bcms = dc_noc_bcms,
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
static struct qcom_icc_bcm * const gladiator_noc_bcms[] = {
};
static struct qcom_icc_node * const gladiator_noc_nodes[] = {
[MASTER_APPSS_PROC] = &acm_l3,
[MASTER_GNOC_CFG] = &pm_gnoc_cfg,
[SLAVE_GNOC_SNOC] = &qns_gladiator_sodv,
[SLAVE_GNOC_MEM_NOC] = &qns_gnoc_memnoc,
[SLAVE_SERVICE_GNOC] = &srvc_gnoc,
};
static const struct qcom_icc_desc sdm845_gladiator_noc = {
.nodes = gladiator_noc_nodes,
.num_nodes = ARRAY_SIZE(gladiator_noc_nodes),
.bcms = gladiator_noc_bcms,
.num_bcms = ARRAY_SIZE(gladiator_noc_bcms),
};
static struct qcom_icc_bcm * const mem_noc_bcms[] = {
&bcm_mc0,
&bcm_acv,
&bcm_sh0,
&bcm_sh1,
&bcm_sh2,
&bcm_sh3,
&bcm_sh5,
};
static struct qcom_icc_node * const mem_noc_nodes[] = {
[MASTER_TCU_0] = &acm_tcu,
[MASTER_MEM_NOC_CFG] = &qhm_memnoc_cfg,
[MASTER_GNOC_MEM_NOC] = &qnm_apps,
[MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
[MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
[MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
[MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
[MASTER_GFX3D] = &qxm_gpu,
[SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
[SLAVE_MEM_NOC_GNOC] = &qns_apps_io,
[SLAVE_LLCC] = &qns_llcc,
[SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc,
[SLAVE_SERVICE_MEM_NOC] = &srvc_memnoc,
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
};
static const struct qcom_icc_desc sdm845_mem_noc = {
.nodes = mem_noc_nodes,
.num_nodes = ARRAY_SIZE(mem_noc_nodes),
.bcms = mem_noc_bcms,
.num_bcms = ARRAY_SIZE(mem_noc_bcms),
};
static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm2,
&bcm_mm3,
};
static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
[MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
[MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
[MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
[MASTER_MDP0] = &qxm_mdp0,
[MASTER_MDP1] = &qxm_mdp1,
[MASTER_ROTATOR] = &qxm_rot,
[MASTER_VIDEO_P0] = &qxm_venus0,
[MASTER_VIDEO_P1] = &qxm_venus1,
[MASTER_VIDEO_PROC] = &qxm_venus_arm9,
[SLAVE_MNOC_SF_MEM_NOC] = &qns2_mem_noc,
[SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
[MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
[MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
[MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
[SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
};
static const struct qcom_icc_desc sdm845_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn2,
&bcm_sn3,
&bcm_sn4,
&bcm_sn5,
&bcm_sn6,
&bcm_sn7,
&bcm_sn8,
&bcm_sn9,
&bcm_sn11,
&bcm_sn12,
&bcm_sn14,
&bcm_sn15,
};
static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_SNOC_CFG] = &qhm_snoc_cfg,
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
[MASTER_GNOC_SNOC] = &qnm_gladiator_sodv,
[MASTER_MEM_NOC_SNOC] = &qnm_memnoc,
[MASTER_ANOC_PCIE_SNOC] = &qnm_pcie_anoc,
[MASTER_PIMEM] = &qxm_pimem,
[MASTER_GIC] = &xm_gic,
[SLAVE_APPSS] = &qhs_apss,
[SLAVE_SNOC_CNOC] = &qns_cnoc,
[SLAVE_SNOC_MEM_NOC_GC] = &qns_memnoc_gc,
[SLAVE_SNOC_MEM_NOC_SF] = &qns_memnoc_sf,
[SLAVE_IMEM] = &qxs_imem,
[SLAVE_PCIE_0] = &qxs_pcie,
[SLAVE_PCIE_1] = &qxs_pcie_gen3,
[SLAVE_PIMEM] = &qxs_pimem,
[SLAVE_SERVICE_SNOC] = &srvc_snoc,
[SLAVE_QDSS_STM] = &xs_qdss_stm,
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
static const struct qcom_icc_desc sdm845_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
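/*
 * Every NoC instance probes as its own platform device; the compatible
 * string selects the matching provider descriptor above.
 */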
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sdm845-aggre1-noc",
.data = &sdm845_aggre1_noc},
{ .compatible = "qcom,sdm845-aggre2-noc",
.data = &sdm845_aggre2_noc},
{ .compatible = "qcom,sdm845-config-noc",
.data = &sdm845_config_noc},
{ .compatible = "qcom,sdm845-dc-noc",
.data = &sdm845_dc_noc},
{ .compatible = "qcom,sdm845-gladiator-noc",
.data = &sdm845_gladiator_noc},
{ .compatible = "qcom,sdm845-mem-noc",
.data = &sdm845_mem_noc},
{ .compatible = "qcom,sdm845-mmss-noc",
.data = &sdm845_mmss_noc},
{ .compatible = "qcom,sdm845-system-noc",
.data = &sdm845_system_noc},
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
.remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sdm845",
.of_match_table = qnoc_of_match,
.sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
MODULE_AUTHOR("David Dai <[email protected]>");
MODULE_DESCRIPTION("Qualcomm sdm845 NoC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/interconnect/qcom/sdm845.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sdx65.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sdx65.h"
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
.id = SDX65_MASTER_LLCC,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDX65_SLAVE_EBI1 },
};
static struct qcom_icc_node acm_tcu = {
.name = "acm_tcu",
.id = SDX65_MASTER_TCU_0,
.channels = 1,
.buswidth = 8,
.num_links = 3,
.links = { SDX65_SLAVE_LLCC,
SDX65_SLAVE_MEM_NOC_SNOC,
SDX65_SLAVE_MEM_NOC_PCIE_SNOC
},
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
.id = SDX65_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SDX65_SLAVE_LLCC },
};
static struct qcom_icc_node xm_apps_rdwr = {
.name = "xm_apps_rdwr",
.id = SDX65_MASTER_APPSS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
.links = { SDX65_SLAVE_LLCC,
SDX65_SLAVE_MEM_NOC_SNOC,
SDX65_SLAVE_MEM_NOC_PCIE_SNOC
},
};
static struct qcom_icc_node qhm_audio = {
.name = "qhm_audio",
.id = SDX65_MASTER_AUDIO,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDX65_SLAVE_ANOC_SNOC },
};
static struct qcom_icc_node qhm_blsp1 = {
.name = "qhm_blsp1",
.id = SDX65_MASTER_BLSP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDX65_SLAVE_ANOC_SNOC },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
.id = SDX65_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 26,
.links = { SDX65_SLAVE_AOSS,
SDX65_SLAVE_AUDIO,
SDX65_SLAVE_BLSP_1,
SDX65_SLAVE_CLK_CTL,
SDX65_SLAVE_CRYPTO_0_CFG,
SDX65_SLAVE_CNOC_DDRSS,
SDX65_SLAVE_ECC_CFG,
SDX65_SLAVE_IMEM_CFG,
SDX65_SLAVE_IPA_CFG,
SDX65_SLAVE_CNOC_MSS,
SDX65_SLAVE_PCIE_PARF,
SDX65_SLAVE_PDM,
SDX65_SLAVE_PRNG,
SDX65_SLAVE_QDSS_CFG,
SDX65_SLAVE_QPIC,
SDX65_SLAVE_SDCC_1,
SDX65_SLAVE_SNOC_CFG,
SDX65_SLAVE_SPMI_FETCHER,
SDX65_SLAVE_SPMI_VGI_COEX,
SDX65_SLAVE_TCSR,
SDX65_SLAVE_TLMM,
SDX65_SLAVE_USB3,
SDX65_SLAVE_USB3_PHY_CFG,
SDX65_SLAVE_SNOC_MEM_NOC_GC,
SDX65_SLAVE_IMEM,
SDX65_SLAVE_TCU
},
};
static struct qcom_icc_node qhm_qpic = {
.name = "qhm_qpic",
.id = SDX65_MASTER_QPIC,
.channels = 1,
.buswidth = 4,
.num_links = 4,
.links = { SDX65_SLAVE_AOSS,
SDX65_SLAVE_AUDIO,
SDX65_SLAVE_IPA_CFG,
SDX65_SLAVE_ANOC_SNOC
},
};
static struct qcom_icc_node qhm_snoc_cfg = {
.name = "qhm_snoc_cfg",
.id = SDX65_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDX65_SLAVE_SERVICE_SNOC },
};
static struct qcom_icc_node qhm_spmi_fetcher1 = {
.name = "qhm_spmi_fetcher1",
.id = SDX65_MASTER_SPMI_FETCHER,
.channels = 1,
.buswidth = 4,
.num_links = 2,
.links = { SDX65_SLAVE_AOSS,
SDX65_SLAVE_ANOC_SNOC
},
};
static struct qcom_icc_node qnm_aggre_noc = {
.name = "qnm_aggre_noc",
.id = SDX65_MASTER_ANOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 29,
.links = { SDX65_SLAVE_AOSS,
SDX65_SLAVE_APPSS,
SDX65_SLAVE_AUDIO,
SDX65_SLAVE_BLSP_1,
SDX65_SLAVE_CLK_CTL,
SDX65_SLAVE_CRYPTO_0_CFG,
SDX65_SLAVE_CNOC_DDRSS,
SDX65_SLAVE_ECC_CFG,
SDX65_SLAVE_IMEM_CFG,
SDX65_SLAVE_IPA_CFG,
SDX65_SLAVE_CNOC_MSS,
SDX65_SLAVE_PCIE_PARF,
SDX65_SLAVE_PDM,
SDX65_SLAVE_PRNG,
SDX65_SLAVE_QDSS_CFG,
SDX65_SLAVE_QPIC,
SDX65_SLAVE_SDCC_1,
SDX65_SLAVE_SNOC_CFG,
SDX65_SLAVE_SPMI_FETCHER,
SDX65_SLAVE_SPMI_VGI_COEX,
SDX65_SLAVE_TCSR,
SDX65_SLAVE_TLMM,
SDX65_SLAVE_USB3,
SDX65_SLAVE_USB3_PHY_CFG,
SDX65_SLAVE_SNOC_MEM_NOC_GC,
SDX65_SLAVE_IMEM,
SDX65_SLAVE_PCIE_0,
SDX65_SLAVE_QDSS_STM,
SDX65_SLAVE_TCU
},
};
static struct qcom_icc_node qnm_ipa = {
.name = "qnm_ipa",
.id = SDX65_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 26,
.links = { SDX65_SLAVE_AOSS,
SDX65_SLAVE_AUDIO,
SDX65_SLAVE_BLSP_1,
SDX65_SLAVE_CLK_CTL,
SDX65_SLAVE_CRYPTO_0_CFG,
SDX65_SLAVE_CNOC_DDRSS,
SDX65_SLAVE_ECC_CFG,
SDX65_SLAVE_IMEM_CFG,
SDX65_SLAVE_IPA_CFG,
SDX65_SLAVE_CNOC_MSS,
SDX65_SLAVE_PCIE_PARF,
SDX65_SLAVE_PDM,
SDX65_SLAVE_PRNG,
SDX65_SLAVE_QDSS_CFG,
SDX65_SLAVE_QPIC,
SDX65_SLAVE_SDCC_1,
SDX65_SLAVE_SNOC_CFG,
SDX65_SLAVE_SPMI_FETCHER,
SDX65_SLAVE_TCSR,
SDX65_SLAVE_TLMM,
SDX65_SLAVE_USB3,
SDX65_SLAVE_USB3_PHY_CFG,
SDX65_SLAVE_SNOC_MEM_NOC_GC,
SDX65_SLAVE_IMEM,
SDX65_SLAVE_PCIE_0,
SDX65_SLAVE_QDSS_STM
},
};
static struct qcom_icc_node qnm_memnoc = {
.name = "qnm_memnoc",
.id = SDX65_MASTER_MEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 27,
.links = { SDX65_SLAVE_AOSS,
SDX65_SLAVE_APPSS,
SDX65_SLAVE_AUDIO,
SDX65_SLAVE_BLSP_1,
SDX65_SLAVE_CLK_CTL,
SDX65_SLAVE_CRYPTO_0_CFG,
SDX65_SLAVE_CNOC_DDRSS,
SDX65_SLAVE_ECC_CFG,
SDX65_SLAVE_IMEM_CFG,
SDX65_SLAVE_IPA_CFG,
SDX65_SLAVE_CNOC_MSS,
SDX65_SLAVE_PCIE_PARF,
SDX65_SLAVE_PDM,
SDX65_SLAVE_PRNG,
SDX65_SLAVE_QDSS_CFG,
SDX65_SLAVE_QPIC,
SDX65_SLAVE_SDCC_1,
SDX65_SLAVE_SNOC_CFG,
SDX65_SLAVE_SPMI_FETCHER,
SDX65_SLAVE_SPMI_VGI_COEX,
SDX65_SLAVE_TCSR,
SDX65_SLAVE_TLMM,
SDX65_SLAVE_USB3,
SDX65_SLAVE_USB3_PHY_CFG,
SDX65_SLAVE_IMEM,
SDX65_SLAVE_QDSS_STM,
SDX65_SLAVE_TCU
},
};
static struct qcom_icc_node qnm_memnoc_pcie = {
.name = "qnm_memnoc_pcie",
.id = SDX65_MASTER_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDX65_SLAVE_PCIE_0 },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
.id = SDX65_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SDX65_SLAVE_AOSS,
SDX65_SLAVE_ANOC_SNOC
},
};
static struct qcom_icc_node xm_ipa2pcie_slv = {
.name = "xm_ipa2pcie_slv",
.id = SDX65_MASTER_IPA_PCIE,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDX65_SLAVE_PCIE_0 },
};
static struct qcom_icc_node xm_pcie = {
.name = "xm_pcie",
.id = SDX65_MASTER_PCIE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDX65_SLAVE_ANOC_SNOC },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
.id = SDX65_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 26,
.links = { SDX65_SLAVE_AOSS,
SDX65_SLAVE_AUDIO,
SDX65_SLAVE_BLSP_1,
SDX65_SLAVE_CLK_CTL,
SDX65_SLAVE_CRYPTO_0_CFG,
SDX65_SLAVE_CNOC_DDRSS,
SDX65_SLAVE_ECC_CFG,
SDX65_SLAVE_IMEM_CFG,
SDX65_SLAVE_IPA_CFG,
SDX65_SLAVE_CNOC_MSS,
SDX65_SLAVE_PCIE_PARF,
SDX65_SLAVE_PDM,
SDX65_SLAVE_PRNG,
SDX65_SLAVE_QDSS_CFG,
SDX65_SLAVE_QPIC,
SDX65_SLAVE_SDCC_1,
SDX65_SLAVE_SNOC_CFG,
SDX65_SLAVE_SPMI_FETCHER,
SDX65_SLAVE_SPMI_VGI_COEX,
SDX65_SLAVE_TCSR,
SDX65_SLAVE_TLMM,
SDX65_SLAVE_USB3,
SDX65_SLAVE_USB3_PHY_CFG,
SDX65_SLAVE_SNOC_MEM_NOC_GC,
SDX65_SLAVE_IMEM,
SDX65_SLAVE_TCU
},
};
static struct qcom_icc_node xm_sdc1 = {
.name = "xm_sdc1",
.id = SDX65_MASTER_SDCC_1,
.channels = 1,
.buswidth = 8,
.num_links = 4,
.links = { SDX65_SLAVE_AOSS,
SDX65_SLAVE_AUDIO,
SDX65_SLAVE_IPA_CFG,
SDX65_SLAVE_ANOC_SNOC
},
};
static struct qcom_icc_node xm_usb3 = {
.name = "xm_usb3",
.id = SDX65_MASTER_USB3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDX65_SLAVE_ANOC_SNOC },
};
static struct qcom_icc_node ebi = {
.name = "ebi",
.id = SDX65_SLAVE_EBI1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
.id = SDX65_SLAVE_LLCC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SDX65_MASTER_LLCC },
};
static struct qcom_icc_node qns_memnoc_snoc = {
.name = "qns_memnoc_snoc",
.id = SDX65_SLAVE_MEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDX65_MASTER_MEM_NOC_SNOC },
};
static struct qcom_icc_node qns_sys_pcie = {
.name = "qns_sys_pcie",
.id = SDX65_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDX65_MASTER_MEM_NOC_PCIE_SNOC },
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
.id = SDX65_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
.id = SDX65_SLAVE_APPSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_audio = {
.name = "qhs_audio",
.id = SDX65_SLAVE_AUDIO,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_blsp1 = {
.name = "qhs_blsp1",
.id = SDX65_SLAVE_BLSP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
.id = SDX65_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
.id = SDX65_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ddrss_cfg = {
.name = "qhs_ddrss_cfg",
.id = SDX65_SLAVE_CNOC_DDRSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ecc_cfg = {
.name = "qhs_ecc_cfg",
.id = SDX65_SLAVE_ECC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
.id = SDX65_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
.id = SDX65_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mss_cfg = {
.name = "qhs_mss_cfg",
.id = SDX65_SLAVE_CNOC_MSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie_parf = {
.name = "qhs_pcie_parf",
.id = SDX65_SLAVE_PCIE_PARF,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
.id = SDX65_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
.id = SDX65_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
.id = SDX65_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qpic = {
.name = "qhs_qpic",
.id = SDX65_SLAVE_QPIC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc1 = {
.name = "qhs_sdc1",
.id = SDX65_SLAVE_SDCC_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_snoc_cfg = {
.name = "qhs_snoc_cfg",
.id = SDX65_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SDX65_MASTER_SNOC_CFG },
};
static struct qcom_icc_node qhs_spmi_fetcher = {
.name = "qhs_spmi_fetcher",
.id = SDX65_SLAVE_SPMI_FETCHER,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_spmi_vgi_coex = {
.name = "qhs_spmi_vgi_coex",
.id = SDX65_SLAVE_SPMI_VGI_COEX,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
.id = SDX65_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
.id = SDX65_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3 = {
.name = "qhs_usb3",
.id = SDX65_SLAVE_USB3,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_phy = {
.name = "qhs_usb3_phy",
.id = SDX65_SLAVE_USB3_PHY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_aggre_noc = {
.name = "qns_aggre_noc",
.id = SDX65_SLAVE_ANOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SDX65_MASTER_ANOC_SNOC },
};
static struct qcom_icc_node qns_snoc_memnoc = {
.name = "qns_snoc_memnoc",
.id = SDX65_SLAVE_SNOC_MEM_NOC_GC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SDX65_MASTER_SNOC_GC_MEM_NOC },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
.id = SDX65_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
.id = SDX65_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_pcie = {
.name = "xs_pcie",
.id = SDX65_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
.id = SDX65_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
.id = SDX65_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
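/*
 * BCMs voted through RPMh. PN0 covers the config-slave path and is
 * marked keepalive so a minimum vote always remains in place.
 */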
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxm_crypto },
};
static struct qcom_icc_bcm bcm_mc0 = {
.name = "MC0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_pn0 = {
.name = "PN0",
.keepalive = true,
.num_nodes = 26,
.nodes = { &qhm_snoc_cfg,
&qhs_aoss,
&qhs_apss,
&qhs_audio,
&qhs_blsp1,
&qhs_clk_ctl,
&qhs_crypto0_cfg,
&qhs_ddrss_cfg,
&qhs_ecc_cfg,
&qhs_imem_cfg,
&qhs_ipa,
&qhs_mss_cfg,
&qhs_pcie_parf,
&qhs_pdm,
&qhs_prng,
&qhs_qdss_cfg,
&qhs_qpic,
&qhs_sdc1,
&qhs_snoc_cfg,
&qhs_spmi_fetcher,
&qhs_spmi_vgi_coex,
&qhs_tcsr,
&qhs_tlmm,
&qhs_usb3,
&qhs_usb3_phy,
&srvc_snoc
},
};
static struct qcom_icc_bcm bcm_pn1 = {
.name = "PN1",
.keepalive = false,
.num_nodes = 1,
.nodes = { &xm_sdc1 },
};
static struct qcom_icc_bcm bcm_pn2 = {
.name = "PN2",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qhm_audio, &qhm_spmi_fetcher1 },
};
static struct qcom_icc_bcm bcm_pn3 = {
.name = "PN3",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qhm_blsp1, &qhm_qpic },
};
static struct qcom_icc_bcm bcm_pn4 = {
.name = "PN4",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxm_crypto },
};
static struct qcom_icc_bcm bcm_sh0 = {
.name = "SH0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_llcc },
};
static struct qcom_icc_bcm bcm_sh1 = {
.name = "SH1",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_memnoc_snoc },
};
static struct qcom_icc_bcm bcm_sh3 = {
.name = "SH3",
.keepalive = false,
.num_nodes = 1,
.nodes = { &xm_apps_rdwr },
};
static struct qcom_icc_bcm bcm_sn0 = {
.name = "SN0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_snoc_memnoc },
};
static struct qcom_icc_bcm bcm_sn1 = {
.name = "SN1",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxs_imem },
};
static struct qcom_icc_bcm bcm_sn2 = {
.name = "SN2",
.keepalive = false,
.num_nodes = 1,
.nodes = { &xs_qdss_stm },
};
static struct qcom_icc_bcm bcm_sn3 = {
.name = "SN3",
.keepalive = false,
.num_nodes = 1,
.nodes = { &xs_sys_tcu_cfg },
};
static struct qcom_icc_bcm bcm_sn5 = {
.name = "SN5",
.keepalive = false,
.num_nodes = 1,
.nodes = { &xs_pcie },
};
static struct qcom_icc_bcm bcm_sn6 = {
.name = "SN6",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qhm_qdss_bam, &xm_qdss_etr },
};
static struct qcom_icc_bcm bcm_sn7 = {
.name = "SN7",
.keepalive = false,
.num_nodes = 4,
.nodes = { &qnm_aggre_noc, &xm_pcie, &xm_usb3, &qns_aggre_noc },
};
static struct qcom_icc_bcm bcm_sn8 = {
.name = "SN8",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_memnoc },
};
static struct qcom_icc_bcm bcm_sn9 = {
.name = "SN9",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_memnoc_pcie },
};
static struct qcom_icc_bcm bcm_sn10 = {
.name = "SN10",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qnm_ipa, &xm_ipa2pcie_slv },
};
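/*
 * Provider descriptors: mc_virt models the LLCC-to-DDR path, mem_noc
 * the memory NoC, and system_noc everything routed through the SNoC.
 */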
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_mc0,
};
static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
};
static const struct qcom_icc_desc sdx65_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
static struct qcom_icc_bcm * const mem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh1,
&bcm_sh3,
};
static struct qcom_icc_node * const mem_noc_nodes[] = {
[MASTER_TCU_0] = &acm_tcu,
[MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
[MASTER_APPSS_PROC] = &xm_apps_rdwr,
[SLAVE_LLCC] = &qns_llcc,
[SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc,
[SLAVE_MEM_NOC_PCIE_SNOC] = &qns_sys_pcie,
};
static const struct qcom_icc_desc sdx65_mem_noc = {
.nodes = mem_noc_nodes,
.num_nodes = ARRAY_SIZE(mem_noc_nodes),
.bcms = mem_noc_bcms,
.num_bcms = ARRAY_SIZE(mem_noc_bcms),
};
static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_ce0,
&bcm_pn0,
&bcm_pn1,
&bcm_pn2,
&bcm_pn3,
&bcm_pn4,
&bcm_sn0,
&bcm_sn1,
&bcm_sn2,
&bcm_sn3,
&bcm_sn5,
&bcm_sn6,
&bcm_sn7,
&bcm_sn8,
&bcm_sn9,
&bcm_sn10,
};
static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_AUDIO] = &qhm_audio,
[MASTER_BLSP_1] = &qhm_blsp1,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QPIC] = &qhm_qpic,
[MASTER_SNOC_CFG] = &qhm_snoc_cfg,
[MASTER_SPMI_FETCHER] = &qhm_spmi_fetcher1,
[MASTER_ANOC_SNOC] = &qnm_aggre_noc,
[MASTER_IPA] = &qnm_ipa,
[MASTER_MEM_NOC_SNOC] = &qnm_memnoc,
[MASTER_MEM_NOC_PCIE_SNOC] = &qnm_memnoc_pcie,
[MASTER_CRYPTO] = &qxm_crypto,
[MASTER_IPA_PCIE] = &xm_ipa2pcie_slv,
[MASTER_PCIE_0] = &xm_pcie,
[MASTER_QDSS_ETR] = &xm_qdss_etr,
[MASTER_SDCC_1] = &xm_sdc1,
[MASTER_USB3] = &xm_usb3,
[SLAVE_AOSS] = &qhs_aoss,
[SLAVE_APPSS] = &qhs_apss,
[SLAVE_AUDIO] = &qhs_audio,
[SLAVE_BLSP_1] = &qhs_blsp1,
[SLAVE_CLK_CTL] = &qhs_clk_ctl,
[SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
[SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
[SLAVE_ECC_CFG] = &qhs_ecc_cfg,
[SLAVE_IMEM_CFG] = &qhs_imem_cfg,
[SLAVE_IPA_CFG] = &qhs_ipa,
[SLAVE_CNOC_MSS] = &qhs_mss_cfg,
[SLAVE_PCIE_PARF] = &qhs_pcie_parf,
[SLAVE_PDM] = &qhs_pdm,
[SLAVE_PRNG] = &qhs_prng,
[SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
[SLAVE_QPIC] = &qhs_qpic,
[SLAVE_SDCC_1] = &qhs_sdc1,
[SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
[SLAVE_SPMI_FETCHER] = &qhs_spmi_fetcher,
[SLAVE_SPMI_VGI_COEX] = &qhs_spmi_vgi_coex,
[SLAVE_TCSR] = &qhs_tcsr,
[SLAVE_TLMM] = &qhs_tlmm,
[SLAVE_USB3] = &qhs_usb3,
[SLAVE_USB3_PHY_CFG] = &qhs_usb3_phy,
[SLAVE_ANOC_SNOC] = &qns_aggre_noc,
[SLAVE_SNOC_MEM_NOC_GC] = &qns_snoc_memnoc,
[SLAVE_IMEM] = &qxs_imem,
[SLAVE_SERVICE_SNOC] = &srvc_snoc,
[SLAVE_PCIE_0] = &xs_pcie,
[SLAVE_QDSS_STM] = &xs_qdss_stm,
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
static const struct qcom_icc_desc sdx65_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sdx65-mc-virt",
.data = &sdx65_mc_virt},
{ .compatible = "qcom,sdx65-mem-noc",
.data = &sdx65_mem_noc},
{ .compatible = "qcom,sdx65-system-noc",
.data = &sdx65_system_noc},
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
.remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sdx65",
.of_match_table = qnoc_of_match,
.sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
MODULE_DESCRIPTION("Qualcomm SDX65 NoC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/interconnect/qcom/sdx65.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*
*/
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,qdu1000-rpmh.h>
#include "bcm-voter.h"
#include "icc-common.h"
#include "icc-rpmh.h"
#include "qdu1000.h"
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
.id = QDU1000_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { QDU1000_SLAVE_QUP_CORE_0 },
};
static struct qcom_icc_node qup1_core_master = {
.name = "qup1_core_master",
.id = QDU1000_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { QDU1000_SLAVE_QUP_CORE_1 },
};
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
.id = QDU1000_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC },
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
.id = QDU1000_MASTER_APPSS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 4,
.links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC,
QDU1000_SLAVE_GEMNOC_MODEM_CNOC, QDU1000_SLAVE_MEM_NOC_PCIE_SNOC
},
};
static struct qcom_icc_node qnm_ecpri_dma = {
.name = "qnm_ecpri_dma",
.id = QDU1000_MASTER_GEMNOC_ECPRI_DMA,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_fec_2_gemnoc = {
.name = "qnm_fec_2_gemnoc",
.id = QDU1000_MASTER_FEC_2_GEMNOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
.id = QDU1000_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 64,
.num_links = 3,
.links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC,
QDU1000_SLAVE_GEMNOC_MODEM_CNOC
},
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
.id = QDU1000_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { QDU1000_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
.id = QDU1000_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 4,
.links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC,
QDU1000_SLAVE_GEMNOC_MODEM_CNOC, QDU1000_SLAVE_MEM_NOC_PCIE_SNOC
},
};
static struct qcom_icc_node qxm_mdsp = {
.name = "qxm_mdsp",
.id = QDU1000_MASTER_MSS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
.links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC,
QDU1000_SLAVE_MEM_NOC_PCIE_SNOC
},
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
.id = QDU1000_MASTER_LLCC,
.channels = 8,
.buswidth = 4,
.num_links = 1,
.links = { QDU1000_SLAVE_EBI1 },
};
static struct qcom_icc_node qhm_gic = {
.name = "qhm_gic",
.id = QDU1000_MASTER_GIC_AHB,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { QDU1000_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
.id = QDU1000_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { QDU1000_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qhm_qpic = {
.name = "qhm_qpic",
.id = QDU1000_MASTER_QPIC,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { QDU1000_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
.id = QDU1000_MASTER_QSPI_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { QDU1000_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
.id = QDU1000_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { QDU1000_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
.id = QDU1000_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { QDU1000_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_system_noc_cfg = {
.name = "qhm_system_noc_cfg",
.id = QDU1000_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { QDU1000_SLAVE_SERVICE_SNOC },
};
static struct qcom_icc_node qnm_aggre_noc = {
.name = "qnm_aggre_noc",
.id = QDU1000_MASTER_ANOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { QDU1000_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qnm_aggre_noc_gsi = {
.name = "qnm_aggre_noc_gsi",
.id = QDU1000_MASTER_ANOC_GSI,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { QDU1000_SLAVE_SNOC_GEM_NOC_GC },
};
static struct qcom_icc_node qnm_gemnoc_cnoc = {
.name = "qnm_gemnoc_cnoc",
.id = QDU1000_MASTER_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 36,
.links = { QDU1000_SLAVE_AHB2PHY_SOUTH, QDU1000_SLAVE_AHB2PHY_NORTH,
QDU1000_SLAVE_AHB2PHY_EAST, QDU1000_SLAVE_AOSS,
QDU1000_SLAVE_CLK_CTL, QDU1000_SLAVE_RBCPR_CX_CFG,
QDU1000_SLAVE_RBCPR_MX_CFG, QDU1000_SLAVE_CRYPTO_0_CFG,
QDU1000_SLAVE_ECPRI_CFG, QDU1000_SLAVE_IMEM_CFG,
QDU1000_SLAVE_IPC_ROUTER_CFG, QDU1000_SLAVE_CNOC_MSS,
QDU1000_SLAVE_PCIE_CFG, QDU1000_SLAVE_PDM,
QDU1000_SLAVE_PIMEM_CFG, QDU1000_SLAVE_PRNG,
QDU1000_SLAVE_QDSS_CFG, QDU1000_SLAVE_QPIC,
QDU1000_SLAVE_QSPI_0, QDU1000_SLAVE_QUP_0,
QDU1000_SLAVE_QUP_1, QDU1000_SLAVE_SDCC_2,
QDU1000_SLAVE_SMBUS_CFG, QDU1000_SLAVE_SNOC_CFG,
QDU1000_SLAVE_TCSR, QDU1000_SLAVE_TLMM,
QDU1000_SLAVE_TME_CFG, QDU1000_SLAVE_TSC_CFG,
QDU1000_SLAVE_USB3_0, QDU1000_SLAVE_VSENSE_CTRL_CFG,
QDU1000_SLAVE_DDRSS_CFG, QDU1000_SLAVE_IMEM,
QDU1000_SLAVE_PIMEM, QDU1000_SLAVE_ETHERNET_SS,
QDU1000_SLAVE_QDSS_STM, QDU1000_SLAVE_TCU
},
};
static struct qcom_icc_node qnm_gemnoc_modem_slave = {
.name = "qnm_gemnoc_modem_slave",
.id = QDU1000_MASTER_GEMNOC_MODEM_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { QDU1000_SLAVE_MODEM_OFFLINE },
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
.id = QDU1000_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { QDU1000_SLAVE_PCIE_0 },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
.id = QDU1000_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { QDU1000_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qxm_ecpri_gsi = {
.name = "qxm_ecpri_gsi",
.id = QDU1000_MASTER_ECPRI_GSI,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { QDU1000_SLAVE_ANOC_SNOC_GSI, QDU1000_SLAVE_PCIE_0 },
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
.id = QDU1000_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { QDU1000_SLAVE_SNOC_GEM_NOC_GC },
};
static struct qcom_icc_node xm_ecpri_dma = {
.name = "xm_ecpri_dma",
.id = QDU1000_MASTER_SNOC_ECPRI_DMA,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { QDU1000_SLAVE_ECPRI_GEMNOC, QDU1000_SLAVE_PCIE_0 },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
.id = QDU1000_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { QDU1000_SLAVE_SNOC_GEM_NOC_GC },
};
static struct qcom_icc_node xm_pcie = {
.name = "xm_pcie",
.id = QDU1000_MASTER_PCIE,
.channels = 1,
.buswidth = 64,
.num_links = 1,
.links = { QDU1000_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node xm_qdss_etr0 = {
.name = "xm_qdss_etr0",
.id = QDU1000_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { QDU1000_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node xm_qdss_etr1 = {
.name = "xm_qdss_etr1",
.id = QDU1000_MASTER_QDSS_ETR_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { QDU1000_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node xm_sdc = {
.name = "xm_sdc",
.id = QDU1000_MASTER_SDCC_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { QDU1000_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_usb3 = {
.name = "xm_usb3",
.id = QDU1000_MASTER_USB3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { QDU1000_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
.id = QDU1000_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qup1_core_slave = {
.name = "qup1_core_slave",
.id = QDU1000_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_gem_noc_cnoc = {
.name = "qns_gem_noc_cnoc",
.id = QDU1000_SLAVE_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { QDU1000_MASTER_GEM_NOC_CNOC },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
.id = QDU1000_SLAVE_LLCC,
.channels = 8,
.buswidth = 16,
.num_links = 1,
.links = { QDU1000_MASTER_LLCC },
};
static struct qcom_icc_node qns_modem_slave = {
.name = "qns_modem_slave",
.id = QDU1000_SLAVE_GEMNOC_MODEM_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { QDU1000_MASTER_GEMNOC_MODEM_CNOC },
};
static struct qcom_icc_node qns_pcie = {
.name = "qns_pcie",
.id = QDU1000_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { QDU1000_MASTER_GEM_NOC_PCIE_SNOC },
};
static struct qcom_icc_node ebi = {
.name = "ebi",
.id = QDU1000_SLAVE_EBI1,
.channels = 8,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy0_south = {
.name = "qhs_ahb2phy0_south",
.id = QDU1000_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy1_north = {
.name = "qhs_ahb2phy1_north",
.id = QDU1000_SLAVE_AHB2PHY_NORTH,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy2_east = {
.name = "qhs_ahb2phy2_east",
.id = QDU1000_SLAVE_AHB2PHY_EAST,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
.id = QDU1000_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
.id = QDU1000_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
.id = QDU1000_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mx = {
.name = "qhs_cpr_mx",
.id = QDU1000_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_crypto_cfg = {
.name = "qhs_crypto_cfg",
.id = QDU1000_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_ecpri_cfg = {
.name = "qhs_ecpri_cfg",
.id = QDU1000_SLAVE_ECPRI_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
.id = QDU1000_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
.id = QDU1000_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_mss_cfg = {
.name = "qhs_mss_cfg",
.id = QDU1000_SLAVE_CNOC_MSS,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_pcie_cfg = {
.name = "qhs_pcie_cfg",
.id = QDU1000_SLAVE_PCIE_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
.id = QDU1000_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
.id = QDU1000_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
.id = QDU1000_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
.id = QDU1000_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_qpic = {
.name = "qhs_qpic",
.id = QDU1000_SLAVE_QPIC,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
.id = QDU1000_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
.id = QDU1000_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
.id = QDU1000_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
.id = QDU1000_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_smbus_cfg = {
.name = "qhs_smbus_cfg",
.id = QDU1000_SLAVE_SMBUS_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_system_noc_cfg = {
.name = "qhs_system_noc_cfg",
.id = QDU1000_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { QDU1000_MASTER_SNOC_CFG },
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
.id = QDU1000_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
.id = QDU1000_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_tme_cfg = {
.name = "qhs_tme_cfg",
.id = QDU1000_SLAVE_TME_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_tsc_cfg = {
.name = "qhs_tsc_cfg",
.id = QDU1000_SLAVE_TSC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_usb3 = {
.name = "qhs_usb3",
.id = QDU1000_SLAVE_USB3_0,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
.id = QDU1000_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
.id = QDU1000_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { QDU1000_MASTER_ANOC_SNOC },
};
static struct qcom_icc_node qns_anoc_snoc_gsi = {
.name = "qns_anoc_snoc_gsi",
.id = QDU1000_SLAVE_ANOC_SNOC_GSI,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { QDU1000_MASTER_ANOC_GSI },
};
static struct qcom_icc_node qns_ddrss_cfg = {
.name = "qns_ddrss_cfg",
.id = QDU1000_SLAVE_DDRSS_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_ecpri_gemnoc = {
.name = "qns_ecpri_gemnoc",
.id = QDU1000_SLAVE_ECPRI_GEMNOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { QDU1000_MASTER_GEMNOC_ECPRI_DMA },
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
.id = QDU1000_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { QDU1000_MASTER_SNOC_GC_MEM_NOC },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
.id = QDU1000_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { QDU1000_MASTER_SNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qns_modem = {
.name = "qns_modem",
.id = QDU1000_SLAVE_MODEM_OFFLINE,
.channels = 1,
.buswidth = 32,
.num_links = 0,
};
static struct qcom_icc_node qns_pcie_gemnoc = {
.name = "qns_pcie_gemnoc",
.id = QDU1000_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 64,
.num_links = 1,
.links = { QDU1000_MASTER_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
.id = QDU1000_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
.id = QDU1000_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node srvc_system_noc = {
.name = "srvc_system_noc",
.id = QDU1000_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node xs_ethernet_ss = {
.name = "xs_ethernet_ss",
.id = QDU1000_SLAVE_ETHERNET_SS,
.channels = 1,
.buswidth = 32,
.num_links = 0,
};
static struct qcom_icc_node xs_pcie = {
.name = "xs_pcie",
.id = QDU1000_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 64,
.num_links = 0,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
.id = QDU1000_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
.id = QDU1000_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
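/*
 * BCM definitions; fields left unset here (e.g. .keepalive) default to
 * zero/false by static initialization.
 */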
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.num_nodes = 1,
.nodes = { &qxm_crypto },
};
static struct qcom_icc_bcm bcm_cn0 = {
.name = "CN0",
.num_nodes = 44,
.nodes = { &qhm_qpic, &qhm_qspi,
&qnm_gemnoc_cnoc, &qnm_gemnoc_modem_slave,
&qnm_gemnoc_pcie, &xm_sdc,
&xm_usb3, &qhs_ahb2phy0_south,
&qhs_ahb2phy1_north, &qhs_ahb2phy2_east,
&qhs_aoss, &qhs_clk_ctl,
&qhs_cpr_cx, &qhs_cpr_mx,
&qhs_crypto_cfg, &qhs_ecpri_cfg,
&qhs_imem_cfg, &qhs_ipc_router,
&qhs_mss_cfg, &qhs_pcie_cfg,
&qhs_pdm, &qhs_pimem_cfg,
&qhs_prng, &qhs_qdss_cfg,
&qhs_qpic, &qhs_qspi,
&qhs_qup0, &qhs_qup1,
&qhs_sdc2, &qhs_smbus_cfg,
&qhs_system_noc_cfg, &qhs_tcsr,
&qhs_tlmm, &qhs_tme_cfg,
&qhs_tsc_cfg, &qhs_usb3,
&qhs_vsense_ctrl_cfg, &qns_ddrss_cfg,
&qns_modem, &qxs_imem,
&qxs_pimem, &xs_ethernet_ss,
&xs_qdss_stm, &xs_sys_tcu_cfg
},
};
static struct qcom_icc_bcm bcm_mc0 = {
.name = "MC0",
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_qup0 = {
.name = "QUP0",
.num_nodes = 2,
.nodes = { &qup0_core_slave, &qup1_core_slave },
};
static struct qcom_icc_bcm bcm_sh0 = {
.name = "SH0",
.num_nodes = 1,
.nodes = { &qns_llcc },
};
static struct qcom_icc_bcm bcm_sh1 = {
.name = "SH1",
.num_nodes = 11,
.nodes = { &alm_sys_tcu, &chm_apps,
&qnm_ecpri_dma, &qnm_fec_2_gemnoc,
&qnm_pcie, &qnm_snoc_gc,
&qnm_snoc_sf, &qxm_mdsp,
&qns_gem_noc_cnoc, &qns_modem_slave,
&qns_pcie
},
};
static struct qcom_icc_bcm bcm_sn0 = {
.name = "SN0",
.num_nodes = 1,
.nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_bcm bcm_sn1 = {
.name = "SN1",
.num_nodes = 6,
.nodes = { &qhm_gic, &qxm_pimem,
&xm_gic, &xm_qdss_etr0,
&xm_qdss_etr1, &qns_gemnoc_gc
},
};
static struct qcom_icc_bcm bcm_sn2 = {
.name = "SN2",
.num_nodes = 5,
.nodes = { &qnm_aggre_noc, &qxm_ecpri_gsi,
&xm_ecpri_dma, &qns_anoc_snoc_gsi,
&qns_ecpri_gemnoc
},
};
static struct qcom_icc_bcm bcm_sn7 = {
.name = "SN7",
.num_nodes = 2,
.nodes = { &qns_pcie_gemnoc, &xs_pcie },
};
static struct qcom_icc_bcm * const clk_virt_bcms[] = {
&bcm_qup0,
};
static struct qcom_icc_node * const clk_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[SLAVE_QUP_CORE_0] = &qup0_core_slave,
[SLAVE_QUP_CORE_1] = &qup1_core_slave,
};
static const struct qcom_icc_desc qdu1000_clk_virt = {
.nodes = clk_virt_nodes,
.num_nodes = ARRAY_SIZE(clk_virt_nodes),
.bcms = clk_virt_bcms,
.num_bcms = ARRAY_SIZE(clk_virt_bcms),
};
static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh1,
};
static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_SYS_TCU] = &alm_sys_tcu,
[MASTER_APPSS_PROC] = &chm_apps,
[MASTER_GEMNOC_ECPRI_DMA] = &qnm_ecpri_dma,
[MASTER_FEC_2_GEMNOC] = &qnm_fec_2_gemnoc,
[MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
[MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
[MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
[MASTER_MSS_PROC] = &qxm_mdsp,
[SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
[SLAVE_LLCC] = &qns_llcc,
[SLAVE_GEMNOC_MODEM_CNOC] = &qns_modem_slave,
[SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
};
static const struct qcom_icc_desc qdu1000_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
};
static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
};
static const struct qcom_icc_desc qdu1000_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_ce0,
&bcm_cn0,
&bcm_sn0,
&bcm_sn1,
&bcm_sn2,
&bcm_sn7,
};
static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_GIC_AHB] = &qhm_gic,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QPIC] = &qhm_qpic,
[MASTER_QSPI_0] = &qhm_qspi,
[MASTER_QUP_0] = &qhm_qup0,
[MASTER_QUP_1] = &qhm_qup1,
[MASTER_SNOC_CFG] = &qhm_system_noc_cfg,
[MASTER_ANOC_SNOC] = &qnm_aggre_noc,
[MASTER_ANOC_GSI] = &qnm_aggre_noc_gsi,
[MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
[MASTER_GEMNOC_MODEM_CNOC] = &qnm_gemnoc_modem_slave,
[MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
[MASTER_CRYPTO] = &qxm_crypto,
[MASTER_ECPRI_GSI] = &qxm_ecpri_gsi,
[MASTER_PIMEM] = &qxm_pimem,
[MASTER_SNOC_ECPRI_DMA] = &xm_ecpri_dma,
[MASTER_GIC] = &xm_gic,
[MASTER_PCIE] = &xm_pcie,
[MASTER_QDSS_ETR] = &xm_qdss_etr0,
[MASTER_QDSS_ETR_1] = &xm_qdss_etr1,
[MASTER_SDCC_1] = &xm_sdc,
[MASTER_USB3] = &xm_usb3,
[SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0_south,
[SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1_north,
[SLAVE_AHB2PHY_EAST] = &qhs_ahb2phy2_east,
[SLAVE_AOSS] = &qhs_aoss,
[SLAVE_CLK_CTL] = &qhs_clk_ctl,
[SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
[SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
[SLAVE_CRYPTO_0_CFG] = &qhs_crypto_cfg,
[SLAVE_ECPRI_CFG] = &qhs_ecpri_cfg,
[SLAVE_IMEM_CFG] = &qhs_imem_cfg,
[SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
[SLAVE_CNOC_MSS] = &qhs_mss_cfg,
[SLAVE_PCIE_CFG] = &qhs_pcie_cfg,
[SLAVE_PDM] = &qhs_pdm,
[SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
[SLAVE_PRNG] = &qhs_prng,
[SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
[SLAVE_QPIC] = &qhs_qpic,
[SLAVE_QSPI_0] = &qhs_qspi,
[SLAVE_QUP_0] = &qhs_qup0,
[SLAVE_QUP_1] = &qhs_qup1,
[SLAVE_SDCC_2] = &qhs_sdc2,
[SLAVE_SMBUS_CFG] = &qhs_smbus_cfg,
[SLAVE_SNOC_CFG] = &qhs_system_noc_cfg,
[SLAVE_TCSR] = &qhs_tcsr,
[SLAVE_TLMM] = &qhs_tlmm,
[SLAVE_TME_CFG] = &qhs_tme_cfg,
[SLAVE_TSC_CFG] = &qhs_tsc_cfg,
[SLAVE_USB3_0] = &qhs_usb3,
[SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
[SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
[SLAVE_ANOC_SNOC_GSI] = &qns_anoc_snoc_gsi,
[SLAVE_DDRSS_CFG] = &qns_ddrss_cfg,
[SLAVE_ECPRI_GEMNOC] = &qns_ecpri_gemnoc,
[SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
[SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
[SLAVE_MODEM_OFFLINE] = &qns_modem,
[SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_gemnoc,
[SLAVE_IMEM] = &qxs_imem,
[SLAVE_PIMEM] = &qxs_pimem,
[SLAVE_SERVICE_SNOC] = &srvc_system_noc,
[SLAVE_ETHERNET_SS] = &xs_ethernet_ss,
[SLAVE_PCIE_0] = &xs_pcie,
[SLAVE_QDSS_STM] = &xs_qdss_stm,
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
static const struct qcom_icc_desc qdu1000_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
static int qnoc_probe(struct platform_device *pdev)
{
int ret;
ret = qcom_icc_rpmh_probe(pdev);
if (ret)
dev_err(&pdev->dev, "failed to register ICC provider\n");
return ret;
}
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,qdu1000-clk-virt",
.data = &qdu1000_clk_virt
},
{ .compatible = "qcom,qdu1000-gem-noc",
.data = &qdu1000_gem_noc
},
{ .compatible = "qcom,qdu1000-mc-virt",
.data = &qdu1000_mc_virt
},
{ .compatible = "qcom,qdu1000-system-noc",
.data = &qdu1000_system_noc
},
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
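/*
 * Illustrative sketch (assumption: the shared qcom_icc_rpmh_probe()
 * resolves the per-NoC descriptor roughly like this; the real lookup
 * lives in the common RPMh code, and of_device_get_match_data() may
 * need an extra include here):
 */
static const struct qcom_icc_desc * __maybe_unused
example_lookup_desc(struct platform_device *pdev)
{
	/* Picks e.g. &qdu1000_system_noc based on the compatible above */
	return of_device_get_match_data(&pdev->dev);
}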
static struct platform_driver qnoc_driver = {
.probe = qnoc_probe,
.remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-qdu1000",
.of_match_table = qnoc_of_match,
},
};
static int __init qnoc_driver_init(void)
{
return platform_driver_register(&qnoc_driver);
}
core_initcall(qnoc_driver_init);
static void __exit qnoc_driver_exit(void)
{
platform_driver_unregister(&qnoc_driver);
}
module_exit(qnoc_driver_exit);
MODULE_DESCRIPTION("QDU1000 NoC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/interconnect/qcom/qdu1000.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Linaro Ltd
*/
#include <dt-bindings/interconnect/qcom,qcs404.h>
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include "icc-rpm.h"
enum {
QCS404_MASTER_AMPSS_M0 = 1,
QCS404_MASTER_GRAPHICS_3D,
QCS404_MASTER_MDP_PORT0,
QCS404_SNOC_BIMC_1_MAS,
QCS404_MASTER_TCU_0,
QCS404_MASTER_SPDM,
QCS404_MASTER_BLSP_1,
QCS404_MASTER_BLSP_2,
QCS404_MASTER_XM_USB_HS1,
QCS404_MASTER_CRYPTO_CORE0,
QCS404_MASTER_SDCC_1,
QCS404_MASTER_SDCC_2,
QCS404_SNOC_PNOC_MAS,
QCS404_MASTER_QPIC,
QCS404_MASTER_QDSS_BAM,
QCS404_BIMC_SNOC_MAS,
QCS404_PNOC_SNOC_MAS,
QCS404_MASTER_QDSS_ETR,
QCS404_MASTER_EMAC,
QCS404_MASTER_PCIE,
QCS404_MASTER_USB3,
QCS404_PNOC_INT_0,
QCS404_PNOC_INT_2,
QCS404_PNOC_INT_3,
QCS404_PNOC_SLV_0,
QCS404_PNOC_SLV_1,
QCS404_PNOC_SLV_2,
QCS404_PNOC_SLV_3,
QCS404_PNOC_SLV_4,
QCS404_PNOC_SLV_6,
QCS404_PNOC_SLV_7,
QCS404_PNOC_SLV_8,
QCS404_PNOC_SLV_9,
QCS404_PNOC_SLV_10,
QCS404_PNOC_SLV_11,
QCS404_SNOC_QDSS_INT,
QCS404_SNOC_INT_0,
QCS404_SNOC_INT_1,
QCS404_SNOC_INT_2,
QCS404_SLAVE_EBI_CH0,
QCS404_BIMC_SNOC_SLV,
QCS404_SLAVE_SPDM_WRAPPER,
QCS404_SLAVE_PDM,
QCS404_SLAVE_PRNG,
QCS404_SLAVE_TCSR,
QCS404_SLAVE_SNOC_CFG,
QCS404_SLAVE_MESSAGE_RAM,
QCS404_SLAVE_DISPLAY_CFG,
QCS404_SLAVE_GRAPHICS_3D_CFG,
QCS404_SLAVE_BLSP_1,
QCS404_SLAVE_TLMM_NORTH,
QCS404_SLAVE_PCIE_1,
QCS404_SLAVE_EMAC_CFG,
QCS404_SLAVE_BLSP_2,
QCS404_SLAVE_TLMM_EAST,
QCS404_SLAVE_TCU,
QCS404_SLAVE_PMIC_ARB,
QCS404_SLAVE_SDCC_1,
QCS404_SLAVE_SDCC_2,
QCS404_SLAVE_TLMM_SOUTH,
QCS404_SLAVE_USB_HS,
QCS404_SLAVE_USB3,
QCS404_SLAVE_CRYPTO_0_CFG,
QCS404_PNOC_SNOC_SLV,
QCS404_SLAVE_APPSS,
QCS404_SLAVE_WCSS,
QCS404_SNOC_BIMC_1_SLV,
QCS404_SLAVE_OCIMEM,
QCS404_SNOC_PNOC_SLV,
QCS404_SLAVE_QDSS_STM,
QCS404_SLAVE_CATS_128,
QCS404_SLAVE_OCMEM_64,
QCS404_SLAVE_LPASS,
};
static const u16 mas_apps_proc_links[] = {
QCS404_SLAVE_EBI_CH0,
QCS404_BIMC_SNOC_SLV
};
static struct qcom_icc_node mas_apps_proc = {
.name = "mas_apps_proc",
.id = QCS404_MASTER_AMPSS_M0,
.buswidth = 8,
.mas_rpm_id = 0,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_apps_proc_links),
.links = mas_apps_proc_links,
};
static const u16 mas_oxili_links[] = {
QCS404_SLAVE_EBI_CH0,
QCS404_BIMC_SNOC_SLV
};
static struct qcom_icc_node mas_oxili = {
.name = "mas_oxili",
.id = QCS404_MASTER_GRAPHICS_3D,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_oxili_links),
.links = mas_oxili_links,
};
static const u16 mas_mdp_links[] = {
QCS404_SLAVE_EBI_CH0,
QCS404_BIMC_SNOC_SLV
};
static struct qcom_icc_node mas_mdp = {
.name = "mas_mdp",
.id = QCS404_MASTER_MDP_PORT0,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_mdp_links),
.links = mas_mdp_links,
};
static const u16 mas_snoc_bimc_1_links[] = {
QCS404_SLAVE_EBI_CH0
};
static struct qcom_icc_node mas_snoc_bimc_1 = {
.name = "mas_snoc_bimc_1",
.id = QCS404_SNOC_BIMC_1_MAS,
.buswidth = 8,
.mas_rpm_id = 76,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_snoc_bimc_1_links),
.links = mas_snoc_bimc_1_links,
};
static const u16 mas_tcu_0_links[] = {
QCS404_SLAVE_EBI_CH0,
QCS404_BIMC_SNOC_SLV
};
static struct qcom_icc_node mas_tcu_0 = {
.name = "mas_tcu_0",
.id = QCS404_MASTER_TCU_0,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_tcu_0_links),
.links = mas_tcu_0_links,
};
static const u16 mas_spdm_links[] = {
QCS404_PNOC_INT_3
};
static struct qcom_icc_node mas_spdm = {
.name = "mas_spdm",
.id = QCS404_MASTER_SPDM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_spdm_links),
.links = mas_spdm_links,
};
static const u16 mas_blsp_1_links[] = {
QCS404_PNOC_INT_3
};
static struct qcom_icc_node mas_blsp_1 = {
.name = "mas_blsp_1",
.id = QCS404_MASTER_BLSP_1,
.buswidth = 4,
.mas_rpm_id = 41,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_blsp_1_links),
.links = mas_blsp_1_links,
};
static const u16 mas_blsp_2_links[] = {
QCS404_PNOC_INT_3
};
static struct qcom_icc_node mas_blsp_2 = {
.name = "mas_blsp_2",
.id = QCS404_MASTER_BLSP_2,
.buswidth = 4,
.mas_rpm_id = 39,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_blsp_2_links),
.links = mas_blsp_2_links,
};
static const u16 mas_xi_usb_hs1_links[] = {
QCS404_PNOC_INT_0
};
static struct qcom_icc_node mas_xi_usb_hs1 = {
.name = "mas_xi_usb_hs1",
.id = QCS404_MASTER_XM_USB_HS1,
.buswidth = 8,
.mas_rpm_id = 138,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_xi_usb_hs1_links),
.links = mas_xi_usb_hs1_links,
};
static const u16 mas_crypto_links[] = {
QCS404_PNOC_SNOC_SLV,
QCS404_PNOC_INT_2
};
static struct qcom_icc_node mas_crypto = {
.name = "mas_crypto",
.id = QCS404_MASTER_CRYPTO_CORE0,
.buswidth = 8,
.mas_rpm_id = 23,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_crypto_links),
.links = mas_crypto_links,
};
static const u16 mas_sdcc_1_links[] = {
QCS404_PNOC_INT_0
};
static struct qcom_icc_node mas_sdcc_1 = {
.name = "mas_sdcc_1",
.id = QCS404_MASTER_SDCC_1,
.buswidth = 8,
.mas_rpm_id = 33,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_sdcc_1_links),
.links = mas_sdcc_1_links,
};
static const u16 mas_sdcc_2_links[] = {
QCS404_PNOC_INT_0
};
static struct qcom_icc_node mas_sdcc_2 = {
.name = "mas_sdcc_2",
.id = QCS404_MASTER_SDCC_2,
.buswidth = 8,
.mas_rpm_id = 35,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_sdcc_2_links),
.links = mas_sdcc_2_links,
};
static const u16 mas_snoc_pcnoc_links[] = {
QCS404_PNOC_INT_2
};
static struct qcom_icc_node mas_snoc_pcnoc = {
.name = "mas_snoc_pcnoc",
.id = QCS404_SNOC_PNOC_MAS,
.buswidth = 8,
.mas_rpm_id = 77,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_snoc_pcnoc_links),
.links = mas_snoc_pcnoc_links,
};
static const u16 mas_qpic_links[] = {
QCS404_PNOC_INT_0
};
static struct qcom_icc_node mas_qpic = {
.name = "mas_qpic",
.id = QCS404_MASTER_QPIC,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_qpic_links),
.links = mas_qpic_links,
};
static const u16 mas_qdss_bam_links[] = {
QCS404_SNOC_QDSS_INT
};
static struct qcom_icc_node mas_qdss_bam = {
.name = "mas_qdss_bam",
.id = QCS404_MASTER_QDSS_BAM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_qdss_bam_links),
.links = mas_qdss_bam_links,
};
static const u16 mas_bimc_snoc_links[] = {
QCS404_SLAVE_OCMEM_64,
QCS404_SLAVE_CATS_128,
QCS404_SNOC_INT_0,
QCS404_SNOC_INT_1
};
static struct qcom_icc_node mas_bimc_snoc = {
.name = "mas_bimc_snoc",
.id = QCS404_BIMC_SNOC_MAS,
.buswidth = 8,
.mas_rpm_id = 21,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_bimc_snoc_links),
.links = mas_bimc_snoc_links,
};
static const u16 mas_pcnoc_snoc_links[] = {
QCS404_SNOC_BIMC_1_SLV,
QCS404_SNOC_INT_2,
QCS404_SNOC_INT_0
};
static struct qcom_icc_node mas_pcnoc_snoc = {
.name = "mas_pcnoc_snoc",
.id = QCS404_PNOC_SNOC_MAS,
.buswidth = 8,
.mas_rpm_id = 29,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_pcnoc_snoc_links),
.links = mas_pcnoc_snoc_links,
};
static const u16 mas_qdss_etr_links[] = {
QCS404_SNOC_QDSS_INT
};
static struct qcom_icc_node mas_qdss_etr = {
.name = "mas_qdss_etr",
.id = QCS404_MASTER_QDSS_ETR,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_qdss_etr_links),
.links = mas_qdss_etr_links,
};
static const u16 mas_emac_links[] = {
QCS404_SNOC_BIMC_1_SLV,
QCS404_SNOC_INT_1
};
static struct qcom_icc_node mas_emac = {
.name = "mas_emac",
.id = QCS404_MASTER_EMAC,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_emac_links),
.links = mas_emac_links,
};
static const u16 mas_pcie_links[] = {
QCS404_SNOC_BIMC_1_SLV,
QCS404_SNOC_INT_1
};
static struct qcom_icc_node mas_pcie = {
.name = "mas_pcie",
.id = QCS404_MASTER_PCIE,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_pcie_links),
.links = mas_pcie_links,
};
static const u16 mas_usb3_links[] = {
QCS404_SNOC_BIMC_1_SLV,
QCS404_SNOC_INT_1
};
static struct qcom_icc_node mas_usb3 = {
.name = "mas_usb3",
.id = QCS404_MASTER_USB3,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_usb3_links),
.links = mas_usb3_links,
};
static const u16 pcnoc_int_0_links[] = {
QCS404_PNOC_SNOC_SLV,
QCS404_PNOC_INT_2
};
static struct qcom_icc_node pcnoc_int_0 = {
.name = "pcnoc_int_0",
.id = QCS404_PNOC_INT_0,
.buswidth = 8,
.mas_rpm_id = 85,
.slv_rpm_id = 114,
.num_links = ARRAY_SIZE(pcnoc_int_0_links),
.links = pcnoc_int_0_links,
};
static const u16 pcnoc_int_2_links[] = {
QCS404_PNOC_SLV_10,
QCS404_SLAVE_TCU,
QCS404_PNOC_SLV_11,
QCS404_PNOC_SLV_2,
QCS404_PNOC_SLV_3,
QCS404_PNOC_SLV_0,
QCS404_PNOC_SLV_1,
QCS404_PNOC_SLV_6,
QCS404_PNOC_SLV_7,
QCS404_PNOC_SLV_4,
QCS404_PNOC_SLV_8,
QCS404_PNOC_SLV_9
};
static struct qcom_icc_node pcnoc_int_2 = {
.name = "pcnoc_int_2",
.id = QCS404_PNOC_INT_2,
.buswidth = 8,
.mas_rpm_id = 124,
.slv_rpm_id = 184,
.num_links = ARRAY_SIZE(pcnoc_int_2_links),
.links = pcnoc_int_2_links,
};
static const u16 pcnoc_int_3_links[] = {
QCS404_PNOC_SNOC_SLV
};
static struct qcom_icc_node pcnoc_int_3 = {
.name = "pcnoc_int_3",
.id = QCS404_PNOC_INT_3,
.buswidth = 8,
.mas_rpm_id = 125,
.slv_rpm_id = 185,
.num_links = ARRAY_SIZE(pcnoc_int_3_links),
.links = pcnoc_int_3_links,
};
static const u16 pcnoc_s_0_links[] = {
QCS404_SLAVE_PRNG,
QCS404_SLAVE_SPDM_WRAPPER,
QCS404_SLAVE_PDM
};
static struct qcom_icc_node pcnoc_s_0 = {
.name = "pcnoc_s_0",
.id = QCS404_PNOC_SLV_0,
.buswidth = 4,
.mas_rpm_id = 89,
.slv_rpm_id = 118,
.num_links = ARRAY_SIZE(pcnoc_s_0_links),
.links = pcnoc_s_0_links,
};
static const u16 pcnoc_s_1_links[] = {
QCS404_SLAVE_TCSR
};
static struct qcom_icc_node pcnoc_s_1 = {
.name = "pcnoc_s_1",
.id = QCS404_PNOC_SLV_1,
.buswidth = 4,
.mas_rpm_id = 90,
.slv_rpm_id = 119,
.num_links = ARRAY_SIZE(pcnoc_s_1_links),
.links = pcnoc_s_1_links,
};
static const u16 pcnoc_s_2_links[] = {
QCS404_SLAVE_GRAPHICS_3D_CFG
};
static struct qcom_icc_node pcnoc_s_2 = {
.name = "pcnoc_s_2",
.id = QCS404_PNOC_SLV_2,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_s_2_links),
.links = pcnoc_s_2_links,
};
static const u16 pcnoc_s_3_links[] = {
QCS404_SLAVE_MESSAGE_RAM
};
static struct qcom_icc_node pcnoc_s_3 = {
.name = "pcnoc_s_3",
.id = QCS404_PNOC_SLV_3,
.buswidth = 4,
.mas_rpm_id = 92,
.slv_rpm_id = 121,
.num_links = ARRAY_SIZE(pcnoc_s_3_links),
.links = pcnoc_s_3_links,
};
static const u16 pcnoc_s_4_links[] = {
QCS404_SLAVE_SNOC_CFG
};
static struct qcom_icc_node pcnoc_s_4 = {
.name = "pcnoc_s_4",
.id = QCS404_PNOC_SLV_4,
.buswidth = 4,
.mas_rpm_id = 93,
.slv_rpm_id = 122,
.num_links = ARRAY_SIZE(pcnoc_s_4_links),
.links = pcnoc_s_4_links,
};
static const u16 pcnoc_s_6_links[] = {
QCS404_SLAVE_BLSP_1,
QCS404_SLAVE_TLMM_NORTH,
QCS404_SLAVE_EMAC_CFG
};
static struct qcom_icc_node pcnoc_s_6 = {
.name = "pcnoc_s_6",
.id = QCS404_PNOC_SLV_6,
.buswidth = 4,
.mas_rpm_id = 94,
.slv_rpm_id = 123,
.num_links = ARRAY_SIZE(pcnoc_s_6_links),
.links = pcnoc_s_6_links,
};
static const u16 pcnoc_s_7_links[] = {
QCS404_SLAVE_TLMM_SOUTH,
QCS404_SLAVE_DISPLAY_CFG,
QCS404_SLAVE_SDCC_1,
QCS404_SLAVE_PCIE_1,
QCS404_SLAVE_SDCC_2
};
static struct qcom_icc_node pcnoc_s_7 = {
.name = "pcnoc_s_7",
.id = QCS404_PNOC_SLV_7,
.buswidth = 4,
.mas_rpm_id = 95,
.slv_rpm_id = 124,
.num_links = ARRAY_SIZE(pcnoc_s_7_links),
.links = pcnoc_s_7_links,
};
static const u16 pcnoc_s_8_links[] = {
QCS404_SLAVE_CRYPTO_0_CFG
};
static struct qcom_icc_node pcnoc_s_8 = {
.name = "pcnoc_s_8",
.id = QCS404_PNOC_SLV_8,
.buswidth = 4,
.mas_rpm_id = 96,
.slv_rpm_id = 125,
.num_links = ARRAY_SIZE(pcnoc_s_8_links),
.links = pcnoc_s_8_links,
};
static const u16 pcnoc_s_9_links[] = {
QCS404_SLAVE_BLSP_2,
QCS404_SLAVE_TLMM_EAST,
QCS404_SLAVE_PMIC_ARB
};
static struct qcom_icc_node pcnoc_s_9 = {
.name = "pcnoc_s_9",
.id = QCS404_PNOC_SLV_9,
.buswidth = 4,
.mas_rpm_id = 97,
.slv_rpm_id = 126,
.num_links = ARRAY_SIZE(pcnoc_s_9_links),
.links = pcnoc_s_9_links,
};
static const u16 pcnoc_s_10_links[] = {
QCS404_SLAVE_USB_HS
};
static struct qcom_icc_node pcnoc_s_10 = {
.name = "pcnoc_s_10",
.id = QCS404_PNOC_SLV_10,
.buswidth = 4,
.mas_rpm_id = 157,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_s_10_links),
.links = pcnoc_s_10_links,
};
static const u16 pcnoc_s_11_links[] = {
QCS404_SLAVE_USB3
};
static struct qcom_icc_node pcnoc_s_11 = {
.name = "pcnoc_s_11",
.id = QCS404_PNOC_SLV_11,
.buswidth = 4,
.mas_rpm_id = 158,
.slv_rpm_id = 246,
.num_links = ARRAY_SIZE(pcnoc_s_11_links),
.links = pcnoc_s_11_links,
};
static const u16 qdss_int_links[] = {
QCS404_SNOC_BIMC_1_SLV,
QCS404_SNOC_INT_1
};
static struct qcom_icc_node qdss_int = {
.name = "qdss_int",
.id = QCS404_SNOC_QDSS_INT,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(qdss_int_links),
.links = qdss_int_links,
};
static const u16 snoc_int_0_links[] = {
QCS404_SLAVE_LPASS,
QCS404_SLAVE_APPSS,
QCS404_SLAVE_WCSS
};
static struct qcom_icc_node snoc_int_0 = {
.name = "snoc_int_0",
.id = QCS404_SNOC_INT_0,
.buswidth = 8,
.mas_rpm_id = 99,
.slv_rpm_id = 130,
.num_links = ARRAY_SIZE(snoc_int_0_links),
.links = snoc_int_0_links,
};
static const u16 snoc_int_1_links[] = {
QCS404_SNOC_PNOC_SLV,
QCS404_SNOC_INT_2
};
static struct qcom_icc_node snoc_int_1 = {
.name = "snoc_int_1",
.id = QCS404_SNOC_INT_1,
.buswidth = 8,
.mas_rpm_id = 100,
.slv_rpm_id = 131,
.num_links = ARRAY_SIZE(snoc_int_1_links),
.links = snoc_int_1_links,
};
static const u16 snoc_int_2_links[] = {
QCS404_SLAVE_QDSS_STM,
QCS404_SLAVE_OCIMEM
};
static struct qcom_icc_node snoc_int_2 = {
.name = "snoc_int_2",
.id = QCS404_SNOC_INT_2,
.buswidth = 8,
.mas_rpm_id = 134,
.slv_rpm_id = 197,
.num_links = ARRAY_SIZE(snoc_int_2_links),
.links = snoc_int_2_links,
};
static struct qcom_icc_node slv_ebi = {
.name = "slv_ebi",
.id = QCS404_SLAVE_EBI_CH0,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 0,
};
static const u16 slv_bimc_snoc_links[] = {
QCS404_BIMC_SNOC_MAS
};
static struct qcom_icc_node slv_bimc_snoc = {
.name = "slv_bimc_snoc",
.id = QCS404_BIMC_SNOC_SLV,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 2,
.num_links = ARRAY_SIZE(slv_bimc_snoc_links),
.links = slv_bimc_snoc_links,
};
static struct qcom_icc_node slv_spdm = {
.name = "slv_spdm",
.id = QCS404_SLAVE_SPDM_WRAPPER,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_pdm = {
.name = "slv_pdm",
.id = QCS404_SLAVE_PDM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 41,
};
static struct qcom_icc_node slv_prng = {
.name = "slv_prng",
.id = QCS404_SLAVE_PRNG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 44,
};
static struct qcom_icc_node slv_tcsr = {
.name = "slv_tcsr",
.id = QCS404_SLAVE_TCSR,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 50,
};
static struct qcom_icc_node slv_snoc_cfg = {
.name = "slv_snoc_cfg",
.id = QCS404_SLAVE_SNOC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 70,
};
static struct qcom_icc_node slv_message_ram = {
.name = "slv_message_ram",
.id = QCS404_SLAVE_MESSAGE_RAM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 55,
};
static struct qcom_icc_node slv_disp_ss_cfg = {
.name = "slv_disp_ss_cfg",
.id = QCS404_SLAVE_DISPLAY_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_gpu_cfg = {
.name = "slv_gpu_cfg",
.id = QCS404_SLAVE_GRAPHICS_3D_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_blsp_1 = {
.name = "slv_blsp_1",
.id = QCS404_SLAVE_BLSP_1,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 39,
};
static struct qcom_icc_node slv_tlmm_north = {
.name = "slv_tlmm_north",
.id = QCS404_SLAVE_TLMM_NORTH,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 214,
};
static struct qcom_icc_node slv_pcie = {
.name = "slv_pcie",
.id = QCS404_SLAVE_PCIE_1,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_ethernet = {
.name = "slv_ethernet",
.id = QCS404_SLAVE_EMAC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_blsp_2 = {
.name = "slv_blsp_2",
.id = QCS404_SLAVE_BLSP_2,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 37,
};
static struct qcom_icc_node slv_tlmm_east = {
.name = "slv_tlmm_east",
.id = QCS404_SLAVE_TLMM_EAST,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 213,
};
static struct qcom_icc_node slv_tcu = {
.name = "slv_tcu",
.id = QCS404_SLAVE_TCU,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_pmic_arb = {
.name = "slv_pmic_arb",
.id = QCS404_SLAVE_PMIC_ARB,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 59,
};
static struct qcom_icc_node slv_sdcc_1 = {
.name = "slv_sdcc_1",
.id = QCS404_SLAVE_SDCC_1,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 31,
};
static struct qcom_icc_node slv_sdcc_2 = {
.name = "slv_sdcc_2",
.id = QCS404_SLAVE_SDCC_2,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 33,
};
static struct qcom_icc_node slv_tlmm_south = {
.name = "slv_tlmm_south",
.id = QCS404_SLAVE_TLMM_SOUTH,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_usb_hs = {
.name = "slv_usb_hs",
.id = QCS404_SLAVE_USB_HS,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 40,
};
static struct qcom_icc_node slv_usb3 = {
.name = "slv_usb3",
.id = QCS404_SLAVE_USB3,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 22,
};
static struct qcom_icc_node slv_crypto_0_cfg = {
.name = "slv_crypto_0_cfg",
.id = QCS404_SLAVE_CRYPTO_0_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 52,
};
static const u16 slv_pcnoc_snoc_links[] = {
QCS404_PNOC_SNOC_MAS
};
static struct qcom_icc_node slv_pcnoc_snoc = {
.name = "slv_pcnoc_snoc",
.id = QCS404_PNOC_SNOC_SLV,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 45,
.num_links = ARRAY_SIZE(slv_pcnoc_snoc_links),
.links = slv_pcnoc_snoc_links,
};
static struct qcom_icc_node slv_kpss_ahb = {
.name = "slv_kpss_ahb",
.id = QCS404_SLAVE_APPSS,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_wcss = {
.name = "slv_wcss",
.id = QCS404_SLAVE_WCSS,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 23,
};
static const u16 slv_snoc_bimc_1_links[] = {
QCS404_SNOC_BIMC_1_MAS
};
static struct qcom_icc_node slv_snoc_bimc_1 = {
.name = "slv_snoc_bimc_1",
.id = QCS404_SNOC_BIMC_1_SLV,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 104,
.num_links = ARRAY_SIZE(slv_snoc_bimc_1_links),
.links = slv_snoc_bimc_1_links,
};
static struct qcom_icc_node slv_imem = {
.name = "slv_imem",
.id = QCS404_SLAVE_OCIMEM,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 26,
};
static const u16 slv_snoc_pcnoc_links[] = {
QCS404_SNOC_PNOC_MAS
};
static struct qcom_icc_node slv_snoc_pcnoc = {
.name = "slv_snoc_pcnoc",
.id = QCS404_SNOC_PNOC_SLV,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 28,
.num_links = ARRAY_SIZE(slv_snoc_pcnoc_links),
.links = slv_snoc_pcnoc_links,
};
static struct qcom_icc_node slv_qdss_stm = {
.name = "slv_qdss_stm",
.id = QCS404_SLAVE_QDSS_STM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 30,
};
static struct qcom_icc_node slv_cats_0 = {
.name = "slv_cats_0",
.id = QCS404_SLAVE_CATS_128,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_cats_1 = {
.name = "slv_cats_1",
.id = QCS404_SLAVE_OCMEM_64,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_lpass = {
.name = "slv_lpass",
.id = QCS404_SLAVE_LPASS,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
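/*
 * Illustrative sketch (not an upstream node): mas_rpm_id/slv_rpm_id >= 0
 * means the node's aggregated bandwidth is voted to the RPM firmware in
 * that role, while -1 skips the vote. Pairing the link table with
 * ARRAY_SIZE() keeps .num_links consistent with the array:
 */
static const u16 example_node_links[] = {
	QCS404_SLAVE_LPASS
};
static struct qcom_icc_node example_node __maybe_unused = {
	.name = "example_node",
	.id = 999,		/* hypothetical ID, not a real QCS404 node */
	.buswidth = 4,
	.mas_rpm_id = -1,	/* AP-internal: no RPM vote either way */
	.slv_rpm_id = -1,
	.num_links = ARRAY_SIZE(example_node_links),
	.links = example_node_links,
};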
static struct qcom_icc_node * const qcs404_bimc_nodes[] = {
[MASTER_AMPSS_M0] = &mas_apps_proc,
[MASTER_OXILI] = &mas_oxili,
[MASTER_MDP_PORT0] = &mas_mdp,
[MASTER_SNOC_BIMC_1] = &mas_snoc_bimc_1,
[MASTER_TCU_0] = &mas_tcu_0,
[SLAVE_EBI_CH0] = &slv_ebi,
[SLAVE_BIMC_SNOC] = &slv_bimc_snoc,
};
static const struct qcom_icc_desc qcs404_bimc = {
.bus_clk_desc = &bimc_clk,
.nodes = qcs404_bimc_nodes,
.num_nodes = ARRAY_SIZE(qcs404_bimc_nodes),
};
static struct qcom_icc_node * const qcs404_pcnoc_nodes[] = {
[MASTER_SPDM] = &mas_spdm,
[MASTER_BLSP_1] = &mas_blsp_1,
[MASTER_BLSP_2] = &mas_blsp_2,
[MASTER_XI_USB_HS1] = &mas_xi_usb_hs1,
[MASTER_CRYPT0] = &mas_crypto,
[MASTER_SDCC_1] = &mas_sdcc_1,
[MASTER_SDCC_2] = &mas_sdcc_2,
[MASTER_SNOC_PCNOC] = &mas_snoc_pcnoc,
[MASTER_QPIC] = &mas_qpic,
[PCNOC_INT_0] = &pcnoc_int_0,
[PCNOC_INT_2] = &pcnoc_int_2,
[PCNOC_INT_3] = &pcnoc_int_3,
[PCNOC_S_0] = &pcnoc_s_0,
[PCNOC_S_1] = &pcnoc_s_1,
[PCNOC_S_2] = &pcnoc_s_2,
[PCNOC_S_3] = &pcnoc_s_3,
[PCNOC_S_4] = &pcnoc_s_4,
[PCNOC_S_6] = &pcnoc_s_6,
[PCNOC_S_7] = &pcnoc_s_7,
[PCNOC_S_8] = &pcnoc_s_8,
[PCNOC_S_9] = &pcnoc_s_9,
[PCNOC_S_10] = &pcnoc_s_10,
[PCNOC_S_11] = &pcnoc_s_11,
[SLAVE_SPDM] = &slv_spdm,
[SLAVE_PDM] = &slv_pdm,
[SLAVE_PRNG] = &slv_prng,
[SLAVE_TCSR] = &slv_tcsr,
[SLAVE_SNOC_CFG] = &slv_snoc_cfg,
[SLAVE_MESSAGE_RAM] = &slv_message_ram,
[SLAVE_DISP_SS_CFG] = &slv_disp_ss_cfg,
[SLAVE_GPU_CFG] = &slv_gpu_cfg,
[SLAVE_BLSP_1] = &slv_blsp_1,
[SLAVE_BLSP_2] = &slv_blsp_2,
[SLAVE_TLMM_NORTH] = &slv_tlmm_north,
[SLAVE_PCIE] = &slv_pcie,
[SLAVE_ETHERNET] = &slv_ethernet,
[SLAVE_TLMM_EAST] = &slv_tlmm_east,
[SLAVE_TCU] = &slv_tcu,
[SLAVE_PMIC_ARB] = &slv_pmic_arb,
[SLAVE_SDCC_1] = &slv_sdcc_1,
[SLAVE_SDCC_2] = &slv_sdcc_2,
[SLAVE_TLMM_SOUTH] = &slv_tlmm_south,
[SLAVE_USB_HS] = &slv_usb_hs,
[SLAVE_USB3] = &slv_usb3,
[SLAVE_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
[SLAVE_PCNOC_SNOC] = &slv_pcnoc_snoc,
};
static const struct qcom_icc_desc qcs404_pcnoc = {
.bus_clk_desc = &bus_0_clk,
.nodes = qcs404_pcnoc_nodes,
.num_nodes = ARRAY_SIZE(qcs404_pcnoc_nodes),
};
static struct qcom_icc_node * const qcs404_snoc_nodes[] = {
[MASTER_QDSS_BAM] = &mas_qdss_bam,
[MASTER_BIMC_SNOC] = &mas_bimc_snoc,
[MASTER_PCNOC_SNOC] = &mas_pcnoc_snoc,
[MASTER_QDSS_ETR] = &mas_qdss_etr,
[MASTER_EMAC] = &mas_emac,
[MASTER_PCIE] = &mas_pcie,
[MASTER_USB3] = &mas_usb3,
[QDSS_INT] = &qdss_int,
[SNOC_INT_0] = &snoc_int_0,
[SNOC_INT_1] = &snoc_int_1,
[SNOC_INT_2] = &snoc_int_2,
[SLAVE_KPSS_AHB] = &slv_kpss_ahb,
[SLAVE_WCSS] = &slv_wcss,
[SLAVE_SNOC_BIMC_1] = &slv_snoc_bimc_1,
[SLAVE_IMEM] = &slv_imem,
[SLAVE_SNOC_PCNOC] = &slv_snoc_pcnoc,
[SLAVE_QDSS_STM] = &slv_qdss_stm,
[SLAVE_CATS_0] = &slv_cats_0,
[SLAVE_CATS_1] = &slv_cats_1,
[SLAVE_LPASS] = &slv_lpass,
};
static const struct qcom_icc_desc qcs404_snoc = {
.bus_clk_desc = &bus_1_clk,
.nodes = qcs404_snoc_nodes,
.num_nodes = ARRAY_SIZE(qcs404_snoc_nodes),
};
static const struct of_device_id qcs404_noc_of_match[] = {
{ .compatible = "qcom,qcs404-bimc", .data = &qcs404_bimc },
{ .compatible = "qcom,qcs404-pcnoc", .data = &qcs404_pcnoc },
{ .compatible = "qcom,qcs404-snoc", .data = &qcs404_snoc },
{ },
};
MODULE_DEVICE_TABLE(of, qcs404_noc_of_match);
static struct platform_driver qcs404_noc_driver = {
.probe = qnoc_probe,
.remove = qnoc_remove,
.driver = {
.name = "qnoc-qcs404",
.of_match_table = qcs404_noc_of_match,
},
};
module_platform_driver(qcs404_noc_driver);
MODULE_DESCRIPTION("Qualcomm QCS404 NoC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/interconnect/qcom/qcs404.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Linaro Ltd
*/
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include "icc-common.h"
#include "icc-rpm.h"
/* QNOC QoS */
#define QNOC_QOS_MCTL_LOWn_ADDR(n) (0x8 + (n * 0x1000))
#define QNOC_QOS_MCTL_DFLT_PRIO_MASK 0x70
#define QNOC_QOS_MCTL_DFLT_PRIO_SHIFT 4
#define QNOC_QOS_MCTL_URGFWD_EN_MASK 0x8
#define QNOC_QOS_MCTL_URGFWD_EN_SHIFT 3
/* BIMC QoS */
#define M_BKE_REG_BASE(n) (0x300 + (0x4000 * n))
#define M_BKE_EN_ADDR(n) (M_BKE_REG_BASE(n))
#define M_BKE_HEALTH_CFG_ADDR(i, n) (M_BKE_REG_BASE(n) + 0x40 + (0x4 * i))
#define M_BKE_HEALTH_CFG_LIMITCMDS_MASK 0x80000000
#define M_BKE_HEALTH_CFG_AREQPRIO_MASK 0x300
#define M_BKE_HEALTH_CFG_PRIOLVL_MASK 0x3
#define M_BKE_HEALTH_CFG_AREQPRIO_SHIFT 0x8
#define M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT 0x1f
#define M_BKE_EN_EN_BMASK 0x1
/* NoC QoS */
#define NOC_QOS_PRIORITYn_ADDR(n) (0x8 + (n * 0x1000))
#define NOC_QOS_PRIORITY_P1_MASK 0xc
#define NOC_QOS_PRIORITY_P0_MASK 0x3
#define NOC_QOS_PRIORITY_P1_SHIFT 0x2
#define NOC_QOS_MODEn_ADDR(n) (0xc + (n * 0x1000))
#define NOC_QOS_MODEn_MASK 0x3
#define NOC_QOS_MODE_FIXED_VAL 0x0
#define NOC_QOS_MODE_BYPASS_VAL 0x2
#define ICC_BUS_CLK_MIN_RATE 19200ULL /* kHz */
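/*
 * Illustrative sketch (not part of the upstream file): how the QNOC
 * register math above works out. The per-port stride is 0x1000, so
 * QNOC_QOS_MCTL_LOWn_ADDR(2) = 0x8 + 2 * 0x1000 = 0x2008, and a default
 * priority of 3 packs into bits [6:4] as (3 << 4) & 0x70 = 0x30.
 */
static inline u32 qnoc_pack_dflt_prio_example(u32 prio)
{
	/* Shift into the DFLT_PRIO field and mask out-of-range bits */
	return (prio << QNOC_QOS_MCTL_DFLT_PRIO_SHIFT) &
		QNOC_QOS_MCTL_DFLT_PRIO_MASK;
}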
static int qcom_icc_set_qnoc_qos(struct icc_node *src)
{
struct icc_provider *provider = src->provider;
struct qcom_icc_provider *qp = to_qcom_provider(provider);
struct qcom_icc_node *qn = src->data;
struct qcom_icc_qos *qos = &qn->qos;
int rc;
rc = regmap_update_bits(qp->regmap,
qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
QNOC_QOS_MCTL_DFLT_PRIO_MASK,
qos->areq_prio << QNOC_QOS_MCTL_DFLT_PRIO_SHIFT);
if (rc)
return rc;
return regmap_update_bits(qp->regmap,
qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
QNOC_QOS_MCTL_URGFWD_EN_MASK,
!!qos->urg_fwd_en << QNOC_QOS_MCTL_URGFWD_EN_SHIFT);
}
static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp,
struct qcom_icc_qos *qos,
int regnum)
{
u32 val;
u32 mask;
val = qos->prio_level;
mask = M_BKE_HEALTH_CFG_PRIOLVL_MASK;
val |= qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT;
mask |= M_BKE_HEALTH_CFG_AREQPRIO_MASK;
/* LIMITCMDS is not present on M_BKE_HEALTH_3 */
if (regnum != 3) {
val |= qos->limit_commands << M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT;
mask |= M_BKE_HEALTH_CFG_LIMITCMDS_MASK;
}
return regmap_update_bits(qp->regmap,
qp->qos_offset + M_BKE_HEALTH_CFG_ADDR(regnum, qos->qos_port),
mask, val);
}
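/*
 * Worked example (illustrative, assuming static_assert() from
 * <linux/build_bug.h> is reachable through the includes above): health
 * register 1 of QoS port 2 sits at 0x300 + 0x4000 * 2 + 0x40 + 0x4 * 1
 * = 0x8344 from the QoS base.
 */
static_assert(M_BKE_HEALTH_CFG_ADDR(1, 2) == 0x8344,
	      "BIMC health register address arithmetic sketch");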
static int qcom_icc_set_bimc_qos(struct icc_node *src)
{
struct qcom_icc_provider *qp;
struct qcom_icc_node *qn;
struct icc_provider *provider;
u32 mode = NOC_QOS_MODE_BYPASS;
u32 val = 0;
int i, rc = 0;
qn = src->data;
provider = src->provider;
qp = to_qcom_provider(provider);
if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
mode = qn->qos.qos_mode;
	/* QoS Priority: The QoS Health parameters are only considered
	 * when we are NOT in Bypass Mode.
	 */
if (mode != NOC_QOS_MODE_BYPASS) {
for (i = 3; i >= 0; i--) {
rc = qcom_icc_bimc_set_qos_health(qp,
&qn->qos, i);
if (rc)
return rc;
}
/* Set BKE_EN to 1 when Fixed, Regulator or Limiter Mode */
val = 1;
}
return regmap_update_bits(qp->regmap,
qp->qos_offset + M_BKE_EN_ADDR(qn->qos.qos_port),
M_BKE_EN_EN_BMASK, val);
}
static int qcom_icc_noc_set_qos_priority(struct qcom_icc_provider *qp,
struct qcom_icc_qos *qos)
{
u32 val;
int rc;
/* Must be updated one at a time, P1 first, P0 last */
val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
rc = regmap_update_bits(qp->regmap,
qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
NOC_QOS_PRIORITY_P1_MASK, val);
if (rc)
return rc;
return regmap_update_bits(qp->regmap,
qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
}
static int qcom_icc_set_noc_qos(struct icc_node *src)
{
struct qcom_icc_provider *qp;
struct qcom_icc_node *qn;
struct icc_provider *provider;
u32 mode = NOC_QOS_MODE_BYPASS_VAL;
int rc = 0;
qn = src->data;
provider = src->provider;
qp = to_qcom_provider(provider);
if (qn->qos.qos_port < 0) {
dev_dbg(src->provider->dev,
"NoC QoS: Skipping %s: vote aggregated on parent.\n",
qn->name);
return 0;
}
if (qn->qos.qos_mode == NOC_QOS_MODE_FIXED) {
dev_dbg(src->provider->dev, "NoC QoS: %s: Set Fixed mode\n", qn->name);
mode = NOC_QOS_MODE_FIXED_VAL;
rc = qcom_icc_noc_set_qos_priority(qp, &qn->qos);
if (rc)
return rc;
} else if (qn->qos.qos_mode == NOC_QOS_MODE_BYPASS) {
dev_dbg(src->provider->dev, "NoC QoS: %s: Set Bypass mode\n", qn->name);
mode = NOC_QOS_MODE_BYPASS_VAL;
} else {
/* How did we get here? */
}
return regmap_update_bits(qp->regmap,
qp->qos_offset + NOC_QOS_MODEn_ADDR(qn->qos.qos_port),
NOC_QOS_MODEn_MASK, mode);
}
static int qcom_icc_qos_set(struct icc_node *node)
{
struct qcom_icc_provider *qp = to_qcom_provider(node->provider);
struct qcom_icc_node *qn = node->data;
dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name);
switch (qp->type) {
case QCOM_ICC_BIMC:
return qcom_icc_set_bimc_qos(node);
case QCOM_ICC_QNOC:
return qcom_icc_set_qnoc_qos(node);
default:
return qcom_icc_set_noc_qos(node);
}
}
static int qcom_icc_rpm_set(struct qcom_icc_node *qn, u64 *bw)
{
int ret, rpm_ctx = 0;
u64 bw_bps;
if (qn->qos.ap_owned)
return 0;
for (rpm_ctx = 0; rpm_ctx < QCOM_SMD_RPM_STATE_NUM; rpm_ctx++) {
bw_bps = icc_units_to_bps(bw[rpm_ctx]);
if (qn->mas_rpm_id != -1) {
ret = qcom_icc_rpm_smd_send(rpm_ctx,
RPM_BUS_MASTER_REQ,
qn->mas_rpm_id,
bw_bps);
if (ret) {
pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
qn->mas_rpm_id, ret);
return ret;
}
}
if (qn->slv_rpm_id != -1) {
ret = qcom_icc_rpm_smd_send(rpm_ctx,
RPM_BUS_SLAVE_REQ,
qn->slv_rpm_id,
bw_bps);
if (ret) {
pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
qn->slv_rpm_id, ret);
return ret;
}
}
}
return 0;
}
/**
 * qcom_icc_pre_bw_aggregate - clean up stale values before re-aggregating requests
* @node: icc node to operate on
*/
static void qcom_icc_pre_bw_aggregate(struct icc_node *node)
{
struct qcom_icc_node *qn;
size_t i;
qn = node->data;
for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++) {
qn->sum_avg[i] = 0;
qn->max_peak[i] = 0;
}
}
/**
* qcom_icc_bw_aggregate - aggregate bw for buckets indicated by tag
* @node: node to aggregate
* @tag: tag to indicate which buckets to aggregate
* @avg_bw: new bw to sum aggregate
* @peak_bw: new bw to max aggregate
* @agg_avg: existing aggregate avg bw val
* @agg_peak: existing aggregate peak bw val
*/
static int qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
size_t i;
struct qcom_icc_node *qn;
qn = node->data;
if (!tag)
tag = RPM_ALWAYS_TAG;
for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++) {
if (tag & BIT(i)) {
qn->sum_avg[i] += avg_bw;
qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
}
}
*agg_avg += avg_bw;
*agg_peak = max_t(u32, *agg_peak, peak_bw);
return 0;
}
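/*
 * Illustrative sketch (not upstream code): how the tag bits select RPM
 * buckets. A tag of 0 is promoted to RPM_ALWAYS_TAG, landing the vote in
 * both the active and sleep buckets; BIT(QCOM_SMD_RPM_SLEEP_STATE) (an
 * assumption based on the loop above) hits only the sleep bucket.
 */
static void __maybe_unused example_bucket_aggregation(struct icc_node *node)
{
	u32 agg_avg = 0, agg_peak = 0;

	/* Vote 1: 100000 kB/s avg, 200000 kB/s peak, both buckets */
	qcom_icc_bw_aggregate(node, 0, 100000, 200000, &agg_avg, &agg_peak);

	/* Vote 2: 50000 kB/s avg and peak, sleep bucket only */
	qcom_icc_bw_aggregate(node, BIT(QCOM_SMD_RPM_SLEEP_STATE),
			      50000, 50000, &agg_avg, &agg_peak);

	/* Framework-visible totals: agg_avg == 150000, agg_peak == 200000 */
}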
/**
* qcom_icc_bus_aggregate - calculate bus clock rates by traversing all nodes
* @provider: generic interconnect provider
* @agg_clk_rate: array containing the aggregated clock rates in kHz
*/
static void qcom_icc_bus_aggregate(struct icc_provider *provider, u64 *agg_clk_rate)
{
u64 agg_avg_rate, agg_rate;
struct qcom_icc_node *qn;
struct icc_node *node;
int i;
/*
* Iterate nodes on the provider, aggregate bandwidth requests for
* every bucket and convert them into bus clock rates.
*/
list_for_each_entry(node, &provider->nodes, node_list) {
qn = node->data;
for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++) {
if (qn->channels)
agg_avg_rate = div_u64(qn->sum_avg[i], qn->channels);
else
agg_avg_rate = qn->sum_avg[i];
agg_rate = max_t(u64, agg_avg_rate, qn->max_peak[i]);
do_div(agg_rate, qn->buswidth);
agg_clk_rate[i] = max_t(u64, agg_clk_rate[i], agg_rate);
}
}
}
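/*
 * Worked example (illustrative): with buswidth = 8 and channels = 2, a
 * bucket holding sum_avg = 800000 kB/s and max_peak = 500000 kB/s yields
 * max(800000 / 2, 500000) / 8 = 62500 kHz, i.e. a 62.5 MHz bus clock vote.
 */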
static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
{
struct qcom_icc_node *src_qn = NULL, *dst_qn = NULL;
u64 agg_clk_rate[QCOM_SMD_RPM_STATE_NUM] = { 0 };
struct icc_provider *provider;
struct qcom_icc_provider *qp;
u64 active_rate, sleep_rate;
int ret;
src_qn = src->data;
if (dst)
dst_qn = dst->data;
provider = src->provider;
qp = to_qcom_provider(provider);
qcom_icc_bus_aggregate(provider, agg_clk_rate);
active_rate = agg_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE];
sleep_rate = agg_clk_rate[QCOM_SMD_RPM_SLEEP_STATE];
ret = qcom_icc_rpm_set(src_qn, src_qn->sum_avg);
if (ret)
return ret;
if (dst_qn) {
ret = qcom_icc_rpm_set(dst_qn, dst_qn->sum_avg);
if (ret)
return ret;
}
/* Some providers don't have a bus clock to scale */
if (!qp->bus_clk_desc && !qp->bus_clk)
return 0;
/*
* Downstream checks whether the requested rate is zero, but it makes little sense
* to vote for a value that's below the lower threshold, so let's not do so.
*/
if (qp->keep_alive)
active_rate = max(ICC_BUS_CLK_MIN_RATE, active_rate);
/* Some providers have a non-RPM-owned bus clock - convert kHz->Hz for the CCF */
if (qp->bus_clk) {
active_rate = max_t(u64, active_rate, sleep_rate);
		/* ARM32 caps clk_set_rate arg to u32... Nothing we can do about that! */
active_rate = min_t(u64, 1000ULL * active_rate, ULONG_MAX);
return clk_set_rate(qp->bus_clk, active_rate);
}
/* RPM only accepts <=INT_MAX rates */
active_rate = min_t(u64, active_rate, INT_MAX);
sleep_rate = min_t(u64, sleep_rate, INT_MAX);
if (active_rate != qp->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE]) {
ret = qcom_icc_rpm_set_bus_rate(qp->bus_clk_desc, QCOM_SMD_RPM_ACTIVE_STATE,
active_rate);
if (ret)
return ret;
		/* Cache the rate after we've successfully committed it to RPM */
qp->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE] = active_rate;
}
if (sleep_rate != qp->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE]) {
ret = qcom_icc_rpm_set_bus_rate(qp->bus_clk_desc, QCOM_SMD_RPM_SLEEP_STATE,
sleep_rate);
if (ret)
return ret;
		/* Cache the rate after we've successfully committed it to RPM */
qp->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE] = sleep_rate;
}
return 0;
}
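/*
 * Consumer-side sketch (illustrative, not part of this file): a client's
 * icc_set_bw() call is what eventually lands in qcom_icc_set() above.
 * The "memory" path name is a made-up example that would come from the
 * consumer's interconnect-names DT property.
 */
static int __maybe_unused example_consumer_vote(struct device *dev)
{
	struct icc_path *path;
	int ret;

	path = of_icc_get(dev, "memory");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* Bandwidth units are kB/s: 100 MB/s average, 200 MB/s peak */
	ret = icc_set_bw(path, 100000, 200000);

	icc_put(path);
	return ret;
}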
int qnoc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct qcom_icc_desc *desc;
struct icc_onecell_data *data;
struct icc_provider *provider;
struct qcom_icc_node * const *qnodes;
struct qcom_icc_provider *qp;
struct icc_node *node;
size_t num_nodes, i;
const char * const *cds = NULL;
int cd_num;
int ret;
/* wait for the RPM proxy */
if (!qcom_icc_rpm_smd_available())
return -EPROBE_DEFER;
desc = of_device_get_match_data(dev);
if (!desc)
return -EINVAL;
qnodes = desc->nodes;
num_nodes = desc->num_nodes;
if (desc->num_intf_clocks) {
cds = desc->intf_clocks;
cd_num = desc->num_intf_clocks;
} else {
/* 0 intf clocks is perfectly fine */
cd_num = 0;
}
qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
if (!qp)
return -ENOMEM;
qp->intf_clks = devm_kcalloc(dev, cd_num, sizeof(*qp->intf_clks), GFP_KERNEL);
if (!qp->intf_clks)
return -ENOMEM;
if (desc->bus_clk_desc) {
qp->bus_clk_desc = devm_kzalloc(dev, sizeof(*qp->bus_clk_desc),
GFP_KERNEL);
if (!qp->bus_clk_desc)
return -ENOMEM;
		*qp->bus_clk_desc = *desc->bus_clk_desc;
} else {
/* Some older SoCs may have a single non-RPM-owned bus clock. */
qp->bus_clk = devm_clk_get_optional(dev, "bus");
if (IS_ERR(qp->bus_clk))
return PTR_ERR(qp->bus_clk);
}
data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
GFP_KERNEL);
if (!data)
return -ENOMEM;
qp->num_intf_clks = cd_num;
for (i = 0; i < cd_num; i++)
qp->intf_clks[i].id = cds[i];
qp->keep_alive = desc->keep_alive;
qp->type = desc->type;
qp->qos_offset = desc->qos_offset;
if (desc->regmap_cfg) {
struct resource *res;
void __iomem *mmio;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
/* Try parent's regmap */
qp->regmap = dev_get_regmap(dev->parent, NULL);
if (qp->regmap)
goto regmap_done;
return -ENODEV;
}
mmio = devm_ioremap_resource(dev, res);
if (IS_ERR(mmio))
return PTR_ERR(mmio);
qp->regmap = devm_regmap_init_mmio(dev, mmio, desc->regmap_cfg);
if (IS_ERR(qp->regmap)) {
dev_err(dev, "Cannot regmap interconnect bus resource\n");
return PTR_ERR(qp->regmap);
}
}
regmap_done:
ret = clk_prepare_enable(qp->bus_clk);
if (ret)
return ret;
ret = devm_clk_bulk_get(dev, qp->num_intf_clks, qp->intf_clks);
if (ret)
return ret;
provider = &qp->provider;
provider->dev = dev;
provider->set = qcom_icc_set;
provider->pre_aggregate = qcom_icc_pre_bw_aggregate;
provider->aggregate = qcom_icc_bw_aggregate;
provider->xlate_extended = qcom_icc_xlate_extended;
provider->data = data;
icc_provider_init(provider);
/* If this fails, bus accesses will crash the platform! */
ret = clk_bulk_prepare_enable(qp->num_intf_clks, qp->intf_clks);
if (ret)
return ret;
for (i = 0; i < num_nodes; i++) {
size_t j;
node = icc_node_create(qnodes[i]->id);
if (IS_ERR(node)) {
ret = PTR_ERR(node);
goto err_remove_nodes;
}
node->name = qnodes[i]->name;
node->data = qnodes[i];
icc_node_add(node, provider);
for (j = 0; j < qnodes[i]->num_links; j++)
icc_link_create(node, qnodes[i]->links[j]);
/* Set QoS registers (we only need to do it once, generally) */
if (qnodes[i]->qos.ap_owned &&
qnodes[i]->qos.qos_mode != NOC_QOS_MODE_INVALID) {
ret = qcom_icc_qos_set(node);
if (ret)
return ret;
}
data->nodes[i] = node;
}
data->num_nodes = num_nodes;
clk_bulk_disable_unprepare(qp->num_intf_clks, qp->intf_clks);
ret = icc_provider_register(provider);
if (ret)
goto err_remove_nodes;
platform_set_drvdata(pdev, qp);
/* Populate child NoC devices if any */
if (of_get_child_count(dev->of_node) > 0) {
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret)
goto err_deregister_provider;
}
return 0;
err_deregister_provider:
icc_provider_deregister(provider);
err_remove_nodes:
icc_nodes_remove(provider);
clk_disable_unprepare(qp->bus_clk);
return ret;
}
EXPORT_SYMBOL(qnoc_probe);
int qnoc_remove(struct platform_device *pdev)
{
struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
icc_provider_deregister(&qp->provider);
icc_nodes_remove(&qp->provider);
clk_disable_unprepare(qp->bus_clk);
return 0;
}
EXPORT_SYMBOL(qnoc_remove);
| linux-master | drivers/interconnect/qcom/icc-rpm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2020 Linaro Ltd
* Author: Georgi Djakov <[email protected]>
*/
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <dt-bindings/interconnect/qcom,msm8916.h>
#include "icc-rpm.h"
enum {
MSM8916_BIMC_SNOC_MAS = 1,
MSM8916_BIMC_SNOC_SLV,
MSM8916_MASTER_AMPSS_M0,
MSM8916_MASTER_LPASS,
MSM8916_MASTER_BLSP_1,
MSM8916_MASTER_DEHR,
MSM8916_MASTER_GRAPHICS_3D,
MSM8916_MASTER_JPEG,
MSM8916_MASTER_MDP_PORT0,
MSM8916_MASTER_CRYPTO_CORE0,
MSM8916_MASTER_SDCC_1,
MSM8916_MASTER_SDCC_2,
MSM8916_MASTER_QDSS_BAM,
MSM8916_MASTER_QDSS_ETR,
MSM8916_MASTER_SNOC_CFG,
MSM8916_MASTER_SPDM,
MSM8916_MASTER_TCU0,
MSM8916_MASTER_TCU1,
MSM8916_MASTER_USB_HS,
MSM8916_MASTER_VFE,
MSM8916_MASTER_VIDEO_P0,
MSM8916_SNOC_MM_INT_0,
MSM8916_SNOC_MM_INT_1,
MSM8916_SNOC_MM_INT_2,
MSM8916_SNOC_MM_INT_BIMC,
MSM8916_PNOC_INT_0,
MSM8916_PNOC_INT_1,
MSM8916_PNOC_MAS_0,
MSM8916_PNOC_MAS_1,
MSM8916_PNOC_SLV_0,
MSM8916_PNOC_SLV_1,
MSM8916_PNOC_SLV_2,
MSM8916_PNOC_SLV_3,
MSM8916_PNOC_SLV_4,
MSM8916_PNOC_SLV_8,
MSM8916_PNOC_SLV_9,
MSM8916_PNOC_SNOC_MAS,
MSM8916_PNOC_SNOC_SLV,
MSM8916_SNOC_QDSS_INT,
MSM8916_SLAVE_AMPSS_L2,
MSM8916_SLAVE_APSS,
MSM8916_SLAVE_LPASS,
MSM8916_SLAVE_BIMC_CFG,
MSM8916_SLAVE_BLSP_1,
MSM8916_SLAVE_BOOT_ROM,
MSM8916_SLAVE_CAMERA_CFG,
MSM8916_SLAVE_CATS_128,
MSM8916_SLAVE_OCMEM_64,
MSM8916_SLAVE_CLK_CTL,
MSM8916_SLAVE_CRYPTO_0_CFG,
MSM8916_SLAVE_DEHR_CFG,
MSM8916_SLAVE_DISPLAY_CFG,
MSM8916_SLAVE_EBI_CH0,
MSM8916_SLAVE_GRAPHICS_3D_CFG,
MSM8916_SLAVE_IMEM_CFG,
MSM8916_SLAVE_IMEM,
MSM8916_SLAVE_MPM,
MSM8916_SLAVE_MSG_RAM,
MSM8916_SLAVE_MSS,
MSM8916_SLAVE_PDM,
MSM8916_SLAVE_PMIC_ARB,
MSM8916_SLAVE_PNOC_CFG,
MSM8916_SLAVE_PRNG,
MSM8916_SLAVE_QDSS_CFG,
MSM8916_SLAVE_QDSS_STM,
MSM8916_SLAVE_RBCPR_CFG,
MSM8916_SLAVE_SDCC_1,
MSM8916_SLAVE_SDCC_2,
MSM8916_SLAVE_SECURITY,
MSM8916_SLAVE_SNOC_CFG,
MSM8916_SLAVE_SPDM,
MSM8916_SLAVE_SRVC_SNOC,
MSM8916_SLAVE_TCSR,
MSM8916_SLAVE_TLMM,
MSM8916_SLAVE_USB_HS,
MSM8916_SLAVE_VENUS_CFG,
MSM8916_SNOC_BIMC_0_MAS,
MSM8916_SNOC_BIMC_0_SLV,
MSM8916_SNOC_BIMC_1_MAS,
MSM8916_SNOC_BIMC_1_SLV,
MSM8916_SNOC_INT_0,
MSM8916_SNOC_INT_1,
MSM8916_SNOC_INT_BIMC,
MSM8916_SNOC_PNOC_MAS,
MSM8916_SNOC_PNOC_SLV,
};
static const u16 bimc_snoc_mas_links[] = {
MSM8916_BIMC_SNOC_SLV
};
static struct qcom_icc_node bimc_snoc_mas = {
.name = "bimc_snoc_mas",
.id = MSM8916_BIMC_SNOC_MAS,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(bimc_snoc_mas_links),
.links = bimc_snoc_mas_links,
};
static const u16 bimc_snoc_slv_links[] = {
MSM8916_SNOC_INT_0,
MSM8916_SNOC_INT_1
};
static struct qcom_icc_node bimc_snoc_slv = {
.name = "bimc_snoc_slv",
.id = MSM8916_BIMC_SNOC_SLV,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(bimc_snoc_slv_links),
.links = bimc_snoc_slv_links,
};
static const u16 mas_apss_links[] = {
MSM8916_SLAVE_EBI_CH0,
MSM8916_BIMC_SNOC_MAS,
MSM8916_SLAVE_AMPSS_L2
};
static struct qcom_icc_node mas_apss = {
.name = "mas_apss",
.id = MSM8916_MASTER_AMPSS_M0,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 0,
.num_links = ARRAY_SIZE(mas_apss_links),
.links = mas_apss_links,
};
static const u16 mas_audio_links[] = {
MSM8916_PNOC_MAS_0
};
static struct qcom_icc_node mas_audio = {
.name = "mas_audio",
.id = MSM8916_MASTER_LPASS,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_audio_links),
.links = mas_audio_links,
};
static const u16 mas_blsp_1_links[] = {
MSM8916_PNOC_MAS_1
};
static struct qcom_icc_node mas_blsp_1 = {
.name = "mas_blsp_1",
.id = MSM8916_MASTER_BLSP_1,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_blsp_1_links),
.links = mas_blsp_1_links,
};
static const u16 mas_dehr_links[] = {
MSM8916_PNOC_MAS_0
};
static struct qcom_icc_node mas_dehr = {
.name = "mas_dehr",
.id = MSM8916_MASTER_DEHR,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_dehr_links),
.links = mas_dehr_links,
};
static const u16 mas_gfx_links[] = {
MSM8916_SLAVE_EBI_CH0,
MSM8916_BIMC_SNOC_MAS,
MSM8916_SLAVE_AMPSS_L2
};
static struct qcom_icc_node mas_gfx = {
.name = "mas_gfx",
.id = MSM8916_MASTER_GRAPHICS_3D,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 2,
.num_links = ARRAY_SIZE(mas_gfx_links),
.links = mas_gfx_links,
};
static const u16 mas_jpeg_links[] = {
MSM8916_SNOC_MM_INT_0,
MSM8916_SNOC_MM_INT_2
};
static struct qcom_icc_node mas_jpeg = {
.name = "mas_jpeg",
.id = MSM8916_MASTER_JPEG,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 6,
.num_links = ARRAY_SIZE(mas_jpeg_links),
.links = mas_jpeg_links,
};
static const u16 mas_mdp_links[] = {
MSM8916_SNOC_MM_INT_0,
MSM8916_SNOC_MM_INT_2
};
static struct qcom_icc_node mas_mdp = {
.name = "mas_mdp",
.id = MSM8916_MASTER_MDP_PORT0,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 7,
.num_links = ARRAY_SIZE(mas_mdp_links),
.links = mas_mdp_links,
};
static const u16 mas_pcnoc_crypto_0_links[] = {
MSM8916_PNOC_INT_1
};
static struct qcom_icc_node mas_pcnoc_crypto_0 = {
.name = "mas_pcnoc_crypto_0",
.id = MSM8916_MASTER_CRYPTO_CORE0,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_pcnoc_crypto_0_links),
.links = mas_pcnoc_crypto_0_links,
};
static const u16 mas_pcnoc_sdcc_1_links[] = {
MSM8916_PNOC_INT_1
};
static struct qcom_icc_node mas_pcnoc_sdcc_1 = {
.name = "mas_pcnoc_sdcc_1",
.id = MSM8916_MASTER_SDCC_1,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_pcnoc_sdcc_1_links),
.links = mas_pcnoc_sdcc_1_links,
};
static const u16 mas_pcnoc_sdcc_2_links[] = {
MSM8916_PNOC_INT_1
};
static struct qcom_icc_node mas_pcnoc_sdcc_2 = {
.name = "mas_pcnoc_sdcc_2",
.id = MSM8916_MASTER_SDCC_2,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_pcnoc_sdcc_2_links),
.links = mas_pcnoc_sdcc_2_links,
};
static const u16 mas_qdss_bam_links[] = {
MSM8916_SNOC_QDSS_INT
};
static struct qcom_icc_node mas_qdss_bam = {
.name = "mas_qdss_bam",
.id = MSM8916_MASTER_QDSS_BAM,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 1,
.qos.prio_level = 1,
.qos.qos_port = 11,
.num_links = ARRAY_SIZE(mas_qdss_bam_links),
.links = mas_qdss_bam_links,
};
static const u16 mas_qdss_etr_links[] = {
MSM8916_SNOC_QDSS_INT
};
static struct qcom_icc_node mas_qdss_etr = {
.name = "mas_qdss_etr",
.id = MSM8916_MASTER_QDSS_ETR,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 1,
.qos.prio_level = 1,
.qos.qos_port = 10,
.num_links = ARRAY_SIZE(mas_qdss_etr_links),
.links = mas_qdss_etr_links,
};
static const u16 mas_snoc_cfg_links[] = {
MSM8916_SNOC_QDSS_INT
};
static struct qcom_icc_node mas_snoc_cfg = {
.name = "mas_snoc_cfg",
.id = MSM8916_MASTER_SNOC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_snoc_cfg_links),
.links = mas_snoc_cfg_links,
};
static const u16 mas_spdm_links[] = {
MSM8916_PNOC_MAS_0
};
static struct qcom_icc_node mas_spdm = {
.name = "mas_spdm",
.id = MSM8916_MASTER_SPDM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_spdm_links),
.links = mas_spdm_links,
};
static const u16 mas_tcu0_links[] = {
MSM8916_SLAVE_EBI_CH0,
MSM8916_BIMC_SNOC_MAS,
MSM8916_SLAVE_AMPSS_L2
};
static struct qcom_icc_node mas_tcu0 = {
.name = "mas_tcu0",
.id = MSM8916_MASTER_TCU0,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 2,
.qos.prio_level = 2,
.qos.qos_port = 5,
.num_links = ARRAY_SIZE(mas_tcu0_links),
.links = mas_tcu0_links,
};
static const u16 mas_tcu1_links[] = {
MSM8916_SLAVE_EBI_CH0,
MSM8916_BIMC_SNOC_MAS,
MSM8916_SLAVE_AMPSS_L2
};
static struct qcom_icc_node mas_tcu1 = {
.name = "mas_tcu1",
.id = MSM8916_MASTER_TCU1,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 2,
.qos.prio_level = 2,
.qos.qos_port = 6,
.num_links = ARRAY_SIZE(mas_tcu1_links),
.links = mas_tcu1_links,
};
static const u16 mas_usb_hs_links[] = {
MSM8916_PNOC_MAS_1
};
static struct qcom_icc_node mas_usb_hs = {
.name = "mas_usb_hs",
.id = MSM8916_MASTER_USB_HS,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_usb_hs_links),
.links = mas_usb_hs_links,
};
static const u16 mas_vfe_links[] = {
MSM8916_SNOC_MM_INT_1,
MSM8916_SNOC_MM_INT_2
};
static struct qcom_icc_node mas_vfe = {
.name = "mas_vfe",
.id = MSM8916_MASTER_VFE,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 9,
.num_links = ARRAY_SIZE(mas_vfe_links),
.links = mas_vfe_links,
};
static const u16 mas_video_links[] = {
MSM8916_SNOC_MM_INT_0,
MSM8916_SNOC_MM_INT_2
};
static struct qcom_icc_node mas_video = {
.name = "mas_video",
.id = MSM8916_MASTER_VIDEO_P0,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 8,
.num_links = ARRAY_SIZE(mas_video_links),
.links = mas_video_links,
};
static const u16 mm_int_0_links[] = {
MSM8916_SNOC_MM_INT_BIMC
};
static struct qcom_icc_node mm_int_0 = {
.name = "mm_int_0",
.id = MSM8916_SNOC_MM_INT_0,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mm_int_0_links),
.links = mm_int_0_links,
};
static const u16 mm_int_1_links[] = {
MSM8916_SNOC_MM_INT_BIMC
};
static struct qcom_icc_node mm_int_1 = {
.name = "mm_int_1",
.id = MSM8916_SNOC_MM_INT_1,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mm_int_1_links),
.links = mm_int_1_links,
};
static const u16 mm_int_2_links[] = {
MSM8916_SNOC_INT_0
};
static struct qcom_icc_node mm_int_2 = {
.name = "mm_int_2",
.id = MSM8916_SNOC_MM_INT_2,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mm_int_2_links),
.links = mm_int_2_links,
};
static const u16 mm_int_bimc_links[] = {
MSM8916_SNOC_BIMC_1_MAS
};
static struct qcom_icc_node mm_int_bimc = {
.name = "mm_int_bimc",
.id = MSM8916_SNOC_MM_INT_BIMC,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mm_int_bimc_links),
.links = mm_int_bimc_links,
};
static const u16 pcnoc_int_0_links[] = {
MSM8916_PNOC_SNOC_MAS,
MSM8916_PNOC_SLV_0,
MSM8916_PNOC_SLV_1,
MSM8916_PNOC_SLV_2,
MSM8916_PNOC_SLV_3,
MSM8916_PNOC_SLV_4,
MSM8916_PNOC_SLV_8,
MSM8916_PNOC_SLV_9
};
static struct qcom_icc_node pcnoc_int_0 = {
.name = "pcnoc_int_0",
.id = MSM8916_PNOC_INT_0,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_int_0_links),
.links = pcnoc_int_0_links,
};
static const u16 pcnoc_int_1_links[] = {
MSM8916_PNOC_SNOC_MAS
};
static struct qcom_icc_node pcnoc_int_1 = {
.name = "pcnoc_int_1",
.id = MSM8916_PNOC_INT_1,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_int_1_links),
.links = pcnoc_int_1_links,
};
static const u16 pcnoc_m_0_links[] = {
MSM8916_PNOC_INT_0
};
static struct qcom_icc_node pcnoc_m_0 = {
.name = "pcnoc_m_0",
.id = MSM8916_PNOC_MAS_0,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_m_0_links),
.links = pcnoc_m_0_links,
};
static const u16 pcnoc_m_1_links[] = {
MSM8916_PNOC_SNOC_MAS
};
static struct qcom_icc_node pcnoc_m_1 = {
.name = "pcnoc_m_1",
.id = MSM8916_PNOC_MAS_1,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_m_1_links),
.links = pcnoc_m_1_links,
};
static const u16 pcnoc_s_0_links[] = {
MSM8916_SLAVE_CLK_CTL,
MSM8916_SLAVE_TLMM,
MSM8916_SLAVE_TCSR,
MSM8916_SLAVE_SECURITY,
MSM8916_SLAVE_MSS
};
static struct qcom_icc_node pcnoc_s_0 = {
.name = "pcnoc_s_0",
.id = MSM8916_PNOC_SLV_0,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_s_0_links),
.links = pcnoc_s_0_links,
};
static const u16 pcnoc_s_1_links[] = {
MSM8916_SLAVE_IMEM_CFG,
MSM8916_SLAVE_CRYPTO_0_CFG,
MSM8916_SLAVE_MSG_RAM,
MSM8916_SLAVE_PDM,
MSM8916_SLAVE_PRNG
};
static struct qcom_icc_node pcnoc_s_1 = {
.name = "pcnoc_s_1",
.id = MSM8916_PNOC_SLV_1,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_s_1_links),
.links = pcnoc_s_1_links,
};
static const u16 pcnoc_s_2_links[] = {
MSM8916_SLAVE_SPDM,
MSM8916_SLAVE_BOOT_ROM,
MSM8916_SLAVE_BIMC_CFG,
MSM8916_SLAVE_PNOC_CFG,
MSM8916_SLAVE_PMIC_ARB
};
static struct qcom_icc_node pcnoc_s_2 = {
.name = "pcnoc_s_2",
.id = MSM8916_PNOC_SLV_2,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_s_2_links),
.links = pcnoc_s_2_links,
};
static const u16 pcnoc_s_3_links[] = {
MSM8916_SLAVE_MPM,
MSM8916_SLAVE_SNOC_CFG,
MSM8916_SLAVE_RBCPR_CFG,
MSM8916_SLAVE_QDSS_CFG,
MSM8916_SLAVE_DEHR_CFG
};
static struct qcom_icc_node pcnoc_s_3 = {
.name = "pcnoc_s_3",
.id = MSM8916_PNOC_SLV_3,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_s_3_links),
.links = pcnoc_s_3_links,
};
static const u16 pcnoc_s_4_links[] = {
MSM8916_SLAVE_VENUS_CFG,
MSM8916_SLAVE_CAMERA_CFG,
MSM8916_SLAVE_DISPLAY_CFG
};
static struct qcom_icc_node pcnoc_s_4 = {
.name = "pcnoc_s_4",
.id = MSM8916_PNOC_SLV_4,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_s_4_links),
.links = pcnoc_s_4_links,
};
static const u16 pcnoc_s_8_links[] = {
MSM8916_SLAVE_USB_HS,
MSM8916_SLAVE_SDCC_1,
MSM8916_SLAVE_BLSP_1
};
static struct qcom_icc_node pcnoc_s_8 = {
.name = "pcnoc_s_8",
.id = MSM8916_PNOC_SLV_8,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_s_8_links),
.links = pcnoc_s_8_links,
};
static const u16 pcnoc_s_9_links[] = {
MSM8916_SLAVE_SDCC_2,
MSM8916_SLAVE_LPASS,
MSM8916_SLAVE_GRAPHICS_3D_CFG
};
static struct qcom_icc_node pcnoc_s_9 = {
.name = "pcnoc_s_9",
.id = MSM8916_PNOC_SLV_9,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_s_9_links),
.links = pcnoc_s_9_links,
};
static const u16 pcnoc_snoc_mas_links[] = {
MSM8916_PNOC_SNOC_SLV
};
static struct qcom_icc_node pcnoc_snoc_mas = {
.name = "pcnoc_snoc_mas",
.id = MSM8916_PNOC_SNOC_MAS,
.buswidth = 8,
.mas_rpm_id = 29,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_snoc_mas_links),
.links = pcnoc_snoc_mas_links,
};
static const u16 pcnoc_snoc_slv_links[] = {
MSM8916_SNOC_INT_0,
MSM8916_SNOC_INT_BIMC,
MSM8916_SNOC_INT_1
};
static struct qcom_icc_node pcnoc_snoc_slv = {
.name = "pcnoc_snoc_slv",
.id = MSM8916_PNOC_SNOC_SLV,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 45,
.num_links = ARRAY_SIZE(pcnoc_snoc_slv_links),
.links = pcnoc_snoc_slv_links,
};
static const u16 qdss_int_links[] = {
MSM8916_SNOC_INT_0,
MSM8916_SNOC_INT_BIMC
};
static struct qcom_icc_node qdss_int = {
.name = "qdss_int",
.id = MSM8916_SNOC_QDSS_INT,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(qdss_int_links),
.links = qdss_int_links,
};
static struct qcom_icc_node slv_apps_l2 = {
.name = "slv_apps_l2",
.id = MSM8916_SLAVE_AMPSS_L2,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_apss = {
.name = "slv_apss",
.id = MSM8916_SLAVE_APSS,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_audio = {
.name = "slv_audio",
.id = MSM8916_SLAVE_LPASS,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_bimc_cfg = {
.name = "slv_bimc_cfg",
.id = MSM8916_SLAVE_BIMC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_blsp_1 = {
.name = "slv_blsp_1",
.id = MSM8916_SLAVE_BLSP_1,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_boot_rom = {
.name = "slv_boot_rom",
.id = MSM8916_SLAVE_BOOT_ROM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_camera_cfg = {
.name = "slv_camera_cfg",
.id = MSM8916_SLAVE_CAMERA_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_cats_0 = {
.name = "slv_cats_0",
.id = MSM8916_SLAVE_CATS_128,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_cats_1 = {
.name = "slv_cats_1",
.id = MSM8916_SLAVE_OCMEM_64,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_clk_ctl = {
.name = "slv_clk_ctl",
.id = MSM8916_SLAVE_CLK_CTL,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_crypto_0_cfg = {
.name = "slv_crypto_0_cfg",
.id = MSM8916_SLAVE_CRYPTO_0_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_dehr_cfg = {
.name = "slv_dehr_cfg",
.id = MSM8916_SLAVE_DEHR_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_display_cfg = {
.name = "slv_display_cfg",
.id = MSM8916_SLAVE_DISPLAY_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_ebi_ch0 = {
.name = "slv_ebi_ch0",
.id = MSM8916_SLAVE_EBI_CH0,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 0,
};
static struct qcom_icc_node slv_gfx_cfg = {
.name = "slv_gfx_cfg",
.id = MSM8916_SLAVE_GRAPHICS_3D_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_imem_cfg = {
.name = "slv_imem_cfg",
.id = MSM8916_SLAVE_IMEM_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_imem = {
.name = "slv_imem",
.id = MSM8916_SLAVE_IMEM,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 26,
};
static struct qcom_icc_node slv_mpm = {
.name = "slv_mpm",
.id = MSM8916_SLAVE_MPM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_msg_ram = {
.name = "slv_msg_ram",
.id = MSM8916_SLAVE_MSG_RAM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_mss = {
.name = "slv_mss",
.id = MSM8916_SLAVE_MSS,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_pdm = {
.name = "slv_pdm",
.id = MSM8916_SLAVE_PDM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_pmic_arb = {
.name = "slv_pmic_arb",
.id = MSM8916_SLAVE_PMIC_ARB,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_pcnoc_cfg = {
.name = "slv_pcnoc_cfg",
.id = MSM8916_SLAVE_PNOC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_prng = {
.name = "slv_prng",
.id = MSM8916_SLAVE_PRNG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_qdss_cfg = {
.name = "slv_qdss_cfg",
.id = MSM8916_SLAVE_QDSS_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_qdss_stm = {
.name = "slv_qdss_stm",
.id = MSM8916_SLAVE_QDSS_STM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 30,
};
static struct qcom_icc_node slv_rbcpr_cfg = {
.name = "slv_rbcpr_cfg",
.id = MSM8916_SLAVE_RBCPR_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_sdcc_1 = {
.name = "slv_sdcc_1",
.id = MSM8916_SLAVE_SDCC_1,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_sdcc_2 = {
.name = "slv_sdcc_2",
.id = MSM8916_SLAVE_SDCC_2,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_security = {
.name = "slv_security",
.id = MSM8916_SLAVE_SECURITY,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_snoc_cfg = {
.name = "slv_snoc_cfg",
.id = MSM8916_SLAVE_SNOC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_spdm = {
.name = "slv_spdm",
.id = MSM8916_SLAVE_SPDM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_srvc_snoc = {
.name = "slv_srvc_snoc",
.id = MSM8916_SLAVE_SRVC_SNOC,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_tcsr = {
.name = "slv_tcsr",
.id = MSM8916_SLAVE_TCSR,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_tlmm = {
.name = "slv_tlmm",
.id = MSM8916_SLAVE_TLMM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_usb_hs = {
.name = "slv_usb_hs",
.id = MSM8916_SLAVE_USB_HS,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_venus_cfg = {
.name = "slv_venus_cfg",
.id = MSM8916_SLAVE_VENUS_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static const u16 snoc_bimc_0_mas_links[] = {
MSM8916_SNOC_BIMC_0_SLV
};
static struct qcom_icc_node snoc_bimc_0_mas = {
.name = "snoc_bimc_0_mas",
.id = MSM8916_SNOC_BIMC_0_MAS,
.buswidth = 8,
.mas_rpm_id = 3,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(snoc_bimc_0_mas_links),
.links = snoc_bimc_0_mas_links,
};
static const u16 snoc_bimc_0_slv_links[] = {
MSM8916_SLAVE_EBI_CH0
};
static struct qcom_icc_node snoc_bimc_0_slv = {
.name = "snoc_bimc_0_slv",
.id = MSM8916_SNOC_BIMC_0_SLV,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 24,
.num_links = ARRAY_SIZE(snoc_bimc_0_slv_links),
.links = snoc_bimc_0_slv_links,
};
static const u16 snoc_bimc_1_mas_links[] = {
MSM8916_SNOC_BIMC_1_SLV
};
static struct qcom_icc_node snoc_bimc_1_mas = {
.name = "snoc_bimc_1_mas",
.id = MSM8916_SNOC_BIMC_1_MAS,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(snoc_bimc_1_mas_links),
.links = snoc_bimc_1_mas_links,
};
static const u16 snoc_bimc_1_slv_links[] = {
MSM8916_SLAVE_EBI_CH0
};
static struct qcom_icc_node snoc_bimc_1_slv = {
.name = "snoc_bimc_1_slv",
.id = MSM8916_SNOC_BIMC_1_SLV,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(snoc_bimc_1_slv_links),
.links = snoc_bimc_1_slv_links,
};
static const u16 snoc_int_0_links[] = {
MSM8916_SLAVE_QDSS_STM,
MSM8916_SLAVE_IMEM,
MSM8916_SNOC_PNOC_MAS
};
static struct qcom_icc_node snoc_int_0 = {
.name = "snoc_int_0",
.id = MSM8916_SNOC_INT_0,
.buswidth = 8,
.mas_rpm_id = 99,
.slv_rpm_id = 130,
.num_links = ARRAY_SIZE(snoc_int_0_links),
.links = snoc_int_0_links,
};
static const u16 snoc_int_1_links[] = {
MSM8916_SLAVE_APSS,
MSM8916_SLAVE_CATS_128,
MSM8916_SLAVE_OCMEM_64
};
static struct qcom_icc_node snoc_int_1 = {
.name = "snoc_int_1",
.id = MSM8916_SNOC_INT_1,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(snoc_int_1_links),
.links = snoc_int_1_links,
};
static const u16 snoc_int_bimc_links[] = {
MSM8916_SNOC_BIMC_0_MAS
};
static struct qcom_icc_node snoc_int_bimc = {
.name = "snoc_int_bimc",
.id = MSM8916_SNOC_INT_BIMC,
.buswidth = 8,
.mas_rpm_id = 101,
.slv_rpm_id = 132,
.num_links = ARRAY_SIZE(snoc_int_bimc_links),
.links = snoc_int_bimc_links,
};
static const u16 snoc_pcnoc_mas_links[] = {
MSM8916_SNOC_PNOC_SLV
};
static struct qcom_icc_node snoc_pcnoc_mas = {
.name = "snoc_pcnoc_mas",
.id = MSM8916_SNOC_PNOC_MAS,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(snoc_pcnoc_mas_links),
.links = snoc_pcnoc_mas_links,
};
static const u16 snoc_pcnoc_slv_links[] = {
MSM8916_PNOC_INT_0
};
static struct qcom_icc_node snoc_pcnoc_slv = {
.name = "snoc_pcnoc_slv",
.id = MSM8916_SNOC_PNOC_SLV,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(snoc_pcnoc_slv_links),
.links = snoc_pcnoc_slv_links,
};
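/*
 * The provider node tables below map the indices from the DT binding
 * header (dt-bindings/interconnect/qcom,msm8916.h) to the node
 * definitions above; the shared icc-rpm probe code walks such a table
 * to register every node with the interconnect framework. (This is a
 * summary of the common code path, not something defined in this file.)
 */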
static struct qcom_icc_node * const msm8916_snoc_nodes[] = {
[BIMC_SNOC_SLV] = &bimc_snoc_slv,
[MASTER_JPEG] = &mas_jpeg,
[MASTER_MDP_PORT0] = &mas_mdp,
[MASTER_QDSS_BAM] = &mas_qdss_bam,
[MASTER_QDSS_ETR] = &mas_qdss_etr,
[MASTER_SNOC_CFG] = &mas_snoc_cfg,
[MASTER_VFE] = &mas_vfe,
[MASTER_VIDEO_P0] = &mas_video,
[SNOC_MM_INT_0] = &mm_int_0,
[SNOC_MM_INT_1] = &mm_int_1,
[SNOC_MM_INT_2] = &mm_int_2,
[SNOC_MM_INT_BIMC] = &mm_int_bimc,
[PCNOC_SNOC_SLV] = &pcnoc_snoc_slv,
[SLAVE_APSS] = &slv_apss,
[SLAVE_CATS_128] = &slv_cats_0,
[SLAVE_OCMEM_64] = &slv_cats_1,
[SLAVE_IMEM] = &slv_imem,
[SLAVE_QDSS_STM] = &slv_qdss_stm,
[SLAVE_SRVC_SNOC] = &slv_srvc_snoc,
[SNOC_BIMC_0_MAS] = &snoc_bimc_0_mas,
[SNOC_BIMC_1_MAS] = &snoc_bimc_1_mas,
[SNOC_INT_0] = &snoc_int_0,
[SNOC_INT_1] = &snoc_int_1,
[SNOC_INT_BIMC] = &snoc_int_bimc,
[SNOC_PCNOC_MAS] = &snoc_pcnoc_mas,
[SNOC_QDSS_INT] = &qdss_int,
};
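/*
 * The regmap below describes the SNoC QoS register block; fast_io makes
 * regmap use spinlock-based locking for these MMIO accesses. In the
 * descriptor that follows, bus_clk_desc names the RPM-managed bus clock
 * scaled with the aggregated bandwidth, and qos_offset is where the QoS
 * registers start within the mapped region (my reading of icc-rpm; the
 * authoritative field definitions live in icc-rpm.h).
 */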
static const struct regmap_config msm8916_snoc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x14000,
.fast_io = true,
};
static const struct qcom_icc_desc msm8916_snoc = {
.type = QCOM_ICC_NOC,
.nodes = msm8916_snoc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_snoc_nodes),
.bus_clk_desc = &bus_1_clk,
.regmap_cfg = &msm8916_snoc_regmap_config,
.qos_offset = 0x7000,
};
static struct qcom_icc_node * const msm8916_bimc_nodes[] = {
[BIMC_SNOC_MAS] = &bimc_snoc_mas,
[MASTER_AMPSS_M0] = &mas_apss,
[MASTER_GRAPHICS_3D] = &mas_gfx,
[MASTER_TCU0] = &mas_tcu0,
[MASTER_TCU1] = &mas_tcu1,
[SLAVE_AMPSS_L2] = &slv_apps_l2,
[SLAVE_EBI_CH0] = &slv_ebi_ch0,
[SNOC_BIMC_0_SLV] = &snoc_bimc_0_slv,
[SNOC_BIMC_1_SLV] = &snoc_bimc_1_slv,
};
static const struct regmap_config msm8916_bimc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x62000,
.fast_io = true,
};
static const struct qcom_icc_desc msm8916_bimc = {
.type = QCOM_ICC_BIMC,
.nodes = msm8916_bimc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_bimc_nodes),
.bus_clk_desc = &bimc_clk,
.regmap_cfg = &msm8916_bimc_regmap_config,
.qos_offset = 0x8000,
};
static struct qcom_icc_node * const msm8916_pcnoc_nodes[] = {
[MASTER_BLSP_1] = &mas_blsp_1,
[MASTER_DEHR] = &mas_dehr,
[MASTER_LPASS] = &mas_audio,
[MASTER_CRYPTO_CORE0] = &mas_pcnoc_crypto_0,
[MASTER_SDCC_1] = &mas_pcnoc_sdcc_1,
[MASTER_SDCC_2] = &mas_pcnoc_sdcc_2,
[MASTER_SPDM] = &mas_spdm,
[MASTER_USB_HS] = &mas_usb_hs,
[PCNOC_INT_0] = &pcnoc_int_0,
[PCNOC_INT_1] = &pcnoc_int_1,
[PCNOC_MAS_0] = &pcnoc_m_0,
[PCNOC_MAS_1] = &pcnoc_m_1,
[PCNOC_SLV_0] = &pcnoc_s_0,
[PCNOC_SLV_1] = &pcnoc_s_1,
[PCNOC_SLV_2] = &pcnoc_s_2,
[PCNOC_SLV_3] = &pcnoc_s_3,
[PCNOC_SLV_4] = &pcnoc_s_4,
[PCNOC_SLV_8] = &pcnoc_s_8,
[PCNOC_SLV_9] = &pcnoc_s_9,
[PCNOC_SNOC_MAS] = &pcnoc_snoc_mas,
[SLAVE_BIMC_CFG] = &slv_bimc_cfg,
[SLAVE_BLSP_1] = &slv_blsp_1,
[SLAVE_BOOT_ROM] = &slv_boot_rom,
[SLAVE_CAMERA_CFG] = &slv_camera_cfg,
[SLAVE_CLK_CTL] = &slv_clk_ctl,
[SLAVE_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
[SLAVE_DEHR_CFG] = &slv_dehr_cfg,
[SLAVE_DISPLAY_CFG] = &slv_display_cfg,
[SLAVE_GRAPHICS_3D_CFG] = &slv_gfx_cfg,
[SLAVE_IMEM_CFG] = &slv_imem_cfg,
[SLAVE_LPASS] = &slv_audio,
[SLAVE_MPM] = &slv_mpm,
[SLAVE_MSG_RAM] = &slv_msg_ram,
[SLAVE_MSS] = &slv_mss,
[SLAVE_PDM] = &slv_pdm,
[SLAVE_PMIC_ARB] = &slv_pmic_arb,
[SLAVE_PCNOC_CFG] = &slv_pcnoc_cfg,
[SLAVE_PRNG] = &slv_prng,
[SLAVE_QDSS_CFG] = &slv_qdss_cfg,
[SLAVE_RBCPR_CFG] = &slv_rbcpr_cfg,
[SLAVE_SDCC_1] = &slv_sdcc_1,
[SLAVE_SDCC_2] = &slv_sdcc_2,
[SLAVE_SECURITY] = &slv_security,
[SLAVE_SNOC_CFG] = &slv_snoc_cfg,
[SLAVE_SPDM] = &slv_spdm,
[SLAVE_TCSR] = &slv_tcsr,
[SLAVE_TLMM] = &slv_tlmm,
[SLAVE_USB_HS] = &slv_usb_hs,
[SLAVE_VENUS_CFG] = &slv_venus_cfg,
[SNOC_PCNOC_SLV] = &snoc_pcnoc_slv,
};
static const struct regmap_config msm8916_pcnoc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x11000,
.fast_io = true,
};
static const struct qcom_icc_desc msm8916_pcnoc = {
.type = QCOM_ICC_NOC,
.nodes = msm8916_pcnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_pcnoc_nodes),
.bus_clk_desc = &bus_0_clk,
.regmap_cfg = &msm8916_pcnoc_regmap_config,
.qos_offset = 0x7000,
};
static const struct of_device_id msm8916_noc_of_match[] = {
{ .compatible = "qcom,msm8916-bimc", .data = &msm8916_bimc },
{ .compatible = "qcom,msm8916-pcnoc", .data = &msm8916_pcnoc },
{ .compatible = "qcom,msm8916-snoc", .data = &msm8916_snoc },
{ }
};
MODULE_DEVICE_TABLE(of, msm8916_noc_of_match);
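/*
 * Consumer usage sketch (not part of this driver): a client device with
 * an "interconnects" DT property would request a path and vote bandwidth
 * through the generic API, e.g.:
 *
 *	struct icc_path *path = of_icc_get(dev, NULL);
 *	if (!IS_ERR(path))
 *		icc_set_bw(path, kBps_to_icc(100000), kBps_to_icc(200000));
 *
 * The framework aggregates such votes over the node graph defined above.
 */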
static struct platform_driver msm8916_noc_driver = {
.probe = qnoc_probe,
.remove = qnoc_remove,
.driver = {
.name = "qnoc-msm8916",
.of_match_table = msm8916_noc_of_match,
},
};
module_platform_driver(msm8916_noc_driver);
MODULE_AUTHOR("Georgi Djakov <[email protected]>");
MODULE_DESCRIPTION("Qualcomm MSM8916 NoC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/interconnect/qcom/msm8916.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Linaro Ltd
* Author: Jun Nie <[email protected]>
 * Based on the msm8916 interconnect driver by Georgi Djakov.
*/
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <dt-bindings/interconnect/qcom,msm8939.h>
#include "icc-rpm.h"
enum {
MSM8939_BIMC_SNOC_MAS = 1,
MSM8939_BIMC_SNOC_SLV,
MSM8939_MASTER_AMPSS_M0,
MSM8939_MASTER_LPASS,
MSM8939_MASTER_BLSP_1,
MSM8939_MASTER_DEHR,
MSM8939_MASTER_GRAPHICS_3D,
MSM8939_MASTER_JPEG,
MSM8939_MASTER_MDP_PORT0,
MSM8939_MASTER_MDP_PORT1,
MSM8939_MASTER_CPP,
MSM8939_MASTER_CRYPTO_CORE0,
MSM8939_MASTER_SDCC_1,
MSM8939_MASTER_SDCC_2,
MSM8939_MASTER_QDSS_BAM,
MSM8939_MASTER_QDSS_ETR,
MSM8939_MASTER_SNOC_CFG,
MSM8939_MASTER_SPDM,
MSM8939_MASTER_TCU0,
MSM8939_MASTER_USB_HS1,
MSM8939_MASTER_USB_HS2,
MSM8939_MASTER_VFE,
MSM8939_MASTER_VIDEO_P0,
MSM8939_SNOC_MM_INT_0,
MSM8939_SNOC_MM_INT_1,
MSM8939_SNOC_MM_INT_2,
MSM8939_PNOC_INT_0,
MSM8939_PNOC_INT_1,
MSM8939_PNOC_MAS_0,
MSM8939_PNOC_MAS_1,
MSM8939_PNOC_SLV_0,
MSM8939_PNOC_SLV_1,
MSM8939_PNOC_SLV_2,
MSM8939_PNOC_SLV_3,
MSM8939_PNOC_SLV_4,
MSM8939_PNOC_SLV_8,
MSM8939_PNOC_SLV_9,
MSM8939_PNOC_SNOC_MAS,
MSM8939_PNOC_SNOC_SLV,
MSM8939_SNOC_QDSS_INT,
MSM8939_SLAVE_AMPSS_L2,
MSM8939_SLAVE_APSS,
MSM8939_SLAVE_LPASS,
MSM8939_SLAVE_BIMC_CFG,
MSM8939_SLAVE_BLSP_1,
MSM8939_SLAVE_BOOT_ROM,
MSM8939_SLAVE_CAMERA_CFG,
MSM8939_SLAVE_CATS_128,
MSM8939_SLAVE_OCMEM_64,
MSM8939_SLAVE_CLK_CTL,
MSM8939_SLAVE_CRYPTO_0_CFG,
MSM8939_SLAVE_DEHR_CFG,
MSM8939_SLAVE_DISPLAY_CFG,
MSM8939_SLAVE_EBI_CH0,
MSM8939_SLAVE_GRAPHICS_3D_CFG,
MSM8939_SLAVE_IMEM_CFG,
MSM8939_SLAVE_IMEM,
MSM8939_SLAVE_MPM,
MSM8939_SLAVE_MSG_RAM,
MSM8939_SLAVE_MSS,
MSM8939_SLAVE_PDM,
MSM8939_SLAVE_PMIC_ARB,
MSM8939_SLAVE_PNOC_CFG,
MSM8939_SLAVE_PRNG,
MSM8939_SLAVE_QDSS_CFG,
MSM8939_SLAVE_QDSS_STM,
MSM8939_SLAVE_RBCPR_CFG,
MSM8939_SLAVE_SDCC_1,
MSM8939_SLAVE_SDCC_2,
MSM8939_SLAVE_SECURITY,
MSM8939_SLAVE_SNOC_CFG,
MSM8939_SLAVE_SPDM,
MSM8939_SLAVE_SRVC_SNOC,
MSM8939_SLAVE_TCSR,
MSM8939_SLAVE_TLMM,
MSM8939_SLAVE_USB_HS1,
MSM8939_SLAVE_USB_HS2,
MSM8939_SLAVE_VENUS_CFG,
MSM8939_SNOC_BIMC_0_MAS,
MSM8939_SNOC_BIMC_0_SLV,
MSM8939_SNOC_BIMC_1_MAS,
MSM8939_SNOC_BIMC_1_SLV,
MSM8939_SNOC_BIMC_2_MAS,
MSM8939_SNOC_BIMC_2_SLV,
MSM8939_SNOC_INT_0,
MSM8939_SNOC_INT_1,
MSM8939_SNOC_INT_BIMC,
MSM8939_SNOC_PNOC_MAS,
MSM8939_SNOC_PNOC_SLV,
};
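/*
 * Each qcom_icc_node below describes one endpoint or internal switch of
 * the MSM8939 topology. mas_rpm_id/slv_rpm_id are the RPM bus-vote
 * identifiers for the master/slave direction, with -1 meaning no RPM
 * vote is sent for that direction. The qos.* fields program the NoC QoS
 * registers for AP-owned ports; NOC_QOS_MODE_INVALID appears to skip QoS
 * programming for the port (a sketch of the semantics as implemented in
 * icc-rpm.c, not a definition made in this file).
 */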
static const u16 bimc_snoc_mas_links[] = {
MSM8939_BIMC_SNOC_SLV
};
static struct qcom_icc_node bimc_snoc_mas = {
.name = "bimc_snoc_mas",
.id = MSM8939_BIMC_SNOC_MAS,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(bimc_snoc_mas_links),
.links = bimc_snoc_mas_links,
};
static const u16 bimc_snoc_slv_links[] = {
MSM8939_SNOC_INT_0,
MSM8939_SNOC_INT_1
};
static struct qcom_icc_node bimc_snoc_slv = {
.name = "bimc_snoc_slv",
.id = MSM8939_BIMC_SNOC_SLV,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 2,
.num_links = ARRAY_SIZE(bimc_snoc_slv_links),
.links = bimc_snoc_slv_links,
};
static const u16 mas_apss_links[] = {
MSM8939_SLAVE_EBI_CH0,
MSM8939_BIMC_SNOC_MAS,
MSM8939_SLAVE_AMPSS_L2
};
static struct qcom_icc_node mas_apss = {
.name = "mas_apss",
.id = MSM8939_MASTER_AMPSS_M0,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 0,
.num_links = ARRAY_SIZE(mas_apss_links),
.links = mas_apss_links,
};
static const u16 mas_audio_links[] = {
MSM8939_PNOC_MAS_0
};
static struct qcom_icc_node mas_audio = {
.name = "mas_audio",
.id = MSM8939_MASTER_LPASS,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_audio_links),
.links = mas_audio_links,
};
static const u16 mas_blsp_1_links[] = {
MSM8939_PNOC_MAS_1
};
static struct qcom_icc_node mas_blsp_1 = {
.name = "mas_blsp_1",
.id = MSM8939_MASTER_BLSP_1,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_blsp_1_links),
.links = mas_blsp_1_links,
};
static const u16 mas_dehr_links[] = {
MSM8939_PNOC_MAS_0
};
static struct qcom_icc_node mas_dehr = {
.name = "mas_dehr",
.id = MSM8939_MASTER_DEHR,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_dehr_links),
.links = mas_dehr_links,
};
static const u16 mas_gfx_links[] = {
MSM8939_SLAVE_EBI_CH0,
MSM8939_BIMC_SNOC_MAS,
MSM8939_SLAVE_AMPSS_L2
};
static struct qcom_icc_node mas_gfx = {
.name = "mas_gfx",
.id = MSM8939_MASTER_GRAPHICS_3D,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 2,
.num_links = ARRAY_SIZE(mas_gfx_links),
.links = mas_gfx_links,
};
static const u16 mas_jpeg_links[] = {
MSM8939_SNOC_MM_INT_0,
MSM8939_SNOC_MM_INT_2
};
static struct qcom_icc_node mas_jpeg = {
.name = "mas_jpeg",
.id = MSM8939_MASTER_JPEG,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 6,
.num_links = ARRAY_SIZE(mas_jpeg_links),
.links = mas_jpeg_links,
};
static const u16 mas_mdp0_links[] = {
MSM8939_SNOC_MM_INT_1,
MSM8939_SNOC_MM_INT_2
};
static struct qcom_icc_node mas_mdp0 = {
.name = "mas_mdp0",
.id = MSM8939_MASTER_MDP_PORT0,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 7,
.num_links = ARRAY_SIZE(mas_mdp0_links),
.links = mas_mdp0_links,
};
static const u16 mas_mdp1_links[] = {
MSM8939_SNOC_MM_INT_0,
MSM8939_SNOC_MM_INT_2
};
static struct qcom_icc_node mas_mdp1 = {
.name = "mas_mdp1",
.id = MSM8939_MASTER_MDP_PORT1,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 13,
.num_links = ARRAY_SIZE(mas_mdp1_links),
.links = mas_mdp1_links,
};
static const u16 mas_cpp_links[] = {
MSM8939_SNOC_MM_INT_0,
MSM8939_SNOC_MM_INT_2
};
static struct qcom_icc_node mas_cpp = {
.name = "mas_cpp",
.id = MSM8939_MASTER_CPP,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 12,
.num_links = ARRAY_SIZE(mas_cpp_links),
.links = mas_cpp_links,
};
static const u16 mas_pcnoc_crypto_0_links[] = {
MSM8939_PNOC_INT_1
};
static struct qcom_icc_node mas_pcnoc_crypto_0 = {
.name = "mas_pcnoc_crypto_0",
.id = MSM8939_MASTER_CRYPTO_CORE0,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_pcnoc_crypto_0_links),
.links = mas_pcnoc_crypto_0_links,
};
static const u16 mas_pcnoc_sdcc_1_links[] = {
MSM8939_PNOC_INT_1
};
static struct qcom_icc_node mas_pcnoc_sdcc_1 = {
.name = "mas_pcnoc_sdcc_1",
.id = MSM8939_MASTER_SDCC_1,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_pcnoc_sdcc_1_links),
.links = mas_pcnoc_sdcc_1_links,
};
static const u16 mas_pcnoc_sdcc_2_links[] = {
MSM8939_PNOC_INT_1
};
static struct qcom_icc_node mas_pcnoc_sdcc_2 = {
.name = "mas_pcnoc_sdcc_2",
.id = MSM8939_MASTER_SDCC_2,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_pcnoc_sdcc_2_links),
.links = mas_pcnoc_sdcc_2_links,
};
static const u16 mas_qdss_bam_links[] = {
MSM8939_SNOC_QDSS_INT
};
static struct qcom_icc_node mas_qdss_bam = {
.name = "mas_qdss_bam",
.id = MSM8939_MASTER_QDSS_BAM,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 1,
.qos.prio_level = 1,
.qos.qos_port = 11,
.num_links = ARRAY_SIZE(mas_qdss_bam_links),
.links = mas_qdss_bam_links,
};
static const u16 mas_qdss_etr_links[] = {
MSM8939_SNOC_QDSS_INT
};
static struct qcom_icc_node mas_qdss_etr = {
.name = "mas_qdss_etr",
.id = MSM8939_MASTER_QDSS_ETR,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 1,
.qos.prio_level = 1,
.qos.qos_port = 10,
.num_links = ARRAY_SIZE(mas_qdss_etr_links),
.links = mas_qdss_etr_links,
};
static const u16 mas_snoc_cfg_links[] = {
MSM8939_SLAVE_SRVC_SNOC
};
static struct qcom_icc_node mas_snoc_cfg = {
.name = "mas_snoc_cfg",
.id = MSM8939_MASTER_SNOC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_snoc_cfg_links),
.links = mas_snoc_cfg_links,
};
static const u16 mas_spdm_links[] = {
MSM8939_PNOC_MAS_0
};
static struct qcom_icc_node mas_spdm = {
.name = "mas_spdm",
.id = MSM8939_MASTER_SPDM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_spdm_links),
.links = mas_spdm_links,
};
static const u16 mas_tcu0_links[] = {
MSM8939_SLAVE_EBI_CH0,
MSM8939_BIMC_SNOC_MAS,
MSM8939_SLAVE_AMPSS_L2
};
static struct qcom_icc_node mas_tcu0 = {
.name = "mas_tcu0",
.id = MSM8939_MASTER_TCU0,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_FIXED,
.qos.areq_prio = 2,
.qos.prio_level = 2,
.qos.qos_port = 6,
.num_links = ARRAY_SIZE(mas_tcu0_links),
.links = mas_tcu0_links,
};
static const u16 mas_usb_hs1_links[] = {
MSM8939_PNOC_MAS_1
};
static struct qcom_icc_node mas_usb_hs1 = {
.name = "mas_usb_hs1",
.id = MSM8939_MASTER_USB_HS1,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_usb_hs1_links),
.links = mas_usb_hs1_links,
};
static const u16 mas_usb_hs2_links[] = {
MSM8939_PNOC_MAS_1
};
static struct qcom_icc_node mas_usb_hs2 = {
.name = "mas_usb_hs2",
.id = MSM8939_MASTER_USB_HS2,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(mas_usb_hs2_links),
.links = mas_usb_hs2_links,
};
static const u16 mas_vfe_links[] = {
MSM8939_SNOC_MM_INT_1,
MSM8939_SNOC_MM_INT_2
};
static struct qcom_icc_node mas_vfe = {
.name = "mas_vfe",
.id = MSM8939_MASTER_VFE,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 9,
.num_links = ARRAY_SIZE(mas_vfe_links),
.links = mas_vfe_links,
};
static const u16 mas_video_links[] = {
MSM8939_SNOC_MM_INT_0,
MSM8939_SNOC_MM_INT_2
};
static struct qcom_icc_node mas_video = {
.name = "mas_video",
.id = MSM8939_MASTER_VIDEO_P0,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.qos.areq_prio = 0,
.qos.prio_level = 0,
.qos.qos_port = 8,
.num_links = ARRAY_SIZE(mas_video_links),
.links = mas_video_links,
};
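/*
 * The mm_int_* nodes are internal multimedia switches: per their link
 * arrays below, mm_int_0 and mm_int_1 steer traffic toward dedicated
 * SNoC->BIMC ports (BIMC_2 and BIMC_1 respectively), while mm_int_2
 * feeds the general snoc_int_0 switch.
 */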
static const u16 mm_int_0_links[] = {
MSM8939_SNOC_BIMC_2_MAS
};
static struct qcom_icc_node mm_int_0 = {
.name = "mm_int_0",
.id = MSM8939_SNOC_MM_INT_0,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mm_int_0_links),
.links = mm_int_0_links,
};
static const u16 mm_int_1_links[] = {
MSM8939_SNOC_BIMC_1_MAS
};
static struct qcom_icc_node mm_int_1 = {
.name = "mm_int_1",
.id = MSM8939_SNOC_MM_INT_1,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mm_int_1_links),
.links = mm_int_1_links,
};
static const u16 mm_int_2_links[] = {
MSM8939_SNOC_INT_0
};
static struct qcom_icc_node mm_int_2 = {
.name = "mm_int_2",
.id = MSM8939_SNOC_MM_INT_2,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(mm_int_2_links),
.links = mm_int_2_links,
};
static const u16 pcnoc_int_0_links[] = {
MSM8939_PNOC_SNOC_MAS,
MSM8939_PNOC_SLV_0,
MSM8939_PNOC_SLV_1,
MSM8939_PNOC_SLV_2,
MSM8939_PNOC_SLV_3,
MSM8939_PNOC_SLV_4,
MSM8939_PNOC_SLV_8,
MSM8939_PNOC_SLV_9
};
static struct qcom_icc_node pcnoc_int_0 = {
.name = "pcnoc_int_0",
.id = MSM8939_PNOC_INT_0,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_int_0_links),
.links = pcnoc_int_0_links,
};
static const u16 pcnoc_int_1_links[] = {
MSM8939_PNOC_SNOC_MAS
};
static struct qcom_icc_node pcnoc_int_1 = {
.name = "pcnoc_int_1",
.id = MSM8939_PNOC_INT_1,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_int_1_links),
.links = pcnoc_int_1_links,
};
static const u16 pcnoc_m_0_links[] = {
MSM8939_PNOC_INT_0
};
static struct qcom_icc_node pcnoc_m_0 = {
.name = "pcnoc_m_0",
.id = MSM8939_PNOC_MAS_0,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_m_0_links),
.links = pcnoc_m_0_links,
};
static const u16 pcnoc_m_1_links[] = {
MSM8939_PNOC_SNOC_MAS
};
static struct qcom_icc_node pcnoc_m_1 = {
.name = "pcnoc_m_1",
.id = MSM8939_PNOC_MAS_1,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_m_1_links),
.links = pcnoc_m_1_links,
};
static const u16 pcnoc_s_0_links[] = {
MSM8939_SLAVE_CLK_CTL,
MSM8939_SLAVE_TLMM,
MSM8939_SLAVE_TCSR,
MSM8939_SLAVE_SECURITY,
MSM8939_SLAVE_MSS
};
static struct qcom_icc_node pcnoc_s_0 = {
.name = "pcnoc_s_0",
.id = MSM8939_PNOC_SLV_0,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_s_0_links),
.links = pcnoc_s_0_links,
};
static const u16 pcnoc_s_1_links[] = {
MSM8939_SLAVE_IMEM_CFG,
MSM8939_SLAVE_CRYPTO_0_CFG,
MSM8939_SLAVE_MSG_RAM,
MSM8939_SLAVE_PDM,
MSM8939_SLAVE_PRNG
};
static struct qcom_icc_node pcnoc_s_1 = {
.name = "pcnoc_s_1",
.id = MSM8939_PNOC_SLV_1,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_s_1_links),
.links = pcnoc_s_1_links,
};
static const u16 pcnoc_s_2_links[] = {
MSM8939_SLAVE_SPDM,
MSM8939_SLAVE_BOOT_ROM,
MSM8939_SLAVE_BIMC_CFG,
MSM8939_SLAVE_PNOC_CFG,
MSM8939_SLAVE_PMIC_ARB
};
static struct qcom_icc_node pcnoc_s_2 = {
.name = "pcnoc_s_2",
.id = MSM8939_PNOC_SLV_2,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_s_2_links),
.links = pcnoc_s_2_links,
};
static const u16 pcnoc_s_3_links[] = {
MSM8939_SLAVE_MPM,
MSM8939_SLAVE_SNOC_CFG,
MSM8939_SLAVE_RBCPR_CFG,
MSM8939_SLAVE_QDSS_CFG,
MSM8939_SLAVE_DEHR_CFG
};
static struct qcom_icc_node pcnoc_s_3 = {
.name = "pcnoc_s_3",
.id = MSM8939_PNOC_SLV_3,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_s_3_links),
.links = pcnoc_s_3_links,
};
static const u16 pcnoc_s_4_links[] = {
MSM8939_SLAVE_VENUS_CFG,
MSM8939_SLAVE_CAMERA_CFG,
MSM8939_SLAVE_DISPLAY_CFG
};
static struct qcom_icc_node pcnoc_s_4 = {
.name = "pcnoc_s_4",
.id = MSM8939_PNOC_SLV_4,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_s_4_links),
.links = pcnoc_s_4_links,
};
static const u16 pcnoc_s_8_links[] = {
MSM8939_SLAVE_USB_HS1,
MSM8939_SLAVE_SDCC_1,
MSM8939_SLAVE_BLSP_1
};
static struct qcom_icc_node pcnoc_s_8 = {
.name = "pcnoc_s_8",
.id = MSM8939_PNOC_SLV_8,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_s_8_links),
.links = pcnoc_s_8_links,
};
static const u16 pcnoc_s_9_links[] = {
MSM8939_SLAVE_SDCC_2,
MSM8939_SLAVE_LPASS,
MSM8939_SLAVE_USB_HS2
};
static struct qcom_icc_node pcnoc_s_9 = {
.name = "pcnoc_s_9",
.id = MSM8939_PNOC_SLV_9,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_s_9_links),
.links = pcnoc_s_9_links,
};
static const u16 pcnoc_snoc_mas_links[] = {
MSM8939_PNOC_SNOC_SLV
};
static struct qcom_icc_node pcnoc_snoc_mas = {
.name = "pcnoc_snoc_mas",
.id = MSM8939_PNOC_SNOC_MAS,
.buswidth = 8,
.mas_rpm_id = 29,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(pcnoc_snoc_mas_links),
.links = pcnoc_snoc_mas_links,
};
static const u16 pcnoc_snoc_slv_links[] = {
MSM8939_SNOC_INT_0,
MSM8939_SNOC_INT_BIMC,
MSM8939_SNOC_INT_1
};
static struct qcom_icc_node pcnoc_snoc_slv = {
.name = "pcnoc_snoc_slv",
.id = MSM8939_PNOC_SNOC_SLV,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 45,
.num_links = ARRAY_SIZE(pcnoc_snoc_slv_links),
.links = pcnoc_snoc_slv_links,
};
static const u16 qdss_int_links[] = {
MSM8939_SNOC_INT_0,
MSM8939_SNOC_INT_BIMC
};
static struct qcom_icc_node qdss_int = {
.name = "qdss_int",
.id = MSM8939_SNOC_QDSS_INT,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(qdss_int_links),
.links = qdss_int_links,
};
static struct qcom_icc_node slv_apps_l2 = {
.name = "slv_apps_l2",
.id = MSM8939_SLAVE_AMPSS_L2,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_apss = {
.name = "slv_apss",
.id = MSM8939_SLAVE_APSS,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_audio = {
.name = "slv_audio",
.id = MSM8939_SLAVE_LPASS,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_bimc_cfg = {
.name = "slv_bimc_cfg",
.id = MSM8939_SLAVE_BIMC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_blsp_1 = {
.name = "slv_blsp_1",
.id = MSM8939_SLAVE_BLSP_1,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_boot_rom = {
.name = "slv_boot_rom",
.id = MSM8939_SLAVE_BOOT_ROM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_camera_cfg = {
.name = "slv_camera_cfg",
.id = MSM8939_SLAVE_CAMERA_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_cats_0 = {
.name = "slv_cats_0",
.id = MSM8939_SLAVE_CATS_128,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_cats_1 = {
.name = "slv_cats_1",
.id = MSM8939_SLAVE_OCMEM_64,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_clk_ctl = {
.name = "slv_clk_ctl",
.id = MSM8939_SLAVE_CLK_CTL,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_crypto_0_cfg = {
.name = "slv_crypto_0_cfg",
.id = MSM8939_SLAVE_CRYPTO_0_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_dehr_cfg = {
.name = "slv_dehr_cfg",
.id = MSM8939_SLAVE_DEHR_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_display_cfg = {
.name = "slv_display_cfg",
.id = MSM8939_SLAVE_DISPLAY_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_ebi_ch0 = {
.name = "slv_ebi_ch0",
.id = MSM8939_SLAVE_EBI_CH0,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 0,
};
static struct qcom_icc_node slv_gfx_cfg = {
.name = "slv_gfx_cfg",
.id = MSM8939_SLAVE_GRAPHICS_3D_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_imem_cfg = {
.name = "slv_imem_cfg",
.id = MSM8939_SLAVE_IMEM_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_imem = {
.name = "slv_imem",
.id = MSM8939_SLAVE_IMEM,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = 26,
};
static struct qcom_icc_node slv_mpm = {
.name = "slv_mpm",
.id = MSM8939_SLAVE_MPM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_msg_ram = {
.name = "slv_msg_ram",
.id = MSM8939_SLAVE_MSG_RAM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_mss = {
.name = "slv_mss",
.id = MSM8939_SLAVE_MSS,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_pdm = {
.name = "slv_pdm",
.id = MSM8939_SLAVE_PDM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_pmic_arb = {
.name = "slv_pmic_arb",
.id = MSM8939_SLAVE_PMIC_ARB,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_pcnoc_cfg = {
.name = "slv_pcnoc_cfg",
.id = MSM8939_SLAVE_PNOC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_prng = {
.name = "slv_prng",
.id = MSM8939_SLAVE_PRNG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_qdss_cfg = {
.name = "slv_qdss_cfg",
.id = MSM8939_SLAVE_QDSS_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_qdss_stm = {
.name = "slv_qdss_stm",
.id = MSM8939_SLAVE_QDSS_STM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = 30,
};
static struct qcom_icc_node slv_rbcpr_cfg = {
.name = "slv_rbcpr_cfg",
.id = MSM8939_SLAVE_RBCPR_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_sdcc_1 = {
.name = "slv_sdcc_1",
.id = MSM8939_SLAVE_SDCC_1,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_sdcc_2 = {
.name = "slv_sdcc_2",
.id = MSM8939_SLAVE_SDCC_2,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_security = {
.name = "slv_security",
.id = MSM8939_SLAVE_SECURITY,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_snoc_cfg = {
.name = "slv_snoc_cfg",
.id = MSM8939_SLAVE_SNOC_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_spdm = {
.name = "slv_spdm",
.id = MSM8939_SLAVE_SPDM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_srvc_snoc = {
.name = "slv_srvc_snoc",
.id = MSM8939_SLAVE_SRVC_SNOC,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_tcsr = {
.name = "slv_tcsr",
.id = MSM8939_SLAVE_TCSR,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_tlmm = {
.name = "slv_tlmm",
.id = MSM8939_SLAVE_TLMM,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_usb_hs1 = {
.name = "slv_usb_hs1",
.id = MSM8939_SLAVE_USB_HS1,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_usb_hs2 = {
.name = "slv_usb_hs2",
.id = MSM8939_SLAVE_USB_HS2,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static struct qcom_icc_node slv_venus_cfg = {
.name = "slv_venus_cfg",
.id = MSM8939_SLAVE_VENUS_CFG,
.buswidth = 4,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
};
static const u16 snoc_bimc_0_mas_links[] = {
MSM8939_SNOC_BIMC_0_SLV
};
static struct qcom_icc_node snoc_bimc_0_mas = {
.name = "snoc_bimc_0_mas",
.id = MSM8939_SNOC_BIMC_0_MAS,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(snoc_bimc_0_mas_links),
.links = snoc_bimc_0_mas_links,
};
static const u16 snoc_bimc_0_slv_links[] = {
MSM8939_SLAVE_EBI_CH0
};
static struct qcom_icc_node snoc_bimc_0_slv = {
.name = "snoc_bimc_0_slv",
.id = MSM8939_SNOC_BIMC_0_SLV,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(snoc_bimc_0_slv_links),
.links = snoc_bimc_0_slv_links,
};
static const u16 snoc_bimc_1_mas_links[] = {
MSM8939_SNOC_BIMC_1_SLV
};
static struct qcom_icc_node snoc_bimc_1_mas = {
.name = "snoc_bimc_1_mas",
.id = MSM8939_SNOC_BIMC_1_MAS,
.buswidth = 16,
.mas_rpm_id = 76,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(snoc_bimc_1_mas_links),
.links = snoc_bimc_1_mas_links,
};
static const u16 snoc_bimc_1_slv_links[] = {
MSM8939_SLAVE_EBI_CH0
};
static struct qcom_icc_node snoc_bimc_1_slv = {
.name = "snoc_bimc_1_slv",
.id = MSM8939_SNOC_BIMC_1_SLV,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = 104,
.num_links = ARRAY_SIZE(snoc_bimc_1_slv_links),
.links = snoc_bimc_1_slv_links,
};
static const u16 snoc_bimc_2_mas_links[] = {
MSM8939_SNOC_BIMC_2_SLV
};
static struct qcom_icc_node snoc_bimc_2_mas = {
.name = "snoc_bimc_2_mas",
.id = MSM8939_SNOC_BIMC_2_MAS,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(snoc_bimc_2_mas_links),
.links = snoc_bimc_2_mas_links,
};
static const u16 snoc_bimc_2_slv_links[] = {
MSM8939_SLAVE_EBI_CH0
};
static struct qcom_icc_node snoc_bimc_2_slv = {
.name = "snoc_bimc_2_slv",
.id = MSM8939_SNOC_BIMC_2_SLV,
.buswidth = 16,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.qos.ap_owned = true,
.qos.qos_mode = NOC_QOS_MODE_INVALID,
.num_links = ARRAY_SIZE(snoc_bimc_2_slv_links),
.links = snoc_bimc_2_slv_links,
};
static const u16 snoc_int_0_links[] = {
MSM8939_SLAVE_QDSS_STM,
MSM8939_SLAVE_IMEM,
MSM8939_SNOC_PNOC_MAS
};
static struct qcom_icc_node snoc_int_0 = {
.name = "snoc_int_0",
.id = MSM8939_SNOC_INT_0,
.buswidth = 8,
.mas_rpm_id = 99,
.slv_rpm_id = 130,
.num_links = ARRAY_SIZE(snoc_int_0_links),
.links = snoc_int_0_links,
};
static const u16 snoc_int_1_links[] = {
MSM8939_SLAVE_APSS,
MSM8939_SLAVE_CATS_128,
MSM8939_SLAVE_OCMEM_64
};
static struct qcom_icc_node snoc_int_1 = {
.name = "snoc_int_1",
.id = MSM8939_SNOC_INT_1,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(snoc_int_1_links),
.links = snoc_int_1_links,
};
static const u16 snoc_int_bimc_links[] = {
MSM8939_SNOC_BIMC_1_MAS
};
static struct qcom_icc_node snoc_int_bimc = {
.name = "snoc_int_bimc",
.id = MSM8939_SNOC_INT_BIMC,
.buswidth = 8,
.mas_rpm_id = 101,
.slv_rpm_id = 132,
.num_links = ARRAY_SIZE(snoc_int_bimc_links),
.links = snoc_int_bimc_links,
};
static const u16 snoc_pcnoc_mas_links[] = {
MSM8939_SNOC_PNOC_SLV
};
static struct qcom_icc_node snoc_pcnoc_mas = {
.name = "snoc_pcnoc_mas",
.id = MSM8939_SNOC_PNOC_MAS,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(snoc_pcnoc_mas_links),
.links = snoc_pcnoc_mas_links,
};
static const u16 snoc_pcnoc_slv_links[] = {
MSM8939_PNOC_INT_0
};
static struct qcom_icc_node snoc_pcnoc_slv = {
.name = "snoc_pcnoc_slv",
.id = MSM8939_SNOC_PNOC_SLV,
.buswidth = 8,
.mas_rpm_id = -1,
.slv_rpm_id = -1,
.num_links = ARRAY_SIZE(snoc_pcnoc_slv_links),
.links = snoc_pcnoc_slv_links,
};
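/*
 * Unlike msm8916, msm8939 exposes the multimedia masters through a
 * separate snoc-mm provider (msm8939_snoc_mm below): it shares the SNoC
 * regmap layout but scales bus_2_clk instead of bus_1_clk.
 */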
static struct qcom_icc_node * const msm8939_snoc_nodes[] = {
[BIMC_SNOC_SLV] = &bimc_snoc_slv,
[MASTER_QDSS_BAM] = &mas_qdss_bam,
[MASTER_QDSS_ETR] = &mas_qdss_etr,
[MASTER_SNOC_CFG] = &mas_snoc_cfg,
[PCNOC_SNOC_SLV] = &pcnoc_snoc_slv,
[SLAVE_APSS] = &slv_apss,
[SLAVE_CATS_128] = &slv_cats_0,
[SLAVE_OCMEM_64] = &slv_cats_1,
[SLAVE_IMEM] = &slv_imem,
[SLAVE_QDSS_STM] = &slv_qdss_stm,
[SLAVE_SRVC_SNOC] = &slv_srvc_snoc,
[SNOC_BIMC_0_MAS] = &snoc_bimc_0_mas,
[SNOC_BIMC_1_MAS] = &snoc_bimc_1_mas,
[SNOC_BIMC_2_MAS] = &snoc_bimc_2_mas,
[SNOC_INT_0] = &snoc_int_0,
[SNOC_INT_1] = &snoc_int_1,
[SNOC_INT_BIMC] = &snoc_int_bimc,
[SNOC_PCNOC_MAS] = &snoc_pcnoc_mas,
[SNOC_QDSS_INT] = &qdss_int,
};
static const struct regmap_config msm8939_snoc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x14080,
.fast_io = true,
};
static const struct qcom_icc_desc msm8939_snoc = {
.type = QCOM_ICC_NOC,
.nodes = msm8939_snoc_nodes,
.num_nodes = ARRAY_SIZE(msm8939_snoc_nodes),
.bus_clk_desc = &bus_1_clk,
.regmap_cfg = &msm8939_snoc_regmap_config,
.qos_offset = 0x7000,
};
static struct qcom_icc_node * const msm8939_snoc_mm_nodes[] = {
[MASTER_VIDEO_P0] = &mas_video,
[MASTER_JPEG] = &mas_jpeg,
[MASTER_VFE] = &mas_vfe,
[MASTER_MDP_PORT0] = &mas_mdp0,
[MASTER_MDP_PORT1] = &mas_mdp1,
[MASTER_CPP] = &mas_cpp,
[SNOC_MM_INT_0] = &mm_int_0,
[SNOC_MM_INT_1] = &mm_int_1,
[SNOC_MM_INT_2] = &mm_int_2,
};
static const struct qcom_icc_desc msm8939_snoc_mm = {
.type = QCOM_ICC_NOC,
.nodes = msm8939_snoc_mm_nodes,
.num_nodes = ARRAY_SIZE(msm8939_snoc_mm_nodes),
.bus_clk_desc = &bus_2_clk,
.regmap_cfg = &msm8939_snoc_regmap_config,
.qos_offset = 0x7000,
};
static struct qcom_icc_node * const msm8939_bimc_nodes[] = {
[BIMC_SNOC_MAS] = &bimc_snoc_mas,
[MASTER_AMPSS_M0] = &mas_apss,
[MASTER_GRAPHICS_3D] = &mas_gfx,
[MASTER_TCU0] = &mas_tcu0,
[SLAVE_AMPSS_L2] = &slv_apps_l2,
[SLAVE_EBI_CH0] = &slv_ebi_ch0,
[SNOC_BIMC_0_SLV] = &snoc_bimc_0_slv,
[SNOC_BIMC_1_SLV] = &snoc_bimc_1_slv,
[SNOC_BIMC_2_SLV] = &snoc_bimc_2_slv,
};
static const struct regmap_config msm8939_bimc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x62000,
.fast_io = true,
};
static const struct qcom_icc_desc msm8939_bimc = {
.type = QCOM_ICC_BIMC,
.nodes = msm8939_bimc_nodes,
.num_nodes = ARRAY_SIZE(msm8939_bimc_nodes),
.bus_clk_desc = &bimc_clk,
.regmap_cfg = &msm8939_bimc_regmap_config,
.qos_offset = 0x8000,
};
static struct qcom_icc_node * const msm8939_pcnoc_nodes[] = {
[MASTER_BLSP_1] = &mas_blsp_1,
[MASTER_DEHR] = &mas_dehr,
[MASTER_LPASS] = &mas_audio,
[MASTER_CRYPTO_CORE0] = &mas_pcnoc_crypto_0,
[MASTER_SDCC_1] = &mas_pcnoc_sdcc_1,
[MASTER_SDCC_2] = &mas_pcnoc_sdcc_2,
[MASTER_SPDM] = &mas_spdm,
[MASTER_USB_HS1] = &mas_usb_hs1,
[MASTER_USB_HS2] = &mas_usb_hs2,
[PCNOC_INT_0] = &pcnoc_int_0,
[PCNOC_INT_1] = &pcnoc_int_1,
[PCNOC_MAS_0] = &pcnoc_m_0,
[PCNOC_MAS_1] = &pcnoc_m_1,
[PCNOC_SLV_0] = &pcnoc_s_0,
[PCNOC_SLV_1] = &pcnoc_s_1,
[PCNOC_SLV_2] = &pcnoc_s_2,
[PCNOC_SLV_3] = &pcnoc_s_3,
[PCNOC_SLV_4] = &pcnoc_s_4,
[PCNOC_SLV_8] = &pcnoc_s_8,
[PCNOC_SLV_9] = &pcnoc_s_9,
[PCNOC_SNOC_MAS] = &pcnoc_snoc_mas,
[SLAVE_BIMC_CFG] = &slv_bimc_cfg,
[SLAVE_BLSP_1] = &slv_blsp_1,
[SLAVE_BOOT_ROM] = &slv_boot_rom,
[SLAVE_CAMERA_CFG] = &slv_camera_cfg,
[SLAVE_CLK_CTL] = &slv_clk_ctl,
[SLAVE_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
[SLAVE_DEHR_CFG] = &slv_dehr_cfg,
[SLAVE_DISPLAY_CFG] = &slv_display_cfg,
[SLAVE_GRAPHICS_3D_CFG] = &slv_gfx_cfg,
[SLAVE_IMEM_CFG] = &slv_imem_cfg,
[SLAVE_LPASS] = &slv_audio,
[SLAVE_MPM] = &slv_mpm,
[SLAVE_MSG_RAM] = &slv_msg_ram,
[SLAVE_MSS] = &slv_mss,
[SLAVE_PDM] = &slv_pdm,
[SLAVE_PMIC_ARB] = &slv_pmic_arb,
[SLAVE_PCNOC_CFG] = &slv_pcnoc_cfg,
[SLAVE_PRNG] = &slv_prng,
[SLAVE_QDSS_CFG] = &slv_qdss_cfg,
[SLAVE_RBCPR_CFG] = &slv_rbcpr_cfg,
[SLAVE_SDCC_1] = &slv_sdcc_1,
[SLAVE_SDCC_2] = &slv_sdcc_2,
[SLAVE_SECURITY] = &slv_security,
[SLAVE_SNOC_CFG] = &slv_snoc_cfg,
[SLAVE_SPDM] = &slv_spdm,
[SLAVE_TCSR] = &slv_tcsr,
[SLAVE_TLMM] = &slv_tlmm,
[SLAVE_USB_HS1] = &slv_usb_hs1,
[SLAVE_USB_HS2] = &slv_usb_hs2,
[SLAVE_VENUS_CFG] = &slv_venus_cfg,
[SNOC_PCNOC_SLV] = &snoc_pcnoc_slv,
};
static const struct regmap_config msm8939_pcnoc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x11000,
.fast_io = true,
};
static const struct qcom_icc_desc msm8939_pcnoc = {
.type = QCOM_ICC_NOC,
.nodes = msm8939_pcnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8939_pcnoc_nodes),
.bus_clk_desc = &bus_0_clk,
.regmap_cfg = &msm8939_pcnoc_regmap_config,
.qos_offset = 0x7000,
};
static const struct of_device_id msm8939_noc_of_match[] = {
{ .compatible = "qcom,msm8939-bimc", .data = &msm8939_bimc },
{ .compatible = "qcom,msm8939-pcnoc", .data = &msm8939_pcnoc },
{ .compatible = "qcom,msm8939-snoc", .data = &msm8939_snoc },
{ .compatible = "qcom,msm8939-snoc-mm", .data = &msm8939_snoc_mm },
{ }
};
MODULE_DEVICE_TABLE(of, msm8939_noc_of_match);
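/*
 * The sync_state hook lets the interconnect core keep bandwidth floors
 * high during boot and release them once all consumers have probed;
 * icc_sync_state is the framework-provided implementation of that
 * handover.
 */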
static struct platform_driver msm8939_noc_driver = {
.probe = qnoc_probe,
.remove = qnoc_remove,
.driver = {
.name = "qnoc-msm8939",
.of_match_table = msm8939_noc_of_match,
.sync_state = icc_sync_state,
},
};
module_platform_driver(msm8939_noc_driver);
MODULE_AUTHOR("Jun Nie <[email protected]>");
MODULE_DESCRIPTION("Qualcomm MSM8939 NoC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/interconnect/qcom/msm8939.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2021, Linaro Limited
*/
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <dt-bindings/interconnect/qcom,sm8450.h>
#include "bcm-voter.h"
#include "icc-common.h"
#include "icc-rpmh.h"
#include "sm8450.h"
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
.id = SM8450_MASTER_QSPI_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
.id = SM8450_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qnm_a1noc_cfg = {
.name = "qnm_a1noc_cfg",
.id = SM8450_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_SLAVE_SERVICE_A1NOC },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
.id = SM8450_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8450_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
.id = SM8450_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8450_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
.id = SM8450_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8450_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
.id = SM8450_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
.id = SM8450_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
.id = SM8450_MASTER_QUP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qnm_a2noc_cfg = {
.name = "qnm_a2noc_cfg",
.id = SM8450_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_SLAVE_SERVICE_A2NOC },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
.id = SM8450_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8450_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
.id = SM8450_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8450_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_sensorss_q6 = {
.name = "qxm_sensorss_q6",
.id = SM8450_MASTER_SENSORS_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8450_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_sp = {
.name = "qxm_sp",
.id = SM8450_MASTER_SP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8450_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_qdss_etr_0 = {
.name = "xm_qdss_etr_0",
.id = SM8450_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8450_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_qdss_etr_1 = {
.name = "xm_qdss_etr_1",
.id = SM8450_MASTER_QDSS_ETR_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8450_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
.id = SM8450_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8450_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
.id = SM8450_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_SLAVE_QUP_CORE_0 },
};
static struct qcom_icc_node qup1_core_master = {
.name = "qup1_core_master",
.id = SM8450_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_SLAVE_QUP_CORE_1 },
};
static struct qcom_icc_node qup2_core_master = {
.name = "qup2_core_master",
.id = SM8450_MASTER_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_SLAVE_QUP_CORE_2 },
};
static struct qcom_icc_node qnm_gemnoc_cnoc = {
.name = "qnm_gemnoc_cnoc",
.id = SM8450_MASTER_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 51,
.links = { SM8450_SLAVE_AHB2PHY_SOUTH, SM8450_SLAVE_AHB2PHY_NORTH,
SM8450_SLAVE_AOSS, SM8450_SLAVE_CAMERA_CFG,
SM8450_SLAVE_CLK_CTL, SM8450_SLAVE_CDSP_CFG,
SM8450_SLAVE_RBCPR_CX_CFG, SM8450_SLAVE_RBCPR_MMCX_CFG,
SM8450_SLAVE_RBCPR_MXA_CFG, SM8450_SLAVE_RBCPR_MXC_CFG,
SM8450_SLAVE_CRYPTO_0_CFG, SM8450_SLAVE_CX_RDPM,
SM8450_SLAVE_DISPLAY_CFG, SM8450_SLAVE_GFX3D_CFG,
SM8450_SLAVE_IMEM_CFG, SM8450_SLAVE_IPA_CFG,
SM8450_SLAVE_IPC_ROUTER_CFG, SM8450_SLAVE_LPASS,
SM8450_SLAVE_CNOC_MSS, SM8450_SLAVE_MX_RDPM,
SM8450_SLAVE_PCIE_0_CFG, SM8450_SLAVE_PCIE_1_CFG,
SM8450_SLAVE_PDM, SM8450_SLAVE_PIMEM_CFG,
SM8450_SLAVE_PRNG, SM8450_SLAVE_QDSS_CFG,
SM8450_SLAVE_QSPI_0, SM8450_SLAVE_QUP_0,
SM8450_SLAVE_QUP_1, SM8450_SLAVE_QUP_2,
SM8450_SLAVE_SDCC_2, SM8450_SLAVE_SDCC_4,
SM8450_SLAVE_SPSS_CFG, SM8450_SLAVE_TCSR,
SM8450_SLAVE_TLMM, SM8450_SLAVE_TME_CFG,
SM8450_SLAVE_UFS_MEM_CFG, SM8450_SLAVE_USB3_0,
SM8450_SLAVE_VENUS_CFG, SM8450_SLAVE_VSENSE_CTRL_CFG,
SM8450_SLAVE_A1NOC_CFG, SM8450_SLAVE_A2NOC_CFG,
SM8450_SLAVE_DDRSS_CFG, SM8450_SLAVE_CNOC_MNOC_CFG,
SM8450_SLAVE_PCIE_ANOC_CFG, SM8450_SLAVE_SNOC_CFG,
SM8450_SLAVE_IMEM, SM8450_SLAVE_PIMEM,
SM8450_SLAVE_SERVICE_CNOC, SM8450_SLAVE_QDSS_STM,
SM8450_SLAVE_TCU },
};
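/* This gem_noc->cnoc port fans out to the 51 register/config slaves above. */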
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
.id = SM8450_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SM8450_SLAVE_PCIE_0, SM8450_SLAVE_PCIE_1 },
};
static struct qcom_icc_node alm_gpu_tcu = {
.name = "alm_gpu_tcu",
.id = SM8450_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
};
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
.id = SM8450_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
.id = SM8450_MASTER_APPSS_PROC,
.channels = 3,
.buswidth = 32,
.num_links = 3,
.links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC,
SM8450_SLAVE_MEM_NOC_PCIE_SNOC },
};
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
.id = SM8450_MASTER_GFX3D,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_mdsp = {
.name = "qnm_mdsp",
.id = SM8450_MASTER_MSS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
.links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC,
SM8450_SLAVE_MEM_NOC_PCIE_SNOC },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
.id = SM8450_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8450_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
.id = SM8450_MASTER_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_nsp_gemnoc = {
.name = "qnm_nsp_gemnoc",
.id = SM8450_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
.id = SM8450_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
.links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
.id = SM8450_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8450_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
.id = SM8450_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
.links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC,
SM8450_SLAVE_MEM_NOC_PCIE_SNOC },
};
static struct qcom_icc_node qhm_config_noc = {
.name = "qhm_config_noc",
.id = SM8450_MASTER_CNOC_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 6,
.links = { SM8450_SLAVE_LPASS_CORE_CFG, SM8450_SLAVE_LPASS_LPI_CFG,
SM8450_SLAVE_LPASS_MPU_CFG, SM8450_SLAVE_LPASS_TOP_CFG,
SM8450_SLAVE_SERVICES_LPASS_AML_NOC, SM8450_SLAVE_SERVICE_LPASS_AG_NOC },
};
static struct qcom_icc_node qxm_lpass_dsp = {
.name = "qxm_lpass_dsp",
.id = SM8450_MASTER_LPASS_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 4,
.links = { SM8450_SLAVE_LPASS_TOP_CFG, SM8450_SLAVE_LPASS_SNOC,
SM8450_SLAVE_SERVICES_LPASS_AML_NOC, SM8450_SLAVE_SERVICE_LPASS_AG_NOC },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
.id = SM8450_MASTER_LLCC,
.channels = 4,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_SLAVE_EBI1 },
};
static struct qcom_icc_node qnm_camnoc_hf = {
.name = "qnm_camnoc_hf",
.id = SM8450_MASTER_CAMNOC_HF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8450_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qnm_camnoc_icp = {
.name = "qnm_camnoc_icp",
.id = SM8450_MASTER_CAMNOC_ICP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_camnoc_sf = {
.name = "qnm_camnoc_sf",
.id = SM8450_MASTER_CAMNOC_SF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_mdp = {
.name = "qnm_mdp",
.id = SM8450_MASTER_MDP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8450_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qnm_mnoc_cfg = {
.name = "qnm_mnoc_cfg",
.id = SM8450_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_SLAVE_SERVICE_MNOC },
};
static struct qcom_icc_node qnm_rot = {
.name = "qnm_rot",
.id = SM8450_MASTER_ROTATOR,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_vapss_hcp = {
.name = "qnm_vapss_hcp",
.id = SM8450_MASTER_CDSP_HCP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_video = {
.name = "qnm_video",
.id = SM8450_MASTER_VIDEO,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_video_cv_cpu = {
.name = "qnm_video_cv_cpu",
.id = SM8450_MASTER_VIDEO_CV_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_video_cvp = {
.name = "qnm_video_cvp",
.id = SM8450_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_video_v_cpu = {
.name = "qnm_video_v_cpu",
.id = SM8450_MASTER_VIDEO_V_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qhm_nsp_noc_config = {
.name = "qhm_nsp_noc_config",
.id = SM8450_MASTER_CDSP_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_SLAVE_SERVICE_NSP_NOC },
};
static struct qcom_icc_node qxm_nsp = {
.name = "qxm_nsp",
.id = SM8450_MASTER_CDSP_PROC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8450_SLAVE_CDSP_MEM_NOC },
};
static struct qcom_icc_node qnm_pcie_anoc_cfg = {
.name = "qnm_pcie_anoc_cfg",
.id = SM8450_MASTER_PCIE_ANOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_SLAVE_SERVICE_PCIE_ANOC },
};
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
.id = SM8450_MASTER_PCIE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8450_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
.id = SM8450_MASTER_PCIE_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8450_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node qhm_gic = {
.name = "qhm_gic",
.id = SM8450_MASTER_GIC_AHB,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
.id = SM8450_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8450_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
.id = SM8450_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8450_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qnm_lpass_noc = {
.name = "qnm_lpass_noc",
.id = SM8450_MASTER_LPASS_ANOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8450_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qnm_snoc_cfg = {
.name = "qnm_snoc_cfg",
.id = SM8450_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_SLAVE_SERVICE_SNOC },
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
.id = SM8450_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8450_SLAVE_SNOC_GEM_NOC_GC },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
.id = SM8450_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8450_SLAVE_SNOC_GEM_NOC_GC },
};
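/*
 * The *_disp nodes below duplicate part of the memory path so that
 * display bandwidth can be voted through the separate bcm_*_disp
 * instances, independently of the regular votes on the same ports.
 */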
static struct qcom_icc_node qnm_mnoc_hf_disp = {
.name = "qnm_mnoc_hf_disp",
.id = SM8450_MASTER_MNOC_HF_MEM_NOC_DISP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8450_SLAVE_LLCC_DISP },
};
static struct qcom_icc_node qnm_mnoc_sf_disp = {
.name = "qnm_mnoc_sf_disp",
.id = SM8450_MASTER_MNOC_SF_MEM_NOC_DISP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8450_SLAVE_LLCC_DISP },
};
static struct qcom_icc_node qnm_pcie_disp = {
.name = "qnm_pcie_disp",
.id = SM8450_MASTER_ANOC_PCIE_GEM_NOC_DISP,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8450_SLAVE_LLCC_DISP },
};
static struct qcom_icc_node llcc_mc_disp = {
.name = "llcc_mc_disp",
.id = SM8450_MASTER_LLCC_DISP,
.channels = 4,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_SLAVE_EBI1_DISP },
};
static struct qcom_icc_node qnm_mdp_disp = {
.name = "qnm_mdp_disp",
.id = SM8450_MASTER_MDP_DISP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8450_SLAVE_MNOC_HF_MEM_NOC_DISP },
};
static struct qcom_icc_node qnm_rot_disp = {
.name = "qnm_rot_disp",
.id = SM8450_MASTER_ROTATOR_DISP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8450_SLAVE_MNOC_SF_MEM_NOC_DISP },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
.id = SM8450_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8450_MASTER_A1NOC_SNOC },
};
static struct qcom_icc_node srvc_aggre1_noc = {
.name = "srvc_aggre1_noc",
.id = SM8450_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
.id = SM8450_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8450_MASTER_A2NOC_SNOC },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
.id = SM8450_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
.id = SM8450_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qup1_core_slave = {
.name = "qup1_core_slave",
.id = SM8450_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qup2_core_slave = {
.name = "qup2_core_slave",
.id = SM8450_SLAVE_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
.id = SM8450_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy1 = {
.name = "qhs_ahb2phy1",
.id = SM8450_SLAVE_AHB2PHY_NORTH,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
.id = SM8450_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
.id = SM8450_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
.id = SM8450_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_compute_cfg = {
.name = "qhs_compute_cfg",
.id = SM8450_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_MASTER_CDSP_NOC_CFG },
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
.id = SM8450_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mmcx = {
.name = "qhs_cpr_mmcx",
.id = SM8450_SLAVE_RBCPR_MMCX_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mxa = {
.name = "qhs_cpr_mxa",
.id = SM8450_SLAVE_RBCPR_MXA_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mxc = {
.name = "qhs_cpr_mxc",
.id = SM8450_SLAVE_RBCPR_MXC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
.id = SM8450_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_cx_rdpm = {
.name = "qhs_cx_rdpm",
.id = SM8450_SLAVE_CX_RDPM,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
.id = SM8450_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
.id = SM8450_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
.id = SM8450_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
.id = SM8450_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
.id = SM8450_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_lpass_cfg = {
.name = "qhs_lpass_cfg",
.id = SM8450_SLAVE_LPASS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_MASTER_CNOC_LPASS_AG_NOC },
};
static struct qcom_icc_node qhs_mss_cfg = {
.name = "qhs_mss_cfg",
.id = SM8450_SLAVE_CNOC_MSS,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_mx_rdpm = {
.name = "qhs_mx_rdpm",
.id = SM8450_SLAVE_MX_RDPM,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
.id = SM8450_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_pcie1_cfg = {
.name = "qhs_pcie1_cfg",
.id = SM8450_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
.id = SM8450_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
.id = SM8450_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
.id = SM8450_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
.id = SM8450_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
.id = SM8450_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
.id = SM8450_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
.id = SM8450_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_qup2 = {
.name = "qhs_qup2",
.id = SM8450_SLAVE_QUP_2,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
.id = SM8450_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
.id = SM8450_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_spss_cfg = {
.name = "qhs_spss_cfg",
.id = SM8450_SLAVE_SPSS_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
.id = SM8450_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
.id = SM8450_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_tme_cfg = {
.name = "qhs_tme_cfg",
.id = SM8450_SLAVE_TME_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
.id = SM8450_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
.id = SM8450_SLAVE_USB3_0,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
.id = SM8450_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
.id = SM8450_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_a1_noc_cfg = {
.name = "qns_a1_noc_cfg",
.id = SM8450_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_MASTER_A1NOC_CFG },
};
static struct qcom_icc_node qns_a2_noc_cfg = {
.name = "qns_a2_noc_cfg",
.id = SM8450_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_MASTER_A2NOC_CFG },
};
static struct qcom_icc_node qns_ddrss_cfg = {
.name = "qns_ddrss_cfg",
.id = SM8450_SLAVE_DDRSS_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
/* FIXME: num_links is 1 but the destination node is unknown */
};
static struct qcom_icc_node qns_mnoc_cfg = {
.name = "qns_mnoc_cfg",
.id = SM8450_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_MASTER_CNOC_MNOC_CFG },
};
static struct qcom_icc_node qns_pcie_anoc_cfg = {
.name = "qns_pcie_anoc_cfg",
.id = SM8450_SLAVE_PCIE_ANOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_MASTER_PCIE_ANOC_CFG },
};
static struct qcom_icc_node qns_snoc_cfg = {
.name = "qns_snoc_cfg",
.id = SM8450_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8450_MASTER_SNOC_CFG },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
.id = SM8450_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
.id = SM8450_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node srvc_cnoc = {
.name = "srvc_cnoc",
.id = SM8450_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node xs_pcie_0 = {
.name = "xs_pcie_0",
.id = SM8450_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node xs_pcie_1 = {
.name = "xs_pcie_1",
.id = SM8450_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
.id = SM8450_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
.id = SM8450_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node qns_gem_noc_cnoc = {
.name = "qns_gem_noc_cnoc",
.id = SM8450_SLAVE_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8450_MASTER_GEM_NOC_CNOC },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
.id = SM8450_SLAVE_LLCC,
.channels = 4,
.buswidth = 16,
.num_links = 1,
.links = { SM8450_MASTER_LLCC },
};
static struct qcom_icc_node qns_pcie = {
.name = "qns_pcie",
.id = SM8450_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8450_MASTER_GEM_NOC_PCIE_SNOC },
};
static struct qcom_icc_node qhs_lpass_core = {
.name = "qhs_lpass_core",
.id = SM8450_SLAVE_LPASS_CORE_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_lpass_lpi = {
.name = "qhs_lpass_lpi",
.id = SM8450_SLAVE_LPASS_LPI_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_lpass_mpu = {
.name = "qhs_lpass_mpu",
.id = SM8450_SLAVE_LPASS_MPU_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_lpass_top = {
.name = "qhs_lpass_top",
.id = SM8450_SLAVE_LPASS_TOP_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_sysnoc = {
.name = "qns_sysnoc",
.id = SM8450_SLAVE_LPASS_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8450_MASTER_LPASS_ANOC },
};
static struct qcom_icc_node srvc_niu_aml_noc = {
.name = "srvc_niu_aml_noc",
.id = SM8450_SLAVE_SERVICES_LPASS_AML_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node srvc_niu_lpass_agnoc = {
.name = "srvc_niu_lpass_agnoc",
.id = SM8450_SLAVE_SERVICE_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
.id = SM8450_SLAVE_EBI1,
.channels = 4,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
.id = SM8450_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8450_MASTER_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
.id = SM8450_SLAVE_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8450_MASTER_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
.id = SM8450_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_nsp_gemnoc = {
.name = "qns_nsp_gemnoc",
.id = SM8450_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8450_MASTER_COMPUTE_NOC },
};
static struct qcom_icc_node service_nsp_noc = {
.name = "service_nsp_noc",
.id = SM8450_SLAVE_SERVICE_NSP_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_pcie_mem_noc = {
.name = "qns_pcie_mem_noc",
.id = SM8450_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8450_MASTER_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node srvc_pcie_aggre_noc = {
.name = "srvc_pcie_aggre_noc",
.id = SM8450_SLAVE_SERVICE_PCIE_ANOC,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
.id = SM8450_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8450_MASTER_SNOC_GC_MEM_NOC },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
.id = SM8450_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8450_MASTER_SNOC_SF_MEM_NOC },
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
.id = SM8450_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_llcc_disp = {
.name = "qns_llcc_disp",
.id = SM8450_SLAVE_LLCC_DISP,
.channels = 4,
.buswidth = 16,
.num_links = 1,
.links = { SM8450_MASTER_LLCC_DISP },
};
static struct qcom_icc_node ebi_disp = {
.name = "ebi_disp",
.id = SM8450_SLAVE_EBI1_DISP,
.channels = 4,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_mem_noc_hf_disp = {
.name = "qns_mem_noc_hf_disp",
.id = SM8450_SLAVE_MNOC_HF_MEM_NOC_DISP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8450_MASTER_MNOC_HF_MEM_NOC_DISP },
};
static struct qcom_icc_node qns_mem_noc_sf_disp = {
.name = "qns_mem_noc_sf_disp",
.id = SM8450_SLAVE_MNOC_SF_MEM_NOC_DISP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8450_MASTER_MNOC_SF_MEM_NOC_DISP },
};
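/*
 * Bus Clock Manager (BCM) votes. Each BCM aggregates the bandwidth
 * requests of the nodes it lists; keepalive BCMs retain a minimum vote
 * so the corresponding bus clock is never dropped entirely.
 */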
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
.enable_mask = 0x8,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.num_nodes = 1,
.nodes = { &qxm_crypto },
};
static struct qcom_icc_bcm bcm_cn0 = {
.name = "CN0",
.enable_mask = 0x1,
.keepalive = true,
.num_nodes = 55,
.nodes = { &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie,
&qhs_ahb2phy0, &qhs_ahb2phy1,
&qhs_aoss, &qhs_camera_cfg,
&qhs_clk_ctl, &qhs_compute_cfg,
&qhs_cpr_cx, &qhs_cpr_mmcx,
&qhs_cpr_mxa, &qhs_cpr_mxc,
&qhs_crypto0_cfg, &qhs_cx_rdpm,
&qhs_display_cfg, &qhs_gpuss_cfg,
&qhs_imem_cfg, &qhs_ipa,
&qhs_ipc_router, &qhs_lpass_cfg,
&qhs_mss_cfg, &qhs_mx_rdpm,
&qhs_pcie0_cfg, &qhs_pcie1_cfg,
&qhs_pdm, &qhs_pimem_cfg,
&qhs_prng, &qhs_qdss_cfg,
&qhs_qspi, &qhs_qup0,
&qhs_qup1, &qhs_qup2,
&qhs_sdc2, &qhs_sdc4,
&qhs_spss_cfg, &qhs_tcsr,
&qhs_tlmm, &qhs_tme_cfg,
&qhs_ufs_mem_cfg, &qhs_usb3_0,
&qhs_venus_cfg, &qhs_vsense_ctrl_cfg,
&qns_a1_noc_cfg, &qns_a2_noc_cfg,
&qns_ddrss_cfg, &qns_mnoc_cfg,
&qns_pcie_anoc_cfg, &qns_snoc_cfg,
&qxs_imem, &qxs_pimem,
&srvc_cnoc, &xs_pcie_0,
&xs_pcie_1, &xs_qdss_stm,
&xs_sys_tcu_cfg },
};
static struct qcom_icc_bcm bcm_co0 = {
.name = "CO0",
.enable_mask = 0x1,
.num_nodes = 2,
.nodes = { &qxm_nsp, &qns_nsp_gemnoc },
};
static struct qcom_icc_bcm bcm_mc0 = {
.name = "MC0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_mm0 = {
.name = "MM0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_bcm bcm_mm1 = {
.name = "MM1",
.enable_mask = 0x1,
.num_nodes = 12,
.nodes = { &qnm_camnoc_hf, &qnm_camnoc_icp,
&qnm_camnoc_sf, &qnm_mdp,
&qnm_mnoc_cfg, &qnm_rot,
&qnm_vapss_hcp, &qnm_video,
&qnm_video_cv_cpu, &qnm_video_cvp,
&qnm_video_v_cpu, &qns_mem_noc_sf },
};
static struct qcom_icc_bcm bcm_qup0 = {
.name = "QUP0",
.keepalive = true,
.vote_scale = 1,
.num_nodes = 1,
.nodes = { &qup0_core_slave },
};
static struct qcom_icc_bcm bcm_qup1 = {
.name = "QUP1",
.keepalive = true,
.vote_scale = 1,
.num_nodes = 1,
.nodes = { &qup1_core_slave },
};
static struct qcom_icc_bcm bcm_qup2 = {
.name = "QUP2",
.keepalive = true,
.vote_scale = 1,
.num_nodes = 1,
.nodes = { &qup2_core_slave },
};
static struct qcom_icc_bcm bcm_sh0 = {
.name = "SH0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_llcc },
};
static struct qcom_icc_bcm bcm_sh1 = {
.name = "SH1",
.enable_mask = 0x1,
.num_nodes = 7,
.nodes = { &alm_gpu_tcu, &alm_sys_tcu,
&qnm_nsp_gemnoc, &qnm_pcie,
&qnm_snoc_gc, &qns_gem_noc_cnoc,
&qns_pcie },
};
static struct qcom_icc_bcm bcm_sn0 = {
.name = "SN0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_bcm bcm_sn1 = {
.name = "SN1",
.enable_mask = 0x1,
.num_nodes = 4,
.nodes = { &qhm_gic, &qxm_pimem,
&xm_gic, &qns_gemnoc_gc },
};
static struct qcom_icc_bcm bcm_sn2 = {
.name = "SN2",
.num_nodes = 1,
.nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_bcm bcm_sn3 = {
.name = "SN3",
.num_nodes = 1,
.nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_bcm bcm_sn4 = {
.name = "SN4",
.num_nodes = 1,
.nodes = { &qnm_lpass_noc },
};
static struct qcom_icc_bcm bcm_sn7 = {
.name = "SN7",
.num_nodes = 1,
.nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_bcm bcm_acv_disp = {
.name = "ACV",
.enable_mask = 0x1,
.num_nodes = 1,
.nodes = { &ebi_disp },
};
static struct qcom_icc_bcm bcm_mc0_disp = {
.name = "MC0",
.num_nodes = 1,
.nodes = { &ebi_disp },
};
static struct qcom_icc_bcm bcm_mm0_disp = {
.name = "MM0",
.num_nodes = 1,
.nodes = { &qns_mem_noc_hf_disp },
};
static struct qcom_icc_bcm bcm_mm1_disp = {
.name = "MM1",
.enable_mask = 0x1,
.num_nodes = 3,
.nodes = { &qnm_mdp_disp, &qnm_rot_disp,
&qns_mem_noc_sf_disp },
};
static struct qcom_icc_bcm bcm_sh0_disp = {
.name = "SH0",
.num_nodes = 1,
.nodes = { &qns_llcc_disp },
};
static struct qcom_icc_bcm bcm_sh1_disp = {
.name = "SH1",
.enable_mask = 0x1,
.num_nodes = 1,
.nodes = { &qnm_pcie_disp },
};
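/*
 * One qcom_icc_desc per NoC instance: these node and BCM arrays are
 * what the common qcom_icc_rpmh_probe() registers with the
 * interconnect framework for the matching device tree node.
 */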
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
};
static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_QSPI_0] = &qhm_qspi,
[MASTER_QUP_1] = &qhm_qup1,
[MASTER_A1NOC_CFG] = &qnm_a1noc_cfg,
[MASTER_SDCC_4] = &xm_sdc4,
[MASTER_UFS_MEM] = &xm_ufs_mem,
[MASTER_USB3_0] = &xm_usb3_0,
[SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
static const struct qcom_icc_desc sm8450_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
};
static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QUP_0] = &qhm_qup0,
[MASTER_QUP_2] = &qhm_qup2,
[MASTER_A2NOC_CFG] = &qnm_a2noc_cfg,
[MASTER_CRYPTO] = &qxm_crypto,
[MASTER_IPA] = &qxm_ipa,
[MASTER_SENSORS_PROC] = &qxm_sensorss_q6,
[MASTER_SP] = &qxm_sp,
[MASTER_QDSS_ETR] = &xm_qdss_etr_0,
[MASTER_QDSS_ETR_1] = &xm_qdss_etr_1,
[MASTER_SDCC_2] = &xm_sdc2,
[SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
static const struct qcom_icc_desc sm8450_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
static struct qcom_icc_bcm * const clk_virt_bcms[] = {
&bcm_qup0,
&bcm_qup1,
&bcm_qup2,
};
static struct qcom_icc_node * const clk_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[MASTER_QUP_CORE_2] = &qup2_core_master,
[SLAVE_QUP_CORE_0] = &qup0_core_slave,
[SLAVE_QUP_CORE_1] = &qup1_core_slave,
[SLAVE_QUP_CORE_2] = &qup2_core_slave,
};
static const struct qcom_icc_desc sm8450_clk_virt = {
.nodes = clk_virt_nodes,
.num_nodes = ARRAY_SIZE(clk_virt_nodes),
.bcms = clk_virt_bcms,
.num_bcms = ARRAY_SIZE(clk_virt_bcms),
};
static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
};
static struct qcom_icc_node * const config_noc_nodes[] = {
[MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
[MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
[SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
[SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
[SLAVE_AOSS] = &qhs_aoss,
[SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
[SLAVE_CLK_CTL] = &qhs_clk_ctl,
[SLAVE_CDSP_CFG] = &qhs_compute_cfg,
[SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
[SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx,
[SLAVE_RBCPR_MXA_CFG] = &qhs_cpr_mxa,
[SLAVE_RBCPR_MXC_CFG] = &qhs_cpr_mxc,
[SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
[SLAVE_CX_RDPM] = &qhs_cx_rdpm,
[SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
[SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
[SLAVE_IMEM_CFG] = &qhs_imem_cfg,
[SLAVE_IPA_CFG] = &qhs_ipa,
[SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
[SLAVE_LPASS] = &qhs_lpass_cfg,
[SLAVE_CNOC_MSS] = &qhs_mss_cfg,
[SLAVE_MX_RDPM] = &qhs_mx_rdpm,
[SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
[SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
[SLAVE_PDM] = &qhs_pdm,
[SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
[SLAVE_PRNG] = &qhs_prng,
[SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
[SLAVE_QSPI_0] = &qhs_qspi,
[SLAVE_QUP_0] = &qhs_qup0,
[SLAVE_QUP_1] = &qhs_qup1,
[SLAVE_QUP_2] = &qhs_qup2,
[SLAVE_SDCC_2] = &qhs_sdc2,
[SLAVE_SDCC_4] = &qhs_sdc4,
[SLAVE_SPSS_CFG] = &qhs_spss_cfg,
[SLAVE_TCSR] = &qhs_tcsr,
[SLAVE_TLMM] = &qhs_tlmm,
[SLAVE_TME_CFG] = &qhs_tme_cfg,
[SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
[SLAVE_USB3_0] = &qhs_usb3_0,
[SLAVE_VENUS_CFG] = &qhs_venus_cfg,
[SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
[SLAVE_A1NOC_CFG] = &qns_a1_noc_cfg,
[SLAVE_A2NOC_CFG] = &qns_a2_noc_cfg,
[SLAVE_DDRSS_CFG] = &qns_ddrss_cfg,
[SLAVE_CNOC_MNOC_CFG] = &qns_mnoc_cfg,
[SLAVE_PCIE_ANOC_CFG] = &qns_pcie_anoc_cfg,
[SLAVE_SNOC_CFG] = &qns_snoc_cfg,
[SLAVE_IMEM] = &qxs_imem,
[SLAVE_PIMEM] = &qxs_pimem,
[SLAVE_SERVICE_CNOC] = &srvc_cnoc,
[SLAVE_PCIE_0] = &xs_pcie_0,
[SLAVE_PCIE_1] = &xs_pcie_1,
[SLAVE_QDSS_STM] = &xs_qdss_stm,
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
static const struct qcom_icc_desc sm8450_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh1,
&bcm_sh0_disp,
&bcm_sh1_disp,
};
static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_GPU_TCU] = &alm_gpu_tcu,
[MASTER_SYS_TCU] = &alm_sys_tcu,
[MASTER_APPSS_PROC] = &chm_apps,
[MASTER_GFX3D] = &qnm_gpu,
[MASTER_MSS_PROC] = &qnm_mdsp,
[MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
[MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
[MASTER_COMPUTE_NOC] = &qnm_nsp_gemnoc,
[MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
[MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
[MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
[SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
[SLAVE_LLCC] = &qns_llcc,
[SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
[MASTER_MNOC_HF_MEM_NOC_DISP] = &qnm_mnoc_hf_disp,
[MASTER_MNOC_SF_MEM_NOC_DISP] = &qnm_mnoc_sf_disp,
[MASTER_ANOC_PCIE_GEM_NOC_DISP] = &qnm_pcie_disp,
[SLAVE_LLCC_DISP] = &qns_llcc_disp,
};
static const struct qcom_icc_desc sm8450_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
};
static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
[MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
[MASTER_LPASS_PROC] = &qxm_lpass_dsp,
[SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
[SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi,
[SLAVE_LPASS_MPU_CFG] = &qhs_lpass_mpu,
[SLAVE_LPASS_TOP_CFG] = &qhs_lpass_top,
[SLAVE_LPASS_SNOC] = &qns_sysnoc,
[SLAVE_SERVICES_LPASS_AML_NOC] = &srvc_niu_aml_noc,
[SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
};
static const struct qcom_icc_desc sm8450_lpass_ag_noc = {
.nodes = lpass_ag_noc_nodes,
.num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
.bcms = lpass_ag_noc_bcms,
.num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
};
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
&bcm_acv_disp,
&bcm_mc0_disp,
};
static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
[MASTER_LLCC_DISP] = &llcc_mc_disp,
[SLAVE_EBI1_DISP] = &ebi_disp,
};
static const struct qcom_icc_desc sm8450_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm0_disp,
&bcm_mm1_disp,
};
static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
[MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
[MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
[MASTER_MDP] = &qnm_mdp,
[MASTER_CNOC_MNOC_CFG] = &qnm_mnoc_cfg,
[MASTER_ROTATOR] = &qnm_rot,
[MASTER_CDSP_HCP] = &qnm_vapss_hcp,
[MASTER_VIDEO] = &qnm_video,
[MASTER_VIDEO_CV_PROC] = &qnm_video_cv_cpu,
[MASTER_VIDEO_PROC] = &qnm_video_cvp,
[MASTER_VIDEO_V_PROC] = &qnm_video_v_cpu,
[SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
[SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
[MASTER_MDP_DISP] = &qnm_mdp_disp,
[MASTER_ROTATOR_DISP] = &qnm_rot_disp,
[SLAVE_MNOC_HF_MEM_NOC_DISP] = &qns_mem_noc_hf_disp,
[SLAVE_MNOC_SF_MEM_NOC_DISP] = &qns_mem_noc_sf_disp,
};
static const struct qcom_icc_desc sm8450_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
&bcm_co0,
};
static struct qcom_icc_node * const nsp_noc_nodes[] = {
[MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
[MASTER_CDSP_PROC] = &qxm_nsp,
[SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
[SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
};
static const struct qcom_icc_desc sm8450_nsp_noc = {
.nodes = nsp_noc_nodes,
.num_nodes = ARRAY_SIZE(nsp_noc_nodes),
.bcms = nsp_noc_bcms,
.num_bcms = ARRAY_SIZE(nsp_noc_bcms),
};
static struct qcom_icc_bcm * const pcie_anoc_bcms[] = {
&bcm_sn7,
};
static struct qcom_icc_node * const pcie_anoc_nodes[] = {
[MASTER_PCIE_ANOC_CFG] = &qnm_pcie_anoc_cfg,
[MASTER_PCIE_0] = &xm_pcie3_0,
[MASTER_PCIE_1] = &xm_pcie3_1,
[SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
[SLAVE_SERVICE_PCIE_ANOC] = &srvc_pcie_aggre_noc,
};
static const struct qcom_icc_desc sm8450_pcie_anoc = {
.nodes = pcie_anoc_nodes,
.num_nodes = ARRAY_SIZE(pcie_anoc_nodes),
.bcms = pcie_anoc_bcms,
.num_bcms = ARRAY_SIZE(pcie_anoc_bcms),
};
static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn2,
&bcm_sn3,
&bcm_sn4,
};
static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_GIC_AHB] = &qhm_gic,
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
[MASTER_LPASS_ANOC] = &qnm_lpass_noc,
[MASTER_SNOC_CFG] = &qnm_snoc_cfg,
[MASTER_PIMEM] = &qxm_pimem,
[MASTER_GIC] = &xm_gic,
[SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
[SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
[SLAVE_SERVICE_SNOC] = &srvc_snoc,
};
static const struct qcom_icc_desc sm8450_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
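/*
 * Each "qcom,sm8450-*" compatible below maps one NoC device tree node
 * to its descriptor above. Consumers then vote bandwidth by naming a
 * (master, slave) pair on the relevant providers, e.g. (illustrative
 * sketch only; the trailing cells depend on the provider's
 * #interconnect-cells value):
 *
 *	interconnects = <&gem_noc MASTER_APPSS_PROC 0
 *			 &mc_virt SLAVE_EBI1 0>;
 *	interconnect-names = "cpu-mem";
 */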
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sm8450-aggre1-noc",
.data = &sm8450_aggre1_noc},
{ .compatible = "qcom,sm8450-aggre2-noc",
.data = &sm8450_aggre2_noc},
{ .compatible = "qcom,sm8450-clk-virt",
.data = &sm8450_clk_virt},
{ .compatible = "qcom,sm8450-config-noc",
.data = &sm8450_config_noc},
{ .compatible = "qcom,sm8450-gem-noc",
.data = &sm8450_gem_noc},
{ .compatible = "qcom,sm8450-lpass-ag-noc",
.data = &sm8450_lpass_ag_noc},
{ .compatible = "qcom,sm8450-mc-virt",
.data = &sm8450_mc_virt},
{ .compatible = "qcom,sm8450-mmss-noc",
.data = &sm8450_mmss_noc},
{ .compatible = "qcom,sm8450-nsp-noc",
.data = &sm8450_nsp_noc},
{ .compatible = "qcom,sm8450-pcie-anoc",
.data = &sm8450_pcie_anoc},
{ .compatible = "qcom,sm8450-system-noc",
.data = &sm8450_system_noc},
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
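/*
 * The common RPMh probe/remove helpers drive all of the NoC instances;
 * icc_sync_state() lets the interconnect framework drop the boot-time
 * bandwidth floor once every consumer has probed.
 */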
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
.remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sm8450",
.of_match_table = qnoc_of_match,
.sync_state = icc_sync_state,
},
};
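/*
 * Register at core_initcall time so the interconnect providers are up
 * before the consumer devices that depend on them begin probing.
 */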
static int __init qnoc_driver_init(void)
{
return platform_driver_register(&qnoc_driver);
}
core_initcall(qnoc_driver_init);
static void __exit qnoc_driver_exit(void)
{
platform_driver_unregister(&qnoc_driver);
}
module_exit(qnoc_driver_exit);
MODULE_DESCRIPTION("sm8450 NoC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/interconnect/qcom/sm8450.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022, Linaro Limited
*
*/
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <dt-bindings/interconnect/qcom,sm8550-rpmh.h>
#include "bcm-voter.h"
#include "icc-common.h"
#include "icc-rpmh.h"
#include "sm8550.h"
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
.id = SM8550_MASTER_QSPI_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8550_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
.id = SM8550_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8550_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
.id = SM8550_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8550_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
.id = SM8550_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
.id = SM8550_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8550_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
.id = SM8550_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8550_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
.id = SM8550_MASTER_QUP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8550_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
.id = SM8550_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8550_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
.id = SM8550_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8550_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_sp = {
.name = "qxm_sp",
.id = SM8550_MASTER_SP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8550_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_qdss_etr_0 = {
.name = "xm_qdss_etr_0",
.id = SM8550_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8550_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_qdss_etr_1 = {
.name = "xm_qdss_etr_1",
.id = SM8550_MASTER_QDSS_ETR_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8550_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
.id = SM8550_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8550_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
.id = SM8550_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8550_SLAVE_QUP_CORE_0 },
};
static struct qcom_icc_node qup1_core_master = {
.name = "qup1_core_master",
.id = SM8550_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8550_SLAVE_QUP_CORE_1 },
};
static struct qcom_icc_node qup2_core_master = {
.name = "qup2_core_master",
.id = SM8550_MASTER_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8550_SLAVE_QUP_CORE_2 },
};
static struct qcom_icc_node qsm_cfg = {
.name = "qsm_cfg",
.id = SM8550_MASTER_CNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 44,
.links = { SM8550_SLAVE_AHB2PHY_SOUTH, SM8550_SLAVE_AHB2PHY_NORTH,
SM8550_SLAVE_APPSS, SM8550_SLAVE_CAMERA_CFG,
SM8550_SLAVE_CLK_CTL, SM8550_SLAVE_RBCPR_CX_CFG,
SM8550_SLAVE_RBCPR_MMCX_CFG, SM8550_SLAVE_RBCPR_MXA_CFG,
SM8550_SLAVE_RBCPR_MXC_CFG, SM8550_SLAVE_CPR_NSPCX,
SM8550_SLAVE_CRYPTO_0_CFG, SM8550_SLAVE_CX_RDPM,
SM8550_SLAVE_DISPLAY_CFG, SM8550_SLAVE_GFX3D_CFG,
SM8550_SLAVE_I2C, SM8550_SLAVE_IMEM_CFG,
SM8550_SLAVE_IPA_CFG, SM8550_SLAVE_IPC_ROUTER_CFG,
SM8550_SLAVE_CNOC_MSS, SM8550_SLAVE_MX_RDPM,
SM8550_SLAVE_PCIE_0_CFG, SM8550_SLAVE_PCIE_1_CFG,
SM8550_SLAVE_PDM, SM8550_SLAVE_PIMEM_CFG,
SM8550_SLAVE_PRNG, SM8550_SLAVE_QDSS_CFG,
SM8550_SLAVE_QSPI_0, SM8550_SLAVE_QUP_1,
SM8550_SLAVE_QUP_2, SM8550_SLAVE_SDCC_2,
SM8550_SLAVE_SDCC_4, SM8550_SLAVE_SPSS_CFG,
SM8550_SLAVE_TCSR, SM8550_SLAVE_TLMM,
SM8550_SLAVE_UFS_MEM_CFG, SM8550_SLAVE_USB3_0,
SM8550_SLAVE_VENUS_CFG, SM8550_SLAVE_VSENSE_CTRL_CFG,
SM8550_SLAVE_LPASS_QTB_CFG, SM8550_SLAVE_CNOC_MNOC_CFG,
SM8550_SLAVE_NSP_QTB_CFG, SM8550_SLAVE_PCIE_ANOC_CFG,
SM8550_SLAVE_QDSS_STM, SM8550_SLAVE_TCU },
};
static struct qcom_icc_node qnm_gemnoc_cnoc = {
.name = "qnm_gemnoc_cnoc",
.id = SM8550_MASTER_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 6,
.links = { SM8550_SLAVE_AOSS, SM8550_SLAVE_TME_CFG,
SM8550_SLAVE_CNOC_CFG, SM8550_SLAVE_DDRSS_CFG,
SM8550_SLAVE_BOOT_IMEM, SM8550_SLAVE_IMEM },
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
.id = SM8550_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SM8550_SLAVE_PCIE_0, SM8550_SLAVE_PCIE_1 },
};
static struct qcom_icc_node alm_gpu_tcu = {
.name = "alm_gpu_tcu",
.id = SM8550_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC },
};
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
.id = SM8550_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC },
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
.id = SM8550_MASTER_APPSS_PROC,
.channels = 3,
.buswidth = 32,
.num_links = 3,
.links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC,
SM8550_SLAVE_MEM_NOC_PCIE_SNOC },
};
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
.id = SM8550_MASTER_GFX3D,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_lpass_gemnoc = {
.name = "qnm_lpass_gemnoc",
.id = SM8550_MASTER_LPASS_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
.links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC,
SM8550_SLAVE_MEM_NOC_PCIE_SNOC },
};
static struct qcom_icc_node qnm_mdsp = {
.name = "qnm_mdsp",
.id = SM8550_MASTER_MSS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
.links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC,
SM8550_SLAVE_MEM_NOC_PCIE_SNOC },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
.id = SM8550_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
.id = SM8550_MASTER_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_nsp_gemnoc = {
.name = "qnm_nsp_gemnoc",
.id = SM8550_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
.id = SM8550_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
.links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
.id = SM8550_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8550_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
.id = SM8550_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
.links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC,
SM8550_SLAVE_MEM_NOC_PCIE_SNOC },
};
static struct qcom_icc_node qnm_lpiaon_noc = {
.name = "qnm_lpiaon_noc",
.id = SM8550_MASTER_LPIAON_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_SLAVE_LPASS_GEM_NOC },
};
static struct qcom_icc_node qnm_lpass_lpinoc = {
.name = "qnm_lpass_lpinoc",
.id = SM8550_MASTER_LPASS_LPINOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_SLAVE_LPIAON_NOC_LPASS_AG_NOC },
};
static struct qcom_icc_node qxm_lpinoc_dsp_axim = {
.name = "qxm_lpinoc_dsp_axim",
.id = SM8550_MASTER_LPASS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_SLAVE_LPICX_NOC_LPIAON_NOC },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
.id = SM8550_MASTER_LLCC,
.channels = 4,
.buswidth = 4,
.num_links = 1,
.links = { SM8550_SLAVE_EBI1 },
};
static struct qcom_icc_node qnm_camnoc_hf = {
.name = "qnm_camnoc_hf",
.id = SM8550_MASTER_CAMNOC_HF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qnm_camnoc_icp = {
.name = "qnm_camnoc_icp",
.id = SM8550_MASTER_CAMNOC_ICP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8550_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_camnoc_sf = {
.name = "qnm_camnoc_sf",
.id = SM8550_MASTER_CAMNOC_SF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_mdp = {
.name = "qnm_mdp",
.id = SM8550_MASTER_MDP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qnm_vapss_hcp = {
.name = "qnm_vapss_hcp",
.id = SM8550_MASTER_CDSP_HCP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_video = {
.name = "qnm_video",
.id = SM8550_MASTER_VIDEO,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_video_cv_cpu = {
.name = "qnm_video_cv_cpu",
.id = SM8550_MASTER_VIDEO_CV_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8550_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_video_cvp = {
.name = "qnm_video_cvp",
.id = SM8550_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_video_v_cpu = {
.name = "qnm_video_v_cpu",
.id = SM8550_MASTER_VIDEO_V_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8550_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qsm_mnoc_cfg = {
.name = "qsm_mnoc_cfg",
.id = SM8550_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8550_SLAVE_SERVICE_MNOC },
};
static struct qcom_icc_node qxm_nsp = {
.name = "qxm_nsp",
.id = SM8550_MASTER_CDSP_PROC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_SLAVE_CDSP_MEM_NOC },
};
static struct qcom_icc_node qsm_pcie_anoc_cfg = {
.name = "qsm_pcie_anoc_cfg",
.id = SM8550_MASTER_PCIE_ANOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8550_SLAVE_SERVICE_PCIE_ANOC },
};
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
.id = SM8550_MASTER_PCIE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8550_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
.id = SM8550_MASTER_PCIE_1,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node qhm_gic = {
.name = "qhm_gic",
.id = SM8550_MASTER_GIC_AHB,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8550_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
.id = SM8550_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
.id = SM8550_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
.id = SM8550_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8550_SLAVE_SNOC_GEM_NOC_GC },
};
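/*
 * The *_disp and *_cam_ife_* nodes below duplicate the memory path so
 * that display and camera IFE bandwidth can be voted through dedicated
 * BCM instances, independently of the regular votes on the same ports.
 */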
static struct qcom_icc_node qnm_mnoc_hf_disp = {
.name = "qnm_mnoc_hf_disp",
.id = SM8550_MASTER_MNOC_HF_MEM_NOC_DISP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_SLAVE_LLCC_DISP },
};
static struct qcom_icc_node qnm_pcie_disp = {
.name = "qnm_pcie_disp",
.id = SM8550_MASTER_ANOC_PCIE_GEM_NOC_DISP,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_SLAVE_LLCC_DISP },
};
static struct qcom_icc_node llcc_mc_disp = {
.name = "llcc_mc_disp",
.id = SM8550_MASTER_LLCC_DISP,
.channels = 4,
.buswidth = 4,
.num_links = 1,
.links = { SM8550_SLAVE_EBI1_DISP },
};
static struct qcom_icc_node qnm_mdp_disp = {
.name = "qnm_mdp_disp",
.id = SM8550_MASTER_MDP_DISP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_SLAVE_MNOC_HF_MEM_NOC_DISP },
};
static struct qcom_icc_node qnm_mnoc_hf_cam_ife_0 = {
.name = "qnm_mnoc_hf_cam_ife_0",
.id = SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_0,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_SLAVE_LLCC_CAM_IFE_0 },
};
static struct qcom_icc_node qnm_mnoc_sf_cam_ife_0 = {
.name = "qnm_mnoc_sf_cam_ife_0",
.id = SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_0,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_SLAVE_LLCC_CAM_IFE_0 },
};
static struct qcom_icc_node qnm_pcie_cam_ife_0 = {
.name = "qnm_pcie_cam_ife_0",
.id = SM8550_MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_0,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_SLAVE_LLCC_CAM_IFE_0 },
};
static struct qcom_icc_node llcc_mc_cam_ife_0 = {
.name = "llcc_mc_cam_ife_0",
.id = SM8550_MASTER_LLCC_CAM_IFE_0,
.channels = 4,
.buswidth = 4,
.num_links = 1,
.links = { SM8550_SLAVE_EBI1_CAM_IFE_0 },
};
static struct qcom_icc_node qnm_camnoc_hf_cam_ife_0 = {
.name = "qnm_camnoc_hf_cam_ife_0",
.id = SM8550_MASTER_CAMNOC_HF_CAM_IFE_0,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_0 },
};
static struct qcom_icc_node qnm_camnoc_icp_cam_ife_0 = {
.name = "qnm_camnoc_icp_cam_ife_0",
.id = SM8550_MASTER_CAMNOC_ICP_CAM_IFE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_0 },
};
static struct qcom_icc_node qnm_camnoc_sf_cam_ife_0 = {
.name = "qnm_camnoc_sf_cam_ife_0",
.id = SM8550_MASTER_CAMNOC_SF_CAM_IFE_0,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_0 },
};
static struct qcom_icc_node qnm_mnoc_hf_cam_ife_1 = {
.name = "qnm_mnoc_hf_cam_ife_1",
.id = SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_1,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_SLAVE_LLCC_CAM_IFE_1 },
};
static struct qcom_icc_node qnm_mnoc_sf_cam_ife_1 = {
.name = "qnm_mnoc_sf_cam_ife_1",
.id = SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_1,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_SLAVE_LLCC_CAM_IFE_1 },
};
static struct qcom_icc_node qnm_pcie_cam_ife_1 = {
.name = "qnm_pcie_cam_ife_1",
.id = SM8550_MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_1,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_SLAVE_LLCC_CAM_IFE_1 },
};
static struct qcom_icc_node llcc_mc_cam_ife_1 = {
.name = "llcc_mc_cam_ife_1",
.id = SM8550_MASTER_LLCC_CAM_IFE_1,
.channels = 4,
.buswidth = 4,
.num_links = 1,
.links = { SM8550_SLAVE_EBI1_CAM_IFE_1 },
};
static struct qcom_icc_node qnm_camnoc_hf_cam_ife_1 = {
.name = "qnm_camnoc_hf_cam_ife_1",
.id = SM8550_MASTER_CAMNOC_HF_CAM_IFE_1,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_1 },
};
static struct qcom_icc_node qnm_camnoc_icp_cam_ife_1 = {
.name = "qnm_camnoc_icp_cam_ife_1",
.id = SM8550_MASTER_CAMNOC_ICP_CAM_IFE_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_1 },
};
static struct qcom_icc_node qnm_camnoc_sf_cam_ife_1 = {
.name = "qnm_camnoc_sf_cam_ife_1",
.id = SM8550_MASTER_CAMNOC_SF_CAM_IFE_1,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_1 },
};
static struct qcom_icc_node qnm_mnoc_hf_cam_ife_2 = {
.name = "qnm_mnoc_hf_cam_ife_2",
.id = SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_2,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_SLAVE_LLCC_CAM_IFE_2 },
};
static struct qcom_icc_node qnm_mnoc_sf_cam_ife_2 = {
.name = "qnm_mnoc_sf_cam_ife_2",
.id = SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_2,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_SLAVE_LLCC_CAM_IFE_2 },
};
static struct qcom_icc_node qnm_pcie_cam_ife_2 = {
.name = "qnm_pcie_cam_ife_2",
.id = SM8550_MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_2,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_SLAVE_LLCC_CAM_IFE_2 },
};
static struct qcom_icc_node llcc_mc_cam_ife_2 = {
.name = "llcc_mc_cam_ife_2",
.id = SM8550_MASTER_LLCC_CAM_IFE_2,
.channels = 4,
.buswidth = 4,
.num_links = 1,
.links = { SM8550_SLAVE_EBI1_CAM_IFE_2 },
};
static struct qcom_icc_node qnm_camnoc_hf_cam_ife_2 = {
.name = "qnm_camnoc_hf_cam_ife_2",
.id = SM8550_MASTER_CAMNOC_HF_CAM_IFE_2,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_2 },
};
static struct qcom_icc_node qnm_camnoc_icp_cam_ife_2 = {
.name = "qnm_camnoc_icp_cam_ife_2",
.id = SM8550_MASTER_CAMNOC_ICP_CAM_IFE_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_2 },
};
static struct qcom_icc_node qnm_camnoc_sf_cam_ife_2 = {
.name = "qnm_camnoc_sf_cam_ife_2",
.id = SM8550_MASTER_CAMNOC_SF_CAM_IFE_2,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_2 },
};
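/* Slave (destination) side of the sm8550 topology starts here. */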
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
.id = SM8550_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_MASTER_A1NOC_SNOC },
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
.id = SM8550_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_MASTER_A2NOC_SNOC },
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
.id = SM8550_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qup1_core_slave = {
.name = "qup1_core_slave",
.id = SM8550_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qup2_core_slave = {
.name = "qup2_core_slave",
.id = SM8550_SLAVE_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
.id = SM8550_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy1 = {
.name = "qhs_ahb2phy1",
.id = SM8550_SLAVE_AHB2PHY_NORTH,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
.id = SM8550_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
.id = SM8550_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
.id = SM8550_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
.id = SM8550_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mmcx = {
.name = "qhs_cpr_mmcx",
.id = SM8550_SLAVE_RBCPR_MMCX_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mxa = {
.name = "qhs_cpr_mxa",
.id = SM8550_SLAVE_RBCPR_MXA_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mxc = {
.name = "qhs_cpr_mxc",
.id = SM8550_SLAVE_RBCPR_MXC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_cpr_nspcx = {
.name = "qhs_cpr_nspcx",
.id = SM8550_SLAVE_CPR_NSPCX,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
.id = SM8550_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_cx_rdpm = {
.name = "qhs_cx_rdpm",
.id = SM8550_SLAVE_CX_RDPM,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
.id = SM8550_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
.id = SM8550_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node qhs_i2c = {
.name = "qhs_i2c",
.id = SM8550_SLAVE_I2C,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
.id = SM8550_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
.id = SM8550_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
.id = SM8550_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_mss_cfg = {
.name = "qhs_mss_cfg",
.id = SM8550_SLAVE_CNOC_MSS,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_mx_rdpm = {
.name = "qhs_mx_rdpm",
.id = SM8550_SLAVE_MX_RDPM,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
.id = SM8550_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_pcie1_cfg = {
.name = "qhs_pcie1_cfg",
.id = SM8550_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
.id = SM8550_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
.id = SM8550_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
.id = SM8550_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
.id = SM8550_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
.id = SM8550_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
.id = SM8550_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_qup2 = {
.name = "qhs_qup2",
.id = SM8550_SLAVE_QUP_2,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
.id = SM8550_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
.id = SM8550_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_spss_cfg = {
.name = "qhs_spss_cfg",
.id = SM8550_SLAVE_SPSS_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
.id = SM8550_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
.id = SM8550_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
.id = SM8550_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
.id = SM8550_SLAVE_USB3_0,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
.id = SM8550_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
.id = SM8550_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qss_lpass_qtb_cfg = {
.name = "qss_lpass_qtb_cfg",
.id = SM8550_SLAVE_LPASS_QTB_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qss_mnoc_cfg = {
.name = "qss_mnoc_cfg",
.id = SM8550_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8550_MASTER_CNOC_MNOC_CFG },
};
static struct qcom_icc_node qss_nsp_qtb_cfg = {
.name = "qss_nsp_qtb_cfg",
.id = SM8550_SLAVE_NSP_QTB_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qss_pcie_anoc_cfg = {
.name = "qss_pcie_anoc_cfg",
.id = SM8550_SLAVE_PCIE_ANOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8550_MASTER_PCIE_ANOC_CFG },
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
.id = SM8550_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
.id = SM8550_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
.id = SM8550_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qhs_tme_cfg = {
.name = "qhs_tme_cfg",
.id = SM8550_SLAVE_TME_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qss_cfg = {
.name = "qss_cfg",
.id = SM8550_SLAVE_CNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8550_MASTER_CNOC_CFG },
};
static struct qcom_icc_node qss_ddrss_cfg = {
.name = "qss_ddrss_cfg",
.id = SM8550_SLAVE_DDRSS_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qxs_boot_imem = {
.name = "qxs_boot_imem",
.id = SM8550_SLAVE_BOOT_IMEM,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
.id = SM8550_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node xs_pcie_0 = {
.name = "xs_pcie_0",
.id = SM8550_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
.num_links = 0,
};
static struct qcom_icc_node xs_pcie_1 = {
.name = "xs_pcie_1",
.id = SM8550_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 16,
.num_links = 0,
};
static struct qcom_icc_node qns_gem_noc_cnoc = {
.name = "qns_gem_noc_cnoc",
.id = SM8550_SLAVE_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_MASTER_GEM_NOC_CNOC },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
.id = SM8550_SLAVE_LLCC,
.channels = 4,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_MASTER_LLCC },
};
static struct qcom_icc_node qns_pcie = {
.name = "qns_pcie",
.id = SM8550_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8550_MASTER_GEM_NOC_PCIE_SNOC },
};
static struct qcom_icc_node qns_lpass_ag_noc_gemnoc = {
.name = "qns_lpass_ag_noc_gemnoc",
.id = SM8550_SLAVE_LPASS_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_MASTER_LPASS_GEM_NOC },
};
static struct qcom_icc_node qns_lpass_aggnoc = {
.name = "qns_lpass_aggnoc",
.id = SM8550_SLAVE_LPIAON_NOC_LPASS_AG_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_MASTER_LPIAON_NOC },
};
static struct qcom_icc_node qns_lpi_aon_noc = {
.name = "qns_lpi_aon_noc",
.id = SM8550_SLAVE_LPICX_NOC_LPIAON_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_MASTER_LPASS_LPINOC },
};
static struct qcom_icc_node ebi = {
.name = "ebi",
.id = SM8550_SLAVE_EBI1,
.channels = 4,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
.id = SM8550_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_MASTER_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
.id = SM8550_SLAVE_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_MASTER_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
.id = SM8550_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_nsp_gemnoc = {
.name = "qns_nsp_gemnoc",
.id = SM8550_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_MASTER_COMPUTE_NOC },
};
static struct qcom_icc_node qns_pcie_mem_noc = {
.name = "qns_pcie_mem_noc",
.id = SM8550_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_MASTER_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node srvc_pcie_aggre_noc = {
.name = "srvc_pcie_aggre_noc",
.id = SM8550_SLAVE_SERVICE_PCIE_ANOC,
.channels = 1,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
.id = SM8550_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8550_MASTER_SNOC_GC_MEM_NOC },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
.id = SM8550_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_MASTER_SNOC_SF_MEM_NOC },
};
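/*
 * The *_disp and *_cam_ife_* nodes below appear to mirror slices of the
 * primary topology so that display and camera IFE bandwidth can be
 * voted independently of the main path.
 */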
static struct qcom_icc_node qns_llcc_disp = {
.name = "qns_llcc_disp",
.id = SM8550_SLAVE_LLCC_DISP,
.channels = 4,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_MASTER_LLCC_DISP },
};
static struct qcom_icc_node ebi_disp = {
.name = "ebi_disp",
.id = SM8550_SLAVE_EBI1_DISP,
.channels = 4,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_mem_noc_hf_disp = {
.name = "qns_mem_noc_hf_disp",
.id = SM8550_SLAVE_MNOC_HF_MEM_NOC_DISP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_MASTER_MNOC_HF_MEM_NOC_DISP },
};
static struct qcom_icc_node qns_llcc_cam_ife_0 = {
.name = "qns_llcc_cam_ife_0",
.id = SM8550_SLAVE_LLCC_CAM_IFE_0,
.channels = 4,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_MASTER_LLCC_CAM_IFE_0 },
};
static struct qcom_icc_node ebi_cam_ife_0 = {
.name = "ebi_cam_ife_0",
.id = SM8550_SLAVE_EBI1_CAM_IFE_0,
.channels = 4,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_mem_noc_hf_cam_ife_0 = {
.name = "qns_mem_noc_hf_cam_ife_0",
.id = SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_0,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_0 },
};
static struct qcom_icc_node qns_mem_noc_sf_cam_ife_0 = {
.name = "qns_mem_noc_sf_cam_ife_0",
.id = SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_0,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_0 },
};
static struct qcom_icc_node qns_llcc_cam_ife_1 = {
.name = "qns_llcc_cam_ife_1",
.id = SM8550_SLAVE_LLCC_CAM_IFE_1,
.channels = 4,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_MASTER_LLCC_CAM_IFE_1 },
};
static struct qcom_icc_node ebi_cam_ife_1 = {
.name = "ebi_cam_ife_1",
.id = SM8550_SLAVE_EBI1_CAM_IFE_1,
.channels = 4,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_mem_noc_hf_cam_ife_1 = {
.name = "qns_mem_noc_hf_cam_ife_1",
.id = SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_1,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_1 },
};
static struct qcom_icc_node qns_mem_noc_sf_cam_ife_1 = {
.name = "qns_mem_noc_sf_cam_ife_1",
.id = SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_1,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_1 },
};
static struct qcom_icc_node qns_llcc_cam_ife_2 = {
.name = "qns_llcc_cam_ife_2",
.id = SM8550_SLAVE_LLCC_CAM_IFE_2,
.channels = 4,
.buswidth = 16,
.num_links = 1,
.links = { SM8550_MASTER_LLCC_CAM_IFE_2 },
};
static struct qcom_icc_node ebi_cam_ife_2 = {
.name = "ebi_cam_ife_2",
.id = SM8550_SLAVE_EBI1_CAM_IFE_2,
.channels = 4,
.buswidth = 4,
.num_links = 0,
};
static struct qcom_icc_node qns_mem_noc_hf_cam_ife_2 = {
.name = "qns_mem_noc_hf_cam_ife_2",
.id = SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_2,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_2 },
};
static struct qcom_icc_node qns_mem_noc_sf_cam_ife_2 = {
.name = "qns_mem_noc_sf_cam_ife_2",
.id = SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_2,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_2 },
};
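/*
 * Bus Clock Manager (BCM) definitions: each BCM aggregates the votes of
 * its member nodes and is committed to hardware through RPMh.  A
 * keepalive BCM retains a minimum vote while active; an enable_mask
 * marks a mask-based BCM that votes a fixed bit pattern instead of
 * scaled bandwidth values.
 */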
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
.enable_mask = 0x8,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.num_nodes = 1,
.nodes = { &qxm_crypto },
};
static struct qcom_icc_bcm bcm_cn0 = {
.name = "CN0",
.enable_mask = 0x1,
.keepalive = true,
.num_nodes = 54,
.nodes = { &qsm_cfg, &qhs_ahb2phy0,
&qhs_ahb2phy1, &qhs_apss,
&qhs_camera_cfg, &qhs_clk_ctl,
&qhs_cpr_cx, &qhs_cpr_mmcx,
&qhs_cpr_mxa, &qhs_cpr_mxc,
&qhs_cpr_nspcx, &qhs_crypto0_cfg,
&qhs_cx_rdpm, &qhs_gpuss_cfg,
&qhs_i2c, &qhs_imem_cfg,
&qhs_ipa, &qhs_ipc_router,
&qhs_mss_cfg, &qhs_mx_rdpm,
&qhs_pcie0_cfg, &qhs_pcie1_cfg,
&qhs_pdm, &qhs_pimem_cfg,
&qhs_prng, &qhs_qdss_cfg,
&qhs_qspi, &qhs_qup1,
&qhs_qup2, &qhs_sdc2,
&qhs_sdc4, &qhs_spss_cfg,
&qhs_tcsr, &qhs_tlmm,
&qhs_ufs_mem_cfg, &qhs_usb3_0,
&qhs_venus_cfg, &qhs_vsense_ctrl_cfg,
&qss_lpass_qtb_cfg, &qss_mnoc_cfg,
&qss_nsp_qtb_cfg, &qss_pcie_anoc_cfg,
&xs_qdss_stm, &xs_sys_tcu_cfg,
&qnm_gemnoc_cnoc, &qnm_gemnoc_pcie,
&qhs_aoss, &qhs_tme_cfg,
&qss_cfg, &qss_ddrss_cfg,
&qxs_boot_imem, &qxs_imem,
&xs_pcie_0, &xs_pcie_1 },
};
static struct qcom_icc_bcm bcm_cn1 = {
.name = "CN1",
.num_nodes = 1,
.nodes = { &qhs_display_cfg },
};
static struct qcom_icc_bcm bcm_co0 = {
.name = "CO0",
.enable_mask = 0x1,
.num_nodes = 2,
.nodes = { &qxm_nsp, &qns_nsp_gemnoc },
};
static struct qcom_icc_bcm bcm_lp0 = {
.name = "LP0",
.num_nodes = 2,
.nodes = { &qnm_lpass_lpinoc, &qns_lpass_aggnoc },
};
static struct qcom_icc_bcm bcm_mc0 = {
.name = "MC0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_mm0 = {
.name = "MM0",
.num_nodes = 1,
.nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_bcm bcm_mm1 = {
.name = "MM1",
.enable_mask = 0x1,
.num_nodes = 8,
.nodes = { &qnm_camnoc_hf, &qnm_camnoc_icp,
&qnm_camnoc_sf, &qnm_vapss_hcp,
&qnm_video_cv_cpu, &qnm_video_cvp,
&qnm_video_v_cpu, &qns_mem_noc_sf },
};
static struct qcom_icc_bcm bcm_qup0 = {
.name = "QUP0",
.keepalive = true,
.vote_scale = 1,
.num_nodes = 1,
.nodes = { &qup0_core_slave },
};
static struct qcom_icc_bcm bcm_qup1 = {
.name = "QUP1",
.keepalive = true,
.vote_scale = 1,
.num_nodes = 1,
.nodes = { &qup1_core_slave },
};
static struct qcom_icc_bcm bcm_qup2 = {
.name = "QUP2",
.keepalive = true,
.vote_scale = 1,
.num_nodes = 1,
.nodes = { &qup2_core_slave },
};
static struct qcom_icc_bcm bcm_sh0 = {
.name = "SH0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_llcc },
};
static struct qcom_icc_bcm bcm_sh1 = {
.name = "SH1",
.enable_mask = 0x1,
.num_nodes = 13,
.nodes = { &alm_gpu_tcu, &alm_sys_tcu,
&chm_apps, &qnm_gpu,
&qnm_mdsp, &qnm_mnoc_hf,
&qnm_mnoc_sf, &qnm_nsp_gemnoc,
&qnm_pcie, &qnm_snoc_gc,
&qnm_snoc_sf, &qns_gem_noc_cnoc,
&qns_pcie },
};
static struct qcom_icc_bcm bcm_sn0 = {
.name = "SN0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_bcm bcm_sn1 = {
.name = "SN1",
.enable_mask = 0x1,
.num_nodes = 3,
.nodes = { &qhm_gic, &xm_gic,
&qns_gemnoc_gc },
};
static struct qcom_icc_bcm bcm_sn2 = {
.name = "SN2",
.num_nodes = 1,
.nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_bcm bcm_sn3 = {
.name = "SN3",
.num_nodes = 1,
.nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_bcm bcm_sn7 = {
.name = "SN7",
.num_nodes = 1,
.nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_bcm bcm_acv_disp = {
.name = "ACV",
.enable_mask = 0x1,
.num_nodes = 1,
.nodes = { &ebi_disp },
};
static struct qcom_icc_bcm bcm_mc0_disp = {
.name = "MC0",
.num_nodes = 1,
.nodes = { &ebi_disp },
};
static struct qcom_icc_bcm bcm_mm0_disp = {
.name = "MM0",
.num_nodes = 1,
.nodes = { &qns_mem_noc_hf_disp },
};
static struct qcom_icc_bcm bcm_sh0_disp = {
.name = "SH0",
.num_nodes = 1,
.nodes = { &qns_llcc_disp },
};
static struct qcom_icc_bcm bcm_sh1_disp = {
.name = "SH1",
.enable_mask = 0x1,
.num_nodes = 2,
.nodes = { &qnm_mnoc_hf_disp, &qnm_pcie_disp },
};
static struct qcom_icc_bcm bcm_acv_cam_ife_0 = {
.name = "ACV",
.enable_mask = 0x0,
.num_nodes = 1,
.nodes = { &ebi_cam_ife_0 },
};
static struct qcom_icc_bcm bcm_mc0_cam_ife_0 = {
.name = "MC0",
.num_nodes = 1,
.nodes = { &ebi_cam_ife_0 },
};
static struct qcom_icc_bcm bcm_mm0_cam_ife_0 = {
.name = "MM0",
.num_nodes = 1,
.nodes = { &qns_mem_noc_hf_cam_ife_0 },
};
static struct qcom_icc_bcm bcm_mm1_cam_ife_0 = {
.name = "MM1",
.enable_mask = 0x1,
.num_nodes = 4,
.nodes = { &qnm_camnoc_hf_cam_ife_0, &qnm_camnoc_icp_cam_ife_0,
&qnm_camnoc_sf_cam_ife_0, &qns_mem_noc_sf_cam_ife_0 },
};
static struct qcom_icc_bcm bcm_sh0_cam_ife_0 = {
.name = "SH0",
.num_nodes = 1,
.nodes = { &qns_llcc_cam_ife_0 },
};
static struct qcom_icc_bcm bcm_sh1_cam_ife_0 = {
.name = "SH1",
.enable_mask = 0x1,
.num_nodes = 3,
.nodes = { &qnm_mnoc_hf_cam_ife_0, &qnm_mnoc_sf_cam_ife_0,
&qnm_pcie_cam_ife_0 },
};
static struct qcom_icc_bcm bcm_acv_cam_ife_1 = {
.name = "ACV",
.enable_mask = 0x0,
.num_nodes = 1,
.nodes = { &ebi_cam_ife_1 },
};
static struct qcom_icc_bcm bcm_mc0_cam_ife_1 = {
.name = "MC0",
.num_nodes = 1,
.nodes = { &ebi_cam_ife_1 },
};
static struct qcom_icc_bcm bcm_mm0_cam_ife_1 = {
.name = "MM0",
.num_nodes = 1,
.nodes = { &qns_mem_noc_hf_cam_ife_1 },
};
static struct qcom_icc_bcm bcm_mm1_cam_ife_1 = {
.name = "MM1",
.enable_mask = 0x1,
.num_nodes = 4,
.nodes = { &qnm_camnoc_hf_cam_ife_1, &qnm_camnoc_icp_cam_ife_1,
&qnm_camnoc_sf_cam_ife_1, &qns_mem_noc_sf_cam_ife_1 },
};
static struct qcom_icc_bcm bcm_sh0_cam_ife_1 = {
.name = "SH0",
.num_nodes = 1,
.nodes = { &qns_llcc_cam_ife_1 },
};
static struct qcom_icc_bcm bcm_sh1_cam_ife_1 = {
.name = "SH1",
.enable_mask = 0x1,
.num_nodes = 3,
.nodes = { &qnm_mnoc_hf_cam_ife_1, &qnm_mnoc_sf_cam_ife_1,
&qnm_pcie_cam_ife_1 },
};
static struct qcom_icc_bcm bcm_acv_cam_ife_2 = {
.name = "ACV",
.enable_mask = 0x0,
.num_nodes = 1,
.nodes = { &ebi_cam_ife_2 },
};
static struct qcom_icc_bcm bcm_mc0_cam_ife_2 = {
.name = "MC0",
.num_nodes = 1,
.nodes = { &ebi_cam_ife_2 },
};
static struct qcom_icc_bcm bcm_mm0_cam_ife_2 = {
.name = "MM0",
.num_nodes = 1,
.nodes = { &qns_mem_noc_hf_cam_ife_2 },
};
static struct qcom_icc_bcm bcm_mm1_cam_ife_2 = {
.name = "MM1",
.enable_mask = 0x1,
.num_nodes = 4,
.nodes = { &qnm_camnoc_hf_cam_ife_2, &qnm_camnoc_icp_cam_ife_2,
&qnm_camnoc_sf_cam_ife_2, &qns_mem_noc_sf_cam_ife_2 },
};
static struct qcom_icc_bcm bcm_sh0_cam_ife_2 = {
.name = "SH0",
.num_nodes = 1,
.nodes = { &qns_llcc_cam_ife_2 },
};
static struct qcom_icc_bcm bcm_sh1_cam_ife_2 = {
.name = "SH1",
.enable_mask = 0x1,
.num_nodes = 3,
.nodes = { &qnm_mnoc_hf_cam_ife_2, &qnm_mnoc_sf_cam_ife_2,
&qnm_pcie_cam_ife_2 },
};
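/*
 * Per-NoC provider descriptors: each one pairs the nodes belonging to a
 * single NoC (indexed by the DT binding IDs) with the BCMs that must be
 * voted on behalf of those nodes.
 */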
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
};
static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_QSPI_0] = &qhm_qspi,
[MASTER_QUP_1] = &qhm_qup1,
[MASTER_SDCC_4] = &xm_sdc4,
[MASTER_UFS_MEM] = &xm_ufs_mem,
[MASTER_USB3_0] = &xm_usb3_0,
[SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
};
static const struct qcom_icc_desc sm8550_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
};
static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QUP_2] = &qhm_qup2,
[MASTER_CRYPTO] = &qxm_crypto,
[MASTER_IPA] = &qxm_ipa,
[MASTER_SP] = &qxm_sp,
[MASTER_QDSS_ETR] = &xm_qdss_etr_0,
[MASTER_QDSS_ETR_1] = &xm_qdss_etr_1,
[MASTER_SDCC_2] = &xm_sdc2,
[SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
};
static const struct qcom_icc_desc sm8550_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
static struct qcom_icc_bcm * const clk_virt_bcms[] = {
&bcm_qup0,
&bcm_qup1,
&bcm_qup2,
};
static struct qcom_icc_node * const clk_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[MASTER_QUP_CORE_2] = &qup2_core_master,
[SLAVE_QUP_CORE_0] = &qup0_core_slave,
[SLAVE_QUP_CORE_1] = &qup1_core_slave,
[SLAVE_QUP_CORE_2] = &qup2_core_slave,
};
static const struct qcom_icc_desc sm8550_clk_virt = {
.nodes = clk_virt_nodes,
.num_nodes = ARRAY_SIZE(clk_virt_nodes),
.bcms = clk_virt_bcms,
.num_bcms = ARRAY_SIZE(clk_virt_bcms),
};
static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
&bcm_cn1,
};
static struct qcom_icc_node * const config_noc_nodes[] = {
[MASTER_CNOC_CFG] = &qsm_cfg,
[SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
[SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
[SLAVE_APPSS] = &qhs_apss,
[SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
[SLAVE_CLK_CTL] = &qhs_clk_ctl,
[SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
[SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx,
[SLAVE_RBCPR_MXA_CFG] = &qhs_cpr_mxa,
[SLAVE_RBCPR_MXC_CFG] = &qhs_cpr_mxc,
[SLAVE_CPR_NSPCX] = &qhs_cpr_nspcx,
[SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
[SLAVE_CX_RDPM] = &qhs_cx_rdpm,
[SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
[SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
[SLAVE_I2C] = &qhs_i2c,
[SLAVE_IMEM_CFG] = &qhs_imem_cfg,
[SLAVE_IPA_CFG] = &qhs_ipa,
[SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
[SLAVE_CNOC_MSS] = &qhs_mss_cfg,
[SLAVE_MX_RDPM] = &qhs_mx_rdpm,
[SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
[SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
[SLAVE_PDM] = &qhs_pdm,
[SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
[SLAVE_PRNG] = &qhs_prng,
[SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
[SLAVE_QSPI_0] = &qhs_qspi,
[SLAVE_QUP_1] = &qhs_qup1,
[SLAVE_QUP_2] = &qhs_qup2,
[SLAVE_SDCC_2] = &qhs_sdc2,
[SLAVE_SDCC_4] = &qhs_sdc4,
[SLAVE_SPSS_CFG] = &qhs_spss_cfg,
[SLAVE_TCSR] = &qhs_tcsr,
[SLAVE_TLMM] = &qhs_tlmm,
[SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
[SLAVE_USB3_0] = &qhs_usb3_0,
[SLAVE_VENUS_CFG] = &qhs_venus_cfg,
[SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
[SLAVE_LPASS_QTB_CFG] = &qss_lpass_qtb_cfg,
[SLAVE_CNOC_MNOC_CFG] = &qss_mnoc_cfg,
[SLAVE_NSP_QTB_CFG] = &qss_nsp_qtb_cfg,
[SLAVE_PCIE_ANOC_CFG] = &qss_pcie_anoc_cfg,
[SLAVE_QDSS_STM] = &xs_qdss_stm,
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
static const struct qcom_icc_desc sm8550_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
static struct qcom_icc_bcm * const cnoc_main_bcms[] = {
&bcm_cn0,
};
static struct qcom_icc_node * const cnoc_main_nodes[] = {
[MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
[MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
[SLAVE_AOSS] = &qhs_aoss,
[SLAVE_TME_CFG] = &qhs_tme_cfg,
[SLAVE_CNOC_CFG] = &qss_cfg,
[SLAVE_DDRSS_CFG] = &qss_ddrss_cfg,
[SLAVE_BOOT_IMEM] = &qxs_boot_imem,
[SLAVE_IMEM] = &qxs_imem,
[SLAVE_PCIE_0] = &xs_pcie_0,
[SLAVE_PCIE_1] = &xs_pcie_1,
};
static const struct qcom_icc_desc sm8550_cnoc_main = {
.nodes = cnoc_main_nodes,
.num_nodes = ARRAY_SIZE(cnoc_main_nodes),
.bcms = cnoc_main_bcms,
.num_bcms = ARRAY_SIZE(cnoc_main_bcms),
};
static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh1,
&bcm_sh0_disp,
&bcm_sh1_disp,
&bcm_sh0_cam_ife_0,
&bcm_sh1_cam_ife_0,
&bcm_sh0_cam_ife_1,
&bcm_sh1_cam_ife_1,
&bcm_sh0_cam_ife_2,
&bcm_sh1_cam_ife_2,
};
static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_GPU_TCU] = &alm_gpu_tcu,
[MASTER_SYS_TCU] = &alm_sys_tcu,
[MASTER_APPSS_PROC] = &chm_apps,
[MASTER_GFX3D] = &qnm_gpu,
[MASTER_LPASS_GEM_NOC] = &qnm_lpass_gemnoc,
[MASTER_MSS_PROC] = &qnm_mdsp,
[MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
[MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
[MASTER_COMPUTE_NOC] = &qnm_nsp_gemnoc,
[MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
[MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
[MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
[SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
[SLAVE_LLCC] = &qns_llcc,
[SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
[MASTER_MNOC_HF_MEM_NOC_DISP] = &qnm_mnoc_hf_disp,
[MASTER_ANOC_PCIE_GEM_NOC_DISP] = &qnm_pcie_disp,
[SLAVE_LLCC_DISP] = &qns_llcc_disp,
[MASTER_MNOC_HF_MEM_NOC_CAM_IFE_0] = &qnm_mnoc_hf_cam_ife_0,
[MASTER_MNOC_SF_MEM_NOC_CAM_IFE_0] = &qnm_mnoc_sf_cam_ife_0,
[MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_0] = &qnm_pcie_cam_ife_0,
[SLAVE_LLCC_CAM_IFE_0] = &qns_llcc_cam_ife_0,
[MASTER_MNOC_HF_MEM_NOC_CAM_IFE_1] = &qnm_mnoc_hf_cam_ife_1,
[MASTER_MNOC_SF_MEM_NOC_CAM_IFE_1] = &qnm_mnoc_sf_cam_ife_1,
[MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_1] = &qnm_pcie_cam_ife_1,
[SLAVE_LLCC_CAM_IFE_1] = &qns_llcc_cam_ife_1,
[MASTER_MNOC_HF_MEM_NOC_CAM_IFE_2] = &qnm_mnoc_hf_cam_ife_2,
[MASTER_MNOC_SF_MEM_NOC_CAM_IFE_2] = &qnm_mnoc_sf_cam_ife_2,
[MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_2] = &qnm_pcie_cam_ife_2,
[SLAVE_LLCC_CAM_IFE_2] = &qns_llcc_cam_ife_2,
};
static const struct qcom_icc_desc sm8550_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
};
static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
[MASTER_LPIAON_NOC] = &qnm_lpiaon_noc,
[SLAVE_LPASS_GEM_NOC] = &qns_lpass_ag_noc_gemnoc,
};
static const struct qcom_icc_desc sm8550_lpass_ag_noc = {
.nodes = lpass_ag_noc_nodes,
.num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
.bcms = lpass_ag_noc_bcms,
.num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
};
static struct qcom_icc_bcm * const lpass_lpiaon_noc_bcms[] = {
&bcm_lp0,
};
static struct qcom_icc_node * const lpass_lpiaon_noc_nodes[] = {
[MASTER_LPASS_LPINOC] = &qnm_lpass_lpinoc,
[SLAVE_LPIAON_NOC_LPASS_AG_NOC] = &qns_lpass_aggnoc,
};
static const struct qcom_icc_desc sm8550_lpass_lpiaon_noc = {
.nodes = lpass_lpiaon_noc_nodes,
.num_nodes = ARRAY_SIZE(lpass_lpiaon_noc_nodes),
.bcms = lpass_lpiaon_noc_bcms,
.num_bcms = ARRAY_SIZE(lpass_lpiaon_noc_bcms),
};
static struct qcom_icc_bcm * const lpass_lpicx_noc_bcms[] = {
};
static struct qcom_icc_node * const lpass_lpicx_noc_nodes[] = {
[MASTER_LPASS_PROC] = &qxm_lpinoc_dsp_axim,
[SLAVE_LPICX_NOC_LPIAON_NOC] = &qns_lpi_aon_noc,
};
static const struct qcom_icc_desc sm8550_lpass_lpicx_noc = {
.nodes = lpass_lpicx_noc_nodes,
.num_nodes = ARRAY_SIZE(lpass_lpicx_noc_nodes),
.bcms = lpass_lpicx_noc_bcms,
.num_bcms = ARRAY_SIZE(lpass_lpicx_noc_bcms),
};
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
&bcm_acv_disp,
&bcm_mc0_disp,
&bcm_acv_cam_ife_0,
&bcm_mc0_cam_ife_0,
&bcm_acv_cam_ife_1,
&bcm_mc0_cam_ife_1,
&bcm_acv_cam_ife_2,
&bcm_mc0_cam_ife_2,
};
static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
[MASTER_LLCC_DISP] = &llcc_mc_disp,
[SLAVE_EBI1_DISP] = &ebi_disp,
[MASTER_LLCC_CAM_IFE_0] = &llcc_mc_cam_ife_0,
[SLAVE_EBI1_CAM_IFE_0] = &ebi_cam_ife_0,
[MASTER_LLCC_CAM_IFE_1] = &llcc_mc_cam_ife_1,
[SLAVE_EBI1_CAM_IFE_1] = &ebi_cam_ife_1,
[MASTER_LLCC_CAM_IFE_2] = &llcc_mc_cam_ife_2,
[SLAVE_EBI1_CAM_IFE_2] = &ebi_cam_ife_2,
};
static const struct qcom_icc_desc sm8550_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm0_disp,
&bcm_mm0_cam_ife_0,
&bcm_mm1_cam_ife_0,
&bcm_mm0_cam_ife_1,
&bcm_mm1_cam_ife_1,
&bcm_mm0_cam_ife_2,
&bcm_mm1_cam_ife_2,
};
static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
[MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
[MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
[MASTER_MDP] = &qnm_mdp,
[MASTER_CDSP_HCP] = &qnm_vapss_hcp,
[MASTER_VIDEO] = &qnm_video,
[MASTER_VIDEO_CV_PROC] = &qnm_video_cv_cpu,
[MASTER_VIDEO_PROC] = &qnm_video_cvp,
[MASTER_VIDEO_V_PROC] = &qnm_video_v_cpu,
[MASTER_CNOC_MNOC_CFG] = &qsm_mnoc_cfg,
[SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
[SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
[MASTER_MDP_DISP] = &qnm_mdp_disp,
[SLAVE_MNOC_HF_MEM_NOC_DISP] = &qns_mem_noc_hf_disp,
[MASTER_CAMNOC_HF_CAM_IFE_0] = &qnm_camnoc_hf_cam_ife_0,
[MASTER_CAMNOC_ICP_CAM_IFE_0] = &qnm_camnoc_icp_cam_ife_0,
[MASTER_CAMNOC_SF_CAM_IFE_0] = &qnm_camnoc_sf_cam_ife_0,
[SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_0] = &qns_mem_noc_hf_cam_ife_0,
[SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_0] = &qns_mem_noc_sf_cam_ife_0,
[MASTER_CAMNOC_HF_CAM_IFE_1] = &qnm_camnoc_hf_cam_ife_1,
[MASTER_CAMNOC_ICP_CAM_IFE_1] = &qnm_camnoc_icp_cam_ife_1,
[MASTER_CAMNOC_SF_CAM_IFE_1] = &qnm_camnoc_sf_cam_ife_1,
[SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_1] = &qns_mem_noc_hf_cam_ife_1,
[SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_1] = &qns_mem_noc_sf_cam_ife_1,
[MASTER_CAMNOC_HF_CAM_IFE_2] = &qnm_camnoc_hf_cam_ife_2,
[MASTER_CAMNOC_ICP_CAM_IFE_2] = &qnm_camnoc_icp_cam_ife_2,
[MASTER_CAMNOC_SF_CAM_IFE_2] = &qnm_camnoc_sf_cam_ife_2,
[SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_2] = &qns_mem_noc_hf_cam_ife_2,
[SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_2] = &qns_mem_noc_sf_cam_ife_2,
};
static const struct qcom_icc_desc sm8550_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
&bcm_co0,
};
static struct qcom_icc_node * const nsp_noc_nodes[] = {
[MASTER_CDSP_PROC] = &qxm_nsp,
[SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
};
static const struct qcom_icc_desc sm8550_nsp_noc = {
.nodes = nsp_noc_nodes,
.num_nodes = ARRAY_SIZE(nsp_noc_nodes),
.bcms = nsp_noc_bcms,
.num_bcms = ARRAY_SIZE(nsp_noc_bcms),
};
static struct qcom_icc_bcm * const pcie_anoc_bcms[] = {
&bcm_sn7,
};
static struct qcom_icc_node * const pcie_anoc_nodes[] = {
[MASTER_PCIE_ANOC_CFG] = &qsm_pcie_anoc_cfg,
[MASTER_PCIE_0] = &xm_pcie3_0,
[MASTER_PCIE_1] = &xm_pcie3_1,
[SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
[SLAVE_SERVICE_PCIE_ANOC] = &srvc_pcie_aggre_noc,
};
static const struct qcom_icc_desc sm8550_pcie_anoc = {
.nodes = pcie_anoc_nodes,
.num_nodes = ARRAY_SIZE(pcie_anoc_nodes),
.bcms = pcie_anoc_bcms,
.num_bcms = ARRAY_SIZE(pcie_anoc_bcms),
};
static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn2,
&bcm_sn3,
};
static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_GIC_AHB] = &qhm_gic,
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
[MASTER_GIC] = &xm_gic,
[SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
[SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
};
static const struct qcom_icc_desc sm8550_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
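/* Map each NoC instance in the devicetree onto its topology descriptor. */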
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sm8550-aggre1-noc",
.data = &sm8550_aggre1_noc},
{ .compatible = "qcom,sm8550-aggre2-noc",
.data = &sm8550_aggre2_noc},
{ .compatible = "qcom,sm8550-clk-virt",
.data = &sm8550_clk_virt},
{ .compatible = "qcom,sm8550-config-noc",
.data = &sm8550_config_noc},
{ .compatible = "qcom,sm8550-cnoc-main",
.data = &sm8550_cnoc_main},
{ .compatible = "qcom,sm8550-gem-noc",
.data = &sm8550_gem_noc},
{ .compatible = "qcom,sm8550-lpass-ag-noc",
.data = &sm8550_lpass_ag_noc},
{ .compatible = "qcom,sm8550-lpass-lpiaon-noc",
.data = &sm8550_lpass_lpiaon_noc},
{ .compatible = "qcom,sm8550-lpass-lpicx-noc",
.data = &sm8550_lpass_lpicx_noc},
{ .compatible = "qcom,sm8550-mc-virt",
.data = &sm8550_mc_virt},
{ .compatible = "qcom,sm8550-mmss-noc",
.data = &sm8550_mmss_noc},
{ .compatible = "qcom,sm8550-nsp-noc",
.data = &sm8550_nsp_noc},
{ .compatible = "qcom,sm8550-pcie-anoc",
.data = &sm8550_pcie_anoc},
{ .compatible = "qcom,sm8550-system-noc",
.data = &sm8550_system_noc},
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
.remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sm8550",
.of_match_table = qnoc_of_match,
},
};
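/*
 * Registering at core_initcall time makes the interconnect providers
 * available before most consumers begin probing.
 */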
static int __init qnoc_driver_init(void)
{
return platform_driver_register(&qnoc_driver);
}
core_initcall(qnoc_driver_init);
static void __exit qnoc_driver_exit(void)
{
platform_driver_unregister(&qnoc_driver);
}
module_exit(qnoc_driver_exit);
MODULE_DESCRIPTION("sm8550 NoC driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/interconnect/qcom/sm8550.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
*/
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sm8150.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sm8150.h"
static struct qcom_icc_node qhm_a1noc_cfg = {
.name = "qhm_a1noc_cfg",
.id = SM8150_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8150_SLAVE_SERVICE_A1NOC },
};
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
.id = SM8150_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8150_A1NOC_SNOC_SLV },
};
static struct qcom_icc_node xm_emac = {
.name = "xm_emac",
.id = SM8150_MASTER_EMAC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8150_A1NOC_SNOC_SLV },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
.id = SM8150_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8150_A1NOC_SNOC_SLV },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
.id = SM8150_MASTER_USB3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8150_A1NOC_SNOC_SLV },
};
static struct qcom_icc_node xm_usb3_1 = {
.name = "xm_usb3_1",
.id = SM8150_MASTER_USB3_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8150_A1NOC_SNOC_SLV },
};
static struct qcom_icc_node qhm_a2noc_cfg = {
.name = "qhm_a2noc_cfg",
.id = SM8150_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8150_SLAVE_SERVICE_A2NOC },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
.id = SM8150_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8150_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
.id = SM8150_MASTER_QSPI,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8150_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
.id = SM8150_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8150_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
.id = SM8150_MASTER_QUP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8150_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node qhm_sensorss_ahb = {
.name = "qhm_sensorss_ahb",
.id = SM8150_MASTER_SENSORS_AHB,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8150_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node qhm_tsif = {
.name = "qhm_tsif",
.id = SM8150_MASTER_TSIF,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8150_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node qnm_cnoc = {
.name = "qnm_cnoc",
.id = SM8150_MASTER_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8150_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
.id = SM8150_MASTER_CRYPTO_CORE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8150_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
.id = SM8150_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8150_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
.id = SM8150_MASTER_PCIE,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8150_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
.id = SM8150_MASTER_PCIE_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8150_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
.id = SM8150_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8150_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
.id = SM8150_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8150_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
.id = SM8150_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8150_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node qxm_camnoc_hf0_uncomp = {
.name = "qxm_camnoc_hf0_uncomp",
.id = SM8150_MASTER_CAMNOC_HF0_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8150_SLAVE_CAMNOC_UNCOMP },
};
static struct qcom_icc_node qxm_camnoc_hf1_uncomp = {
.name = "qxm_camnoc_hf1_uncomp",
.id = SM8150_MASTER_CAMNOC_HF1_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8150_SLAVE_CAMNOC_UNCOMP },
};
static struct qcom_icc_node qxm_camnoc_sf_uncomp = {
.name = "qxm_camnoc_sf_uncomp",
.id = SM8150_MASTER_CAMNOC_SF_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8150_SLAVE_CAMNOC_UNCOMP },
};
static struct qcom_icc_node qnm_npu = {
.name = "qnm_npu",
.id = SM8150_MASTER_NPU,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8150_SLAVE_CDSP_MEM_NOC },
};
static struct qcom_icc_node qhm_spdm = {
.name = "qhm_spdm",
.id = SM8150_MASTER_SPDM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8150_SLAVE_CNOC_A2NOC },
};
static struct qcom_icc_node qnm_snoc = {
.name = "qnm_snoc",
.id = SM8150_SNOC_CNOC_MAS,
.channels = 1,
.buswidth = 8,
.num_links = 50,
.links = { SM8150_SLAVE_TLMM_SOUTH,
SM8150_SLAVE_CDSP_CFG,
SM8150_SLAVE_SPSS_CFG,
SM8150_SLAVE_CAMERA_CFG,
SM8150_SLAVE_SDCC_4,
SM8150_SLAVE_SDCC_2,
SM8150_SLAVE_CNOC_MNOC_CFG,
SM8150_SLAVE_EMAC_CFG,
SM8150_SLAVE_UFS_MEM_CFG,
SM8150_SLAVE_TLMM_EAST,
SM8150_SLAVE_SSC_CFG,
SM8150_SLAVE_SNOC_CFG,
SM8150_SLAVE_NORTH_PHY_CFG,
SM8150_SLAVE_QUP_0,
SM8150_SLAVE_GLM,
SM8150_SLAVE_PCIE_1_CFG,
SM8150_SLAVE_A2NOC_CFG,
SM8150_SLAVE_QDSS_CFG,
SM8150_SLAVE_DISPLAY_CFG,
SM8150_SLAVE_TCSR,
SM8150_SLAVE_CNOC_DDRSS,
SM8150_SLAVE_RBCPR_MMCX_CFG,
SM8150_SLAVE_NPU_CFG,
SM8150_SLAVE_PCIE_0_CFG,
SM8150_SLAVE_GRAPHICS_3D_CFG,
SM8150_SLAVE_VENUS_CFG,
SM8150_SLAVE_TSIF,
SM8150_SLAVE_IPA_CFG,
SM8150_SLAVE_CLK_CTL,
SM8150_SLAVE_AOP,
SM8150_SLAVE_QUP_1,
SM8150_SLAVE_AHB2PHY_SOUTH,
SM8150_SLAVE_USB3_1,
SM8150_SLAVE_SERVICE_CNOC,
SM8150_SLAVE_UFS_CARD_CFG,
SM8150_SLAVE_QUP_2,
SM8150_SLAVE_RBCPR_CX_CFG,
SM8150_SLAVE_TLMM_WEST,
SM8150_SLAVE_A1NOC_CFG,
SM8150_SLAVE_AOSS,
SM8150_SLAVE_PRNG,
SM8150_SLAVE_VSENSE_CTRL_CFG,
SM8150_SLAVE_QSPI,
SM8150_SLAVE_USB3,
SM8150_SLAVE_SPDM_WRAPPER,
SM8150_SLAVE_CRYPTO_0_CFG,
SM8150_SLAVE_PIMEM_CFG,
SM8150_SLAVE_TLMM_NORTH,
SM8150_SLAVE_RBCPR_MX_CFG,
SM8150_SLAVE_IMEM_CFG
},
};
static struct qcom_icc_node xm_qdss_dap = {
.name = "xm_qdss_dap",
.id = SM8150_MASTER_QDSS_DAP,
.channels = 1,
.buswidth = 8,
.num_links = 51,
.links = { SM8150_SLAVE_TLMM_SOUTH,
SM8150_SLAVE_CDSP_CFG,
SM8150_SLAVE_SPSS_CFG,
SM8150_SLAVE_CAMERA_CFG,
SM8150_SLAVE_SDCC_4,
SM8150_SLAVE_SDCC_2,
SM8150_SLAVE_CNOC_MNOC_CFG,
SM8150_SLAVE_EMAC_CFG,
SM8150_SLAVE_UFS_MEM_CFG,
SM8150_SLAVE_TLMM_EAST,
SM8150_SLAVE_SSC_CFG,
SM8150_SLAVE_SNOC_CFG,
SM8150_SLAVE_NORTH_PHY_CFG,
SM8150_SLAVE_QUP_0,
SM8150_SLAVE_GLM,
SM8150_SLAVE_PCIE_1_CFG,
SM8150_SLAVE_A2NOC_CFG,
SM8150_SLAVE_QDSS_CFG,
SM8150_SLAVE_DISPLAY_CFG,
SM8150_SLAVE_TCSR,
SM8150_SLAVE_CNOC_DDRSS,
SM8150_SLAVE_CNOC_A2NOC,
SM8150_SLAVE_RBCPR_MMCX_CFG,
SM8150_SLAVE_NPU_CFG,
SM8150_SLAVE_PCIE_0_CFG,
SM8150_SLAVE_GRAPHICS_3D_CFG,
SM8150_SLAVE_VENUS_CFG,
SM8150_SLAVE_TSIF,
SM8150_SLAVE_IPA_CFG,
SM8150_SLAVE_CLK_CTL,
SM8150_SLAVE_AOP,
SM8150_SLAVE_QUP_1,
SM8150_SLAVE_AHB2PHY_SOUTH,
SM8150_SLAVE_USB3_1,
SM8150_SLAVE_SERVICE_CNOC,
SM8150_SLAVE_UFS_CARD_CFG,
SM8150_SLAVE_QUP_2,
SM8150_SLAVE_RBCPR_CX_CFG,
SM8150_SLAVE_TLMM_WEST,
SM8150_SLAVE_A1NOC_CFG,
SM8150_SLAVE_AOSS,
SM8150_SLAVE_PRNG,
SM8150_SLAVE_VSENSE_CTRL_CFG,
SM8150_SLAVE_QSPI,
SM8150_SLAVE_USB3,
SM8150_SLAVE_SPDM_WRAPPER,
SM8150_SLAVE_CRYPTO_0_CFG,
SM8150_SLAVE_PIMEM_CFG,
SM8150_SLAVE_TLMM_NORTH,
SM8150_SLAVE_RBCPR_MX_CFG,
SM8150_SLAVE_IMEM_CFG
},
};
static struct qcom_icc_node qhm_cnoc_dc_noc = {
.name = "qhm_cnoc_dc_noc",
.id = SM8150_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
.links = { SM8150_SLAVE_GEM_NOC_CFG,
SM8150_SLAVE_LLCC_CFG
},
};
static struct qcom_icc_node acm_apps = {
.name = "acm_apps",
.id = SM8150_MASTER_AMPSS_M0,
.channels = 2,
.buswidth = 32,
.num_links = 3,
.links = { SM8150_SLAVE_ECC,
SM8150_SLAVE_LLCC,
SM8150_SLAVE_GEM_NOC_SNOC
},
};
static struct qcom_icc_node acm_gpu_tcu = {
.name = "acm_gpu_tcu",
.id = SM8150_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SM8150_SLAVE_LLCC,
SM8150_SLAVE_GEM_NOC_SNOC
},
};
static struct qcom_icc_node acm_sys_tcu = {
.name = "acm_sys_tcu",
.id = SM8150_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SM8150_SLAVE_LLCC,
SM8150_SLAVE_GEM_NOC_SNOC
},
};
static struct qcom_icc_node qhm_gemnoc_cfg = {
.name = "qhm_gemnoc_cfg",
.id = SM8150_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 2,
.links = { SM8150_SLAVE_SERVICE_GEM_NOC,
SM8150_SLAVE_MSS_PROC_MS_MPU_CFG
},
};
static struct qcom_icc_node qnm_cmpnoc = {
.name = "qnm_cmpnoc",
.id = SM8150_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 3,
.links = { SM8150_SLAVE_ECC,
SM8150_SLAVE_LLCC,
SM8150_SLAVE_GEM_NOC_SNOC
},
};
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
.id = SM8150_MASTER_GRAPHICS_3D,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SM8150_SLAVE_LLCC,
SM8150_SLAVE_GEM_NOC_SNOC
},
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
.id = SM8150_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8150_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
.id = SM8150_MASTER_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
.links = { SM8150_SLAVE_LLCC,
SM8150_SLAVE_GEM_NOC_SNOC
},
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
.id = SM8150_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
.links = { SM8150_SLAVE_LLCC,
SM8150_SLAVE_GEM_NOC_SNOC
},
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
.id = SM8150_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8150_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
.id = SM8150_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8150_SLAVE_LLCC },
};
static struct qcom_icc_node qxm_ecc = {
.name = "qxm_ecc",
.id = SM8150_MASTER_ECC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8150_SLAVE_LLCC },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
.id = SM8150_MASTER_LLCC,
.channels = 4,
.buswidth = 4,
.num_links = 1,
.links = { SM8150_SLAVE_EBI_CH0 },
};
static struct qcom_icc_node qhm_mnoc_cfg = {
.name = "qhm_mnoc_cfg",
.id = SM8150_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8150_SLAVE_SERVICE_MNOC },
};
static struct qcom_icc_node qxm_camnoc_hf0 = {
.name = "qxm_camnoc_hf0",
.id = SM8150_MASTER_CAMNOC_HF0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8150_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qxm_camnoc_hf1 = {
.name = "qxm_camnoc_hf1",
.id = SM8150_MASTER_CAMNOC_HF1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8150_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qxm_camnoc_sf = {
.name = "qxm_camnoc_sf",
.id = SM8150_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8150_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_mdp0 = {
.name = "qxm_mdp0",
.id = SM8150_MASTER_MDP_PORT0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8150_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qxm_mdp1 = {
.name = "qxm_mdp1",
.id = SM8150_MASTER_MDP_PORT1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8150_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qxm_rot = {
.name = "qxm_rot",
.id = SM8150_MASTER_ROTATOR,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8150_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_venus0 = {
.name = "qxm_venus0",
.id = SM8150_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8150_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_venus1 = {
.name = "qxm_venus1",
.id = SM8150_MASTER_VIDEO_P1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8150_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_venus_arm9 = {
.name = "qxm_venus_arm9",
.id = SM8150_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8150_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qhm_snoc_cfg = {
.name = "qhm_snoc_cfg",
.id = SM8150_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8150_SLAVE_SERVICE_SNOC },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
.id = SM8150_A1NOC_SNOC_MAS,
.channels = 1,
.buswidth = 16,
.num_links = 6,
.links = { SM8150_SLAVE_SNOC_GEM_NOC_SF,
SM8150_SLAVE_PIMEM,
SM8150_SLAVE_OCIMEM,
SM8150_SLAVE_APPSS,
SM8150_SNOC_CNOC_SLV,
SM8150_SLAVE_QDSS_STM
},
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
.id = SM8150_A2NOC_SNOC_MAS,
.channels = 1,
.buswidth = 16,
.num_links = 9,
.links = { SM8150_SLAVE_SNOC_GEM_NOC_SF,
SM8150_SLAVE_PIMEM,
SM8150_SLAVE_OCIMEM,
SM8150_SLAVE_APPSS,
SM8150_SNOC_CNOC_SLV,
SM8150_SLAVE_PCIE_0,
SM8150_SLAVE_PCIE_1,
SM8150_SLAVE_TCU,
SM8150_SLAVE_QDSS_STM
},
};
static struct qcom_icc_node qnm_gemnoc = {
.name = "qnm_gemnoc",
.id = SM8150_MASTER_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 6,
.links = { SM8150_SLAVE_PIMEM,
SM8150_SLAVE_OCIMEM,
SM8150_SLAVE_APPSS,
SM8150_SNOC_CNOC_SLV,
SM8150_SLAVE_TCU,
SM8150_SLAVE_QDSS_STM
},
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
.id = SM8150_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SM8150_SLAVE_SNOC_GEM_NOC_GC,
SM8150_SLAVE_OCIMEM
},
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
.id = SM8150_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SM8150_SLAVE_SNOC_GEM_NOC_GC,
SM8150_SLAVE_OCIMEM
},
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
.id = SM8150_A1NOC_SNOC_SLV,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8150_A1NOC_SNOC_MAS },
};
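/*
 * Terminal slaves such as the service ports below omit .num_links and
 * .links; static storage leaves them zeroed.
 */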
static struct qcom_icc_node srvc_aggre1_noc = {
.name = "srvc_aggre1_noc",
.id = SM8150_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
.id = SM8150_A2NOC_SNOC_SLV,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8150_A2NOC_SNOC_MAS },
};
static struct qcom_icc_node qns_pcie_mem_noc = {
.name = "qns_pcie_mem_noc",
.id = SM8150_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8150_MASTER_GEM_NOC_PCIE_SNOC },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
.id = SM8150_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_camnoc_uncomp = {
.name = "qns_camnoc_uncomp",
.id = SM8150_SLAVE_CAMNOC_UNCOMP,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node qns_cdsp_mem_noc = {
.name = "qns_cdsp_mem_noc",
.id = SM8150_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8150_MASTER_COMPUTE_NOC },
};
static struct qcom_icc_node qhs_a1_noc_cfg = {
.name = "qhs_a1_noc_cfg",
.id = SM8150_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8150_MASTER_A1NOC_CFG },
};
static struct qcom_icc_node qhs_a2_noc_cfg = {
.name = "qhs_a2_noc_cfg",
.id = SM8150_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8150_MASTER_A2NOC_CFG },
};
static struct qcom_icc_node qhs_ahb2phy_south = {
.name = "qhs_ahb2phy_south",
.id = SM8150_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aop = {
.name = "qhs_aop",
.id = SM8150_SLAVE_AOP,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
.id = SM8150_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
.id = SM8150_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
.id = SM8150_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_compute_dsp = {
.name = "qhs_compute_dsp",
.id = SM8150_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
.id = SM8150_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mmcx = {
.name = "qhs_cpr_mmcx",
.id = SM8150_SLAVE_RBCPR_MMCX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mx = {
.name = "qhs_cpr_mx",
.id = SM8150_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
.id = SM8150_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ddrss_cfg = {
.name = "qhs_ddrss_cfg",
.id = SM8150_SLAVE_CNOC_DDRSS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8150_MASTER_CNOC_DC_NOC },
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
.id = SM8150_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_emac_cfg = {
.name = "qhs_emac_cfg",
.id = SM8150_SLAVE_EMAC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_glm = {
.name = "qhs_glm",
.id = SM8150_SLAVE_GLM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
.id = SM8150_SLAVE_GRAPHICS_3D_CFG,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
.id = SM8150_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
.id = SM8150_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mnoc_cfg = {
.name = "qhs_mnoc_cfg",
.id = SM8150_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8150_MASTER_CNOC_MNOC_CFG },
};
static struct qcom_icc_node qhs_npu_cfg = {
.name = "qhs_npu_cfg",
.id = SM8150_SLAVE_NPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
.id = SM8150_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie1_cfg = {
.name = "qhs_pcie1_cfg",
.id = SM8150_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_phy_refgen_north = {
.name = "qhs_phy_refgen_north",
.id = SM8150_SLAVE_NORTH_PHY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
.id = SM8150_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
.id = SM8150_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
.id = SM8150_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
.id = SM8150_SLAVE_QSPI,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qupv3_east = {
.name = "qhs_qupv3_east",
.id = SM8150_SLAVE_QUP_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qupv3_north = {
.name = "qhs_qupv3_north",
.id = SM8150_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qupv3_south = {
.name = "qhs_qupv3_south",
.id = SM8150_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
.id = SM8150_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
.id = SM8150_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_snoc_cfg = {
.name = "qhs_snoc_cfg",
.id = SM8150_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8150_MASTER_SNOC_CFG },
};
static struct qcom_icc_node qhs_spdm = {
.name = "qhs_spdm",
.id = SM8150_SLAVE_SPDM_WRAPPER,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_spss_cfg = {
.name = "qhs_spss_cfg",
.id = SM8150_SLAVE_SPSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ssc_cfg = {
.name = "qhs_ssc_cfg",
.id = SM8150_SLAVE_SSC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
.id = SM8150_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_east = {
.name = "qhs_tlmm_east",
.id = SM8150_SLAVE_TLMM_EAST,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_north = {
.name = "qhs_tlmm_north",
.id = SM8150_SLAVE_TLMM_NORTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_south = {
.name = "qhs_tlmm_south",
.id = SM8150_SLAVE_TLMM_SOUTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_west = {
.name = "qhs_tlmm_west",
.id = SM8150_SLAVE_TLMM_WEST,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tsif = {
.name = "qhs_tsif",
.id = SM8150_SLAVE_TSIF,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_card_cfg = {
.name = "qhs_ufs_card_cfg",
.id = SM8150_SLAVE_UFS_CARD_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
.id = SM8150_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
.id = SM8150_SLAVE_USB3,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_1 = {
.name = "qhs_usb3_1",
.id = SM8150_SLAVE_USB3_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
.id = SM8150_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
.id = SM8150_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_cnoc_a2noc = {
.name = "qns_cnoc_a2noc",
.id = SM8150_SLAVE_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8150_MASTER_CNOC_A2NOC },
};
static struct qcom_icc_node srvc_cnoc = {
.name = "srvc_cnoc",
.id = SM8150_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
.id = SM8150_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_memnoc = {
.name = "qhs_memnoc",
.id = SM8150_SLAVE_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8150_MASTER_GEM_NOC_CFG },
};
static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
.name = "qhs_mdsp_ms_mpu_cfg",
.id = SM8150_SLAVE_MSS_PROC_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_ecc = {
.name = "qns_ecc",
.id = SM8150_SLAVE_ECC,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node qns_gem_noc_snoc = {
.name = "qns_gem_noc_snoc",
.id = SM8150_SLAVE_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8150_MASTER_GEM_NOC_SNOC },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
.id = SM8150_SLAVE_LLCC,
.channels = 4,
.buswidth = 16,
.num_links = 1,
.links = { SM8150_MASTER_LLCC },
};
static struct qcom_icc_node srvc_gemnoc = {
.name = "srvc_gemnoc",
.id = SM8150_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
.id = SM8150_SLAVE_EBI_CH0,
.channels = 4,
.buswidth = 4,
};
static struct qcom_icc_node qns2_mem_noc = {
.name = "qns2_mem_noc",
.id = SM8150_SLAVE_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8150_MASTER_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
.id = SM8150_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8150_MASTER_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
.id = SM8150_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
.id = SM8150_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qns_cnoc = {
.name = "qns_cnoc",
.id = SM8150_SNOC_CNOC_SLV,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8150_SNOC_CNOC_MAS },
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
.id = SM8150_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8150_MASTER_SNOC_GC_MEM_NOC },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
.id = SM8150_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8150_MASTER_SNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
.id = SM8150_SLAVE_OCIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
.id = SM8150_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
.id = SM8150_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_pcie_0 = {
.name = "xs_pcie_0",
.id = SM8150_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_pcie_1 = {
.name = "xs_pcie_1",
.id = SM8150_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
.id = SM8150_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
.id = SM8150_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
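/*
 * BCM votes for sm8150.  None of these use mask-based (enable_mask)
 * voting; bandwidth is always scaled, and keepalive alone marks the
 * always-on votes.
 */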
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
.keepalive = false,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_mc0 = {
.name = "MC0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_sh0 = {
.name = "SH0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_llcc },
};
static struct qcom_icc_bcm bcm_mm0 = {
.name = "MM0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_bcm bcm_mm1 = {
.name = "MM1",
.keepalive = false,
.num_nodes = 7,
.nodes = { &qxm_camnoc_hf0_uncomp,
&qxm_camnoc_hf1_uncomp,
&qxm_camnoc_sf_uncomp,
&qxm_camnoc_hf0,
&qxm_camnoc_hf1,
&qxm_mdp0,
&qxm_mdp1
},
};
static struct qcom_icc_bcm bcm_sh2 = {
.name = "SH2",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_gem_noc_snoc },
};
static struct qcom_icc_bcm bcm_mm2 = {
.name = "MM2",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qxm_camnoc_sf, &qns2_mem_noc },
};
static struct qcom_icc_bcm bcm_sh3 = {
.name = "SH3",
.keepalive = false,
.num_nodes = 2,
.nodes = { &acm_gpu_tcu, &acm_sys_tcu },
};
static struct qcom_icc_bcm bcm_mm3 = {
.name = "MM3",
.keepalive = false,
.num_nodes = 4,
.nodes = { &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9 },
};
static struct qcom_icc_bcm bcm_sh4 = {
.name = "SH4",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_cmpnoc },
};
static struct qcom_icc_bcm bcm_sh5 = {
.name = "SH5",
.keepalive = false,
.num_nodes = 1,
.nodes = { &acm_apps },
};
static struct qcom_icc_bcm bcm_sn0 = {
.name = "SN0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_bcm bcm_co0 = {
.name = "CO0",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_cdsp_mem_noc },
};
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxm_crypto },
};
static struct qcom_icc_bcm bcm_sn1 = {
.name = "SN1",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxs_imem },
};
static struct qcom_icc_bcm bcm_co1 = {
.name = "CO1",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_npu },
};
static struct qcom_icc_bcm bcm_cn0 = {
.name = "CN0",
.keepalive = true,
.num_nodes = 53,
.nodes = { &qhm_spdm,
&qnm_snoc,
&qhs_a1_noc_cfg,
&qhs_a2_noc_cfg,
&qhs_ahb2phy_south,
&qhs_aop,
&qhs_aoss,
&qhs_camera_cfg,
&qhs_clk_ctl,
&qhs_compute_dsp,
&qhs_cpr_cx,
&qhs_cpr_mmcx,
&qhs_cpr_mx,
&qhs_crypto0_cfg,
&qhs_ddrss_cfg,
&qhs_display_cfg,
&qhs_emac_cfg,
&qhs_glm,
&qhs_gpuss_cfg,
&qhs_imem_cfg,
&qhs_ipa,
&qhs_mnoc_cfg,
&qhs_npu_cfg,
&qhs_pcie0_cfg,
&qhs_pcie1_cfg,
&qhs_phy_refgen_north,
&qhs_pimem_cfg,
&qhs_prng,
&qhs_qdss_cfg,
&qhs_qspi,
&qhs_qupv3_east,
&qhs_qupv3_north,
&qhs_qupv3_south,
&qhs_sdc2,
&qhs_sdc4,
&qhs_snoc_cfg,
&qhs_spdm,
&qhs_spss_cfg,
&qhs_ssc_cfg,
&qhs_tcsr,
&qhs_tlmm_east,
&qhs_tlmm_north,
&qhs_tlmm_south,
&qhs_tlmm_west,
&qhs_tsif,
&qhs_ufs_card_cfg,
&qhs_ufs_mem_cfg,
&qhs_usb3_0,
&qhs_usb3_1,
&qhs_venus_cfg,
&qhs_vsense_ctrl_cfg,
&qns_cnoc_a2noc,
&srvc_cnoc
},
};
static struct qcom_icc_bcm bcm_qup0 = {
.name = "QUP0",
.keepalive = false,
.num_nodes = 3,
.nodes = { &qhm_qup0, &qhm_qup1, &qhm_qup2 },
};
static struct qcom_icc_bcm bcm_sn2 = {
.name = "SN2",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_bcm bcm_sn3 = {
.name = "SN3",
.keepalive = false,
.num_nodes = 3,
.nodes = { &srvc_aggre1_noc, &srvc_aggre2_noc, &qns_cnoc },
};
static struct qcom_icc_bcm bcm_sn4 = {
.name = "SN4",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxs_pimem },
};
static struct qcom_icc_bcm bcm_sn5 = {
.name = "SN5",
.keepalive = false,
.num_nodes = 1,
.nodes = { &xs_qdss_stm },
};
static struct qcom_icc_bcm bcm_sn8 = {
.name = "SN8",
.keepalive = false,
.num_nodes = 2,
.nodes = { &xs_pcie_0, &xs_pcie_1 },
};
static struct qcom_icc_bcm bcm_sn9 = {
.name = "SN9",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_bcm bcm_sn11 = {
.name = "SN11",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_bcm bcm_sn12 = {
.name = "SN12",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qxm_pimem, &xm_gic },
};
static struct qcom_icc_bcm bcm_sn14 = {
.name = "SN14",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_bcm bcm_sn15 = {
.name = "SN15",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_gemnoc },
};
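/*
 * Note: each NoC below is exposed as its own interconnect provider. The
 * descriptor pairs a node table (indexed by the binding's node IDs) with
 * the BCMs that cast bandwidth votes on behalf of those nodes.
 */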
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_qup0,
&bcm_sn3,
};
static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
[MASTER_QUP_0] = &qhm_qup0,
[MASTER_EMAC] = &xm_emac,
[MASTER_UFS_MEM] = &xm_ufs_mem,
[MASTER_USB3] = &xm_usb3_0,
[MASTER_USB3_1] = &xm_usb3_1,
[A1NOC_SNOC_SLV] = &qns_a1noc_snoc,
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
static const struct qcom_icc_desc sm8150_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
&bcm_qup0,
&bcm_sn14,
&bcm_sn3,
};
static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QSPI] = &qhm_qspi,
[MASTER_QUP_1] = &qhm_qup1,
[MASTER_QUP_2] = &qhm_qup2,
[MASTER_SENSORS_AHB] = &qhm_sensorss_ahb,
[MASTER_TSIF] = &qhm_tsif,
[MASTER_CNOC_A2NOC] = &qnm_cnoc,
[MASTER_CRYPTO_CORE_0] = &qxm_crypto,
[MASTER_IPA] = &qxm_ipa,
[MASTER_PCIE] = &xm_pcie3_0,
[MASTER_PCIE_1] = &xm_pcie3_1,
[MASTER_QDSS_ETR] = &xm_qdss_etr,
[MASTER_SDCC_2] = &xm_sdc2,
[MASTER_SDCC_4] = &xm_sdc4,
[A2NOC_SNOC_SLV] = &qns_a2noc_snoc,
[SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
static const struct qcom_icc_desc sm8150_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
static struct qcom_icc_bcm * const camnoc_virt_bcms[] = {
&bcm_mm1,
};
static struct qcom_icc_node * const camnoc_virt_nodes[] = {
[MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
[MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
[MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
[SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
};
static const struct qcom_icc_desc sm8150_camnoc_virt = {
.nodes = camnoc_virt_nodes,
.num_nodes = ARRAY_SIZE(camnoc_virt_nodes),
.bcms = camnoc_virt_bcms,
.num_bcms = ARRAY_SIZE(camnoc_virt_bcms),
};
static struct qcom_icc_bcm * const compute_noc_bcms[] = {
&bcm_co0,
&bcm_co1,
};
static struct qcom_icc_node * const compute_noc_nodes[] = {
[MASTER_NPU] = &qnm_npu,
[SLAVE_CDSP_MEM_NOC] = &qns_cdsp_mem_noc,
};
static const struct qcom_icc_desc sm8150_compute_noc = {
.nodes = compute_noc_nodes,
.num_nodes = ARRAY_SIZE(compute_noc_nodes),
.bcms = compute_noc_bcms,
.num_bcms = ARRAY_SIZE(compute_noc_bcms),
};
static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
};
static struct qcom_icc_node * const config_noc_nodes[] = {
[MASTER_SPDM] = &qhm_spdm,
[SNOC_CNOC_MAS] = &qnm_snoc,
[MASTER_QDSS_DAP] = &xm_qdss_dap,
[SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
[SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
[SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy_south,
[SLAVE_AOP] = &qhs_aop,
[SLAVE_AOSS] = &qhs_aoss,
[SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
[SLAVE_CLK_CTL] = &qhs_clk_ctl,
[SLAVE_CDSP_CFG] = &qhs_compute_dsp,
[SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
[SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx,
[SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
[SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
[SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
[SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
[SLAVE_EMAC_CFG] = &qhs_emac_cfg,
[SLAVE_GLM] = &qhs_glm,
[SLAVE_GRAPHICS_3D_CFG] = &qhs_gpuss_cfg,
[SLAVE_IMEM_CFG] = &qhs_imem_cfg,
[SLAVE_IPA_CFG] = &qhs_ipa,
[SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
[SLAVE_NPU_CFG] = &qhs_npu_cfg,
[SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
[SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
[SLAVE_NORTH_PHY_CFG] = &qhs_phy_refgen_north,
[SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
[SLAVE_PRNG] = &qhs_prng,
[SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
[SLAVE_QSPI] = &qhs_qspi,
[SLAVE_QUP_2] = &qhs_qupv3_east,
[SLAVE_QUP_1] = &qhs_qupv3_north,
[SLAVE_QUP_0] = &qhs_qupv3_south,
[SLAVE_SDCC_2] = &qhs_sdc2,
[SLAVE_SDCC_4] = &qhs_sdc4,
[SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
[SLAVE_SPDM_WRAPPER] = &qhs_spdm,
[SLAVE_SPSS_CFG] = &qhs_spss_cfg,
[SLAVE_SSC_CFG] = &qhs_ssc_cfg,
[SLAVE_TCSR] = &qhs_tcsr,
[SLAVE_TLMM_EAST] = &qhs_tlmm_east,
[SLAVE_TLMM_NORTH] = &qhs_tlmm_north,
[SLAVE_TLMM_SOUTH] = &qhs_tlmm_south,
[SLAVE_TLMM_WEST] = &qhs_tlmm_west,
[SLAVE_TSIF] = &qhs_tsif,
[SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg,
[SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
[SLAVE_USB3] = &qhs_usb3_0,
[SLAVE_USB3_1] = &qhs_usb3_1,
[SLAVE_VENUS_CFG] = &qhs_venus_cfg,
[SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
[SLAVE_CNOC_A2NOC] = &qns_cnoc_a2noc,
[SLAVE_SERVICE_CNOC] = &srvc_cnoc,
};
static const struct qcom_icc_desc sm8150_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
static struct qcom_icc_bcm * const dc_noc_bcms[] = {
};
static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
[SLAVE_GEM_NOC_CFG] = &qhs_memnoc,
};
static const struct qcom_icc_desc sm8150_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
.bcms = dc_noc_bcms,
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
&bcm_sh3,
&bcm_sh4,
&bcm_sh5,
};
static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_AMPSS_M0] = &acm_apps,
[MASTER_GPU_TCU] = &acm_gpu_tcu,
[MASTER_SYS_TCU] = &acm_sys_tcu,
[MASTER_GEM_NOC_CFG] = &qhm_gemnoc_cfg,
[MASTER_COMPUTE_NOC] = &qnm_cmpnoc,
[MASTER_GRAPHICS_3D] = &qnm_gpu,
[MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
[MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
[MASTER_GEM_NOC_PCIE_SNOC] = &qnm_pcie,
[MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
[MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
[MASTER_ECC] = &qxm_ecc,
[SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
[SLAVE_ECC] = &qns_ecc,
[SLAVE_GEM_NOC_SNOC] = &qns_gem_noc_snoc,
[SLAVE_LLCC] = &qns_llcc,
[SLAVE_SERVICE_GEM_NOC] = &srvc_gemnoc,
};
static const struct qcom_icc_desc sm8150_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
};
static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI_CH0] = &ebi,
};
static const struct qcom_icc_desc sm8150_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm2,
&bcm_mm3,
};
static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
[MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
[MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
[MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
[MASTER_MDP_PORT0] = &qxm_mdp0,
[MASTER_MDP_PORT1] = &qxm_mdp1,
[MASTER_ROTATOR] = &qxm_rot,
[MASTER_VIDEO_P0] = &qxm_venus0,
[MASTER_VIDEO_P1] = &qxm_venus1,
[MASTER_VIDEO_PROC] = &qxm_venus_arm9,
[SLAVE_MNOC_SF_MEM_NOC] = &qns2_mem_noc,
[SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
};
static const struct qcom_icc_desc sm8150_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn11,
&bcm_sn12,
&bcm_sn15,
&bcm_sn2,
&bcm_sn3,
&bcm_sn4,
&bcm_sn5,
&bcm_sn8,
&bcm_sn9,
};
static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_SNOC_CFG] = &qhm_snoc_cfg,
[A1NOC_SNOC_MAS] = &qnm_aggre1_noc,
[A2NOC_SNOC_MAS] = &qnm_aggre2_noc,
[MASTER_GEM_NOC_SNOC] = &qnm_gemnoc,
[MASTER_PIMEM] = &qxm_pimem,
[MASTER_GIC] = &xm_gic,
[SLAVE_APPSS] = &qhs_apss,
[SNOC_CNOC_SLV] = &qns_cnoc,
[SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
[SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
[SLAVE_OCIMEM] = &qxs_imem,
[SLAVE_PIMEM] = &qxs_pimem,
[SLAVE_SERVICE_SNOC] = &srvc_snoc,
[SLAVE_PCIE_0] = &xs_pcie_0,
[SLAVE_PCIE_1] = &xs_pcie_1,
[SLAVE_QDSS_STM] = &xs_qdss_stm,
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
static const struct qcom_icc_desc sm8150_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sm8150-aggre1-noc",
.data = &sm8150_aggre1_noc},
{ .compatible = "qcom,sm8150-aggre2-noc",
.data = &sm8150_aggre2_noc},
{ .compatible = "qcom,sm8150-camnoc-virt",
.data = &sm8150_camnoc_virt},
{ .compatible = "qcom,sm8150-compute-noc",
.data = &sm8150_compute_noc},
{ .compatible = "qcom,sm8150-config-noc",
.data = &sm8150_config_noc},
{ .compatible = "qcom,sm8150-dc-noc",
.data = &sm8150_dc_noc},
{ .compatible = "qcom,sm8150-gem-noc",
.data = &sm8150_gem_noc},
{ .compatible = "qcom,sm8150-mc-virt",
.data = &sm8150_mc_virt},
{ .compatible = "qcom,sm8150-mmss-noc",
.data = &sm8150_mmss_noc},
{ .compatible = "qcom,sm8150-system-noc",
.data = &sm8150_system_noc},
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
.remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sm8150",
.of_match_table = qnoc_of_match,
},
};
module_platform_driver(qnoc_driver);
MODULE_DESCRIPTION("Qualcomm SM8150 NoC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/interconnect/qcom/sm8150.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Brian Masney <[email protected]>
*
* Based on MSM bus code from downstream MSM kernel sources.
* Copyright (c) 2012-2013 The Linux Foundation. All rights reserved.
*
* Based on qcs404.c
* Copyright (C) 2019 Linaro Ltd
*
* Here's a rough representation that shows the various buses that form the
* Network On Chip (NOC) for the msm8974:
*
 *                         Multimedia Subsystem (MMSS)
 *         |----------+-----------------------------------+-----------|
 *                    |                                    |
 *                    |                                    |
 *        Config      |                     Bus Interface  |  Memory Controller
 *       |------------+-+-----------|        |------------+-+-----------|
 *                      |                                    |
 *                      |                                    |
 *                      |     System                         |
 *     |--------------+-+---------------------------------+-+-------------|
 *                    |                                    |
 *                    |                                    |
 *        Peripheral  |                       On Chip      |  Memory (OCMEM)
 *       |------------+-------------|        |------------+-------------|
*/
#include <dt-bindings/interconnect/qcom,msm8974.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "icc-rpm.h"
enum {
MSM8974_BIMC_MAS_AMPSS_M0 = 1,
MSM8974_BIMC_MAS_AMPSS_M1,
MSM8974_BIMC_MAS_MSS_PROC,
MSM8974_BIMC_TO_MNOC,
MSM8974_BIMC_TO_SNOC,
MSM8974_BIMC_SLV_EBI_CH0,
MSM8974_BIMC_SLV_AMPSS_L2,
MSM8974_CNOC_MAS_RPM_INST,
MSM8974_CNOC_MAS_RPM_DATA,
MSM8974_CNOC_MAS_RPM_SYS,
MSM8974_CNOC_MAS_DEHR,
MSM8974_CNOC_MAS_QDSS_DAP,
MSM8974_CNOC_MAS_SPDM,
MSM8974_CNOC_MAS_TIC,
MSM8974_CNOC_SLV_CLK_CTL,
MSM8974_CNOC_SLV_CNOC_MSS,
MSM8974_CNOC_SLV_SECURITY,
MSM8974_CNOC_SLV_TCSR,
MSM8974_CNOC_SLV_TLMM,
MSM8974_CNOC_SLV_CRYPTO_0_CFG,
MSM8974_CNOC_SLV_CRYPTO_1_CFG,
MSM8974_CNOC_SLV_IMEM_CFG,
MSM8974_CNOC_SLV_MESSAGE_RAM,
MSM8974_CNOC_SLV_BIMC_CFG,
MSM8974_CNOC_SLV_BOOT_ROM,
MSM8974_CNOC_SLV_PMIC_ARB,
MSM8974_CNOC_SLV_SPDM_WRAPPER,
MSM8974_CNOC_SLV_DEHR_CFG,
MSM8974_CNOC_SLV_MPM,
MSM8974_CNOC_SLV_QDSS_CFG,
MSM8974_CNOC_SLV_RBCPR_CFG,
MSM8974_CNOC_SLV_RBCPR_QDSS_APU_CFG,
MSM8974_CNOC_TO_SNOC,
MSM8974_CNOC_SLV_CNOC_ONOC_CFG,
MSM8974_CNOC_SLV_CNOC_MNOC_MMSS_CFG,
MSM8974_CNOC_SLV_CNOC_MNOC_CFG,
MSM8974_CNOC_SLV_PNOC_CFG,
MSM8974_CNOC_SLV_SNOC_MPU_CFG,
MSM8974_CNOC_SLV_SNOC_CFG,
MSM8974_CNOC_SLV_EBI1_DLL_CFG,
MSM8974_CNOC_SLV_PHY_APU_CFG,
MSM8974_CNOC_SLV_EBI1_PHY_CFG,
MSM8974_CNOC_SLV_RPM,
MSM8974_CNOC_SLV_SERVICE_CNOC,
MSM8974_MNOC_MAS_GRAPHICS_3D,
MSM8974_MNOC_MAS_JPEG,
MSM8974_MNOC_MAS_MDP_PORT0,
MSM8974_MNOC_MAS_VIDEO_P0,
MSM8974_MNOC_MAS_VIDEO_P1,
MSM8974_MNOC_MAS_VFE,
MSM8974_MNOC_TO_CNOC,
MSM8974_MNOC_TO_BIMC,
MSM8974_MNOC_SLV_CAMERA_CFG,
MSM8974_MNOC_SLV_DISPLAY_CFG,
MSM8974_MNOC_SLV_OCMEM_CFG,
MSM8974_MNOC_SLV_CPR_CFG,
MSM8974_MNOC_SLV_CPR_XPU_CFG,
MSM8974_MNOC_SLV_MISC_CFG,
MSM8974_MNOC_SLV_MISC_XPU_CFG,
MSM8974_MNOC_SLV_VENUS_CFG,
MSM8974_MNOC_SLV_GRAPHICS_3D_CFG,
MSM8974_MNOC_SLV_MMSS_CLK_CFG,
MSM8974_MNOC_SLV_MMSS_CLK_XPU_CFG,
MSM8974_MNOC_SLV_MNOC_MPU_CFG,
MSM8974_MNOC_SLV_ONOC_MPU_CFG,
MSM8974_MNOC_SLV_SERVICE_MNOC,
MSM8974_OCMEM_NOC_TO_OCMEM_VNOC,
MSM8974_OCMEM_MAS_JPEG_OCMEM,
MSM8974_OCMEM_MAS_MDP_OCMEM,
MSM8974_OCMEM_MAS_VIDEO_P0_OCMEM,
MSM8974_OCMEM_MAS_VIDEO_P1_OCMEM,
MSM8974_OCMEM_MAS_VFE_OCMEM,
MSM8974_OCMEM_MAS_CNOC_ONOC_CFG,
MSM8974_OCMEM_SLV_SERVICE_ONOC,
MSM8974_OCMEM_VNOC_TO_SNOC,
MSM8974_OCMEM_VNOC_TO_OCMEM_NOC,
MSM8974_OCMEM_VNOC_MAS_GFX3D,
MSM8974_OCMEM_SLV_OCMEM,
MSM8974_PNOC_MAS_PNOC_CFG,
MSM8974_PNOC_MAS_SDCC_1,
MSM8974_PNOC_MAS_SDCC_3,
MSM8974_PNOC_MAS_SDCC_4,
MSM8974_PNOC_MAS_SDCC_2,
MSM8974_PNOC_MAS_TSIF,
MSM8974_PNOC_MAS_BAM_DMA,
MSM8974_PNOC_MAS_BLSP_2,
MSM8974_PNOC_MAS_USB_HSIC,
MSM8974_PNOC_MAS_BLSP_1,
MSM8974_PNOC_MAS_USB_HS,
MSM8974_PNOC_TO_SNOC,
MSM8974_PNOC_SLV_SDCC_1,
MSM8974_PNOC_SLV_SDCC_3,
MSM8974_PNOC_SLV_SDCC_2,
MSM8974_PNOC_SLV_SDCC_4,
MSM8974_PNOC_SLV_TSIF,
MSM8974_PNOC_SLV_BAM_DMA,
MSM8974_PNOC_SLV_BLSP_2,
MSM8974_PNOC_SLV_USB_HSIC,
MSM8974_PNOC_SLV_BLSP_1,
MSM8974_PNOC_SLV_USB_HS,
MSM8974_PNOC_SLV_PDM,
MSM8974_PNOC_SLV_PERIPH_APU_CFG,
MSM8974_PNOC_SLV_PNOC_MPU_CFG,
MSM8974_PNOC_SLV_PRNG,
MSM8974_PNOC_SLV_SERVICE_PNOC,
MSM8974_SNOC_MAS_LPASS_AHB,
MSM8974_SNOC_MAS_QDSS_BAM,
MSM8974_SNOC_MAS_SNOC_CFG,
MSM8974_SNOC_TO_BIMC,
MSM8974_SNOC_TO_CNOC,
MSM8974_SNOC_TO_PNOC,
MSM8974_SNOC_TO_OCMEM_VNOC,
MSM8974_SNOC_MAS_CRYPTO_CORE0,
MSM8974_SNOC_MAS_CRYPTO_CORE1,
MSM8974_SNOC_MAS_LPASS_PROC,
MSM8974_SNOC_MAS_MSS,
MSM8974_SNOC_MAS_MSS_NAV,
MSM8974_SNOC_MAS_OCMEM_DMA,
MSM8974_SNOC_MAS_WCSS,
MSM8974_SNOC_MAS_QDSS_ETR,
MSM8974_SNOC_MAS_USB3,
MSM8974_SNOC_SLV_AMPSS,
MSM8974_SNOC_SLV_LPASS,
MSM8974_SNOC_SLV_USB3,
MSM8974_SNOC_SLV_WCSS,
MSM8974_SNOC_SLV_OCIMEM,
MSM8974_SNOC_SLV_SNOC_OCMEM,
MSM8974_SNOC_SLV_SERVICE_SNOC,
MSM8974_SNOC_SLV_QDSS_STM,
};
#define RPM_BUS_MASTER_REQ 0x73616d62
#define RPM_BUS_SLAVE_REQ 0x766c7362
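/*
 * The RPM resource types are little-endian ASCII tags: 0x73616d62 is
 * "bmas" (bus master request) and 0x766c7362 is "bslv" (bus slave
 * request).
 */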
#define to_msm8974_icc_provider(_provider) \
container_of(_provider, struct msm8974_icc_provider, provider)
static const struct clk_bulk_data msm8974_icc_bus_clocks[] = {
{ .id = "bus" },
{ .id = "bus_a" },
};
/**
* struct msm8974_icc_provider - Qualcomm specific interconnect provider
* @provider: generic interconnect provider
* @bus_clks: the clk_bulk_data table of bus clocks
* @num_clks: the total number of clk_bulk_data entries
*/
struct msm8974_icc_provider {
struct icc_provider provider;
struct clk_bulk_data *bus_clks;
int num_clks;
};
#define MSM8974_ICC_MAX_LINKS 3
/**
* struct msm8974_icc_node - Qualcomm specific interconnect nodes
* @name: the node name used in debugfs
* @id: a unique node identifier
* @links: an array of nodes where we can go next while traversing
* @num_links: the total number of @links
* @buswidth: width of the interconnect between a node and the bus (bytes)
* @mas_rpm_id: RPM ID for devices that are bus masters
* @slv_rpm_id: RPM ID for devices that are bus slaves
* @rate: current bus clock rate in Hz
*/
struct msm8974_icc_node {
unsigned char *name;
u16 id;
u16 links[MSM8974_ICC_MAX_LINKS];
u16 num_links;
u16 buswidth;
int mas_rpm_id;
int slv_rpm_id;
u64 rate;
};
struct msm8974_icc_desc {
struct msm8974_icc_node * const *nodes;
size_t num_nodes;
};
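/*
 * DEFINE_QNODE counts the trailing link IDs by materializing them into an
 * anonymous int array and taking its ARRAY_SIZE, so a node declared with
 * no links ends up with num_links == 0.
 */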
#define DEFINE_QNODE(_name, _id, _buswidth, _mas_rpm_id, _slv_rpm_id, \
...) \
static struct msm8974_icc_node _name = { \
.name = #_name, \
.id = _id, \
.buswidth = _buswidth, \
.mas_rpm_id = _mas_rpm_id, \
.slv_rpm_id = _slv_rpm_id, \
.num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
.links = { __VA_ARGS__ }, \
}
DEFINE_QNODE(mas_ampss_m0, MSM8974_BIMC_MAS_AMPSS_M0, 8, 0, -1);
DEFINE_QNODE(mas_ampss_m1, MSM8974_BIMC_MAS_AMPSS_M1, 8, 0, -1);
DEFINE_QNODE(mas_mss_proc, MSM8974_BIMC_MAS_MSS_PROC, 8, 1, -1);
DEFINE_QNODE(bimc_to_mnoc, MSM8974_BIMC_TO_MNOC, 8, 2, -1, MSM8974_BIMC_SLV_EBI_CH0);
DEFINE_QNODE(bimc_to_snoc, MSM8974_BIMC_TO_SNOC, 8, 3, 2, MSM8974_SNOC_TO_BIMC, MSM8974_BIMC_SLV_EBI_CH0, MSM8974_BIMC_MAS_AMPSS_M0);
DEFINE_QNODE(slv_ebi_ch0, MSM8974_BIMC_SLV_EBI_CH0, 8, -1, 0);
DEFINE_QNODE(slv_ampss_l2, MSM8974_BIMC_SLV_AMPSS_L2, 8, -1, 1);
static struct msm8974_icc_node * const msm8974_bimc_nodes[] = {
[BIMC_MAS_AMPSS_M0] = &mas_ampss_m0,
[BIMC_MAS_AMPSS_M1] = &mas_ampss_m1,
[BIMC_MAS_MSS_PROC] = &mas_mss_proc,
[BIMC_TO_MNOC] = &bimc_to_mnoc,
[BIMC_TO_SNOC] = &bimc_to_snoc,
[BIMC_SLV_EBI_CH0] = &slv_ebi_ch0,
[BIMC_SLV_AMPSS_L2] = &slv_ampss_l2,
};
static const struct msm8974_icc_desc msm8974_bimc = {
.nodes = msm8974_bimc_nodes,
.num_nodes = ARRAY_SIZE(msm8974_bimc_nodes),
};
DEFINE_QNODE(mas_rpm_inst, MSM8974_CNOC_MAS_RPM_INST, 8, 45, -1);
DEFINE_QNODE(mas_rpm_data, MSM8974_CNOC_MAS_RPM_DATA, 8, 46, -1);
DEFINE_QNODE(mas_rpm_sys, MSM8974_CNOC_MAS_RPM_SYS, 8, 47, -1);
DEFINE_QNODE(mas_dehr, MSM8974_CNOC_MAS_DEHR, 8, 48, -1);
DEFINE_QNODE(mas_qdss_dap, MSM8974_CNOC_MAS_QDSS_DAP, 8, 49, -1);
DEFINE_QNODE(mas_spdm, MSM8974_CNOC_MAS_SPDM, 8, 50, -1);
DEFINE_QNODE(mas_tic, MSM8974_CNOC_MAS_TIC, 8, 51, -1);
DEFINE_QNODE(slv_clk_ctl, MSM8974_CNOC_SLV_CLK_CTL, 8, -1, 47);
DEFINE_QNODE(slv_cnoc_mss, MSM8974_CNOC_SLV_CNOC_MSS, 8, -1, 48);
DEFINE_QNODE(slv_security, MSM8974_CNOC_SLV_SECURITY, 8, -1, 49);
DEFINE_QNODE(slv_tcsr, MSM8974_CNOC_SLV_TCSR, 8, -1, 50);
DEFINE_QNODE(slv_tlmm, MSM8974_CNOC_SLV_TLMM, 8, -1, 51);
DEFINE_QNODE(slv_crypto_0_cfg, MSM8974_CNOC_SLV_CRYPTO_0_CFG, 8, -1, 52);
DEFINE_QNODE(slv_crypto_1_cfg, MSM8974_CNOC_SLV_CRYPTO_1_CFG, 8, -1, 53);
DEFINE_QNODE(slv_imem_cfg, MSM8974_CNOC_SLV_IMEM_CFG, 8, -1, 54);
DEFINE_QNODE(slv_message_ram, MSM8974_CNOC_SLV_MESSAGE_RAM, 8, -1, 55);
DEFINE_QNODE(slv_bimc_cfg, MSM8974_CNOC_SLV_BIMC_CFG, 8, -1, 56);
DEFINE_QNODE(slv_boot_rom, MSM8974_CNOC_SLV_BOOT_ROM, 8, -1, 57);
DEFINE_QNODE(slv_pmic_arb, MSM8974_CNOC_SLV_PMIC_ARB, 8, -1, 59);
DEFINE_QNODE(slv_spdm_wrapper, MSM8974_CNOC_SLV_SPDM_WRAPPER, 8, -1, 60);
DEFINE_QNODE(slv_dehr_cfg, MSM8974_CNOC_SLV_DEHR_CFG, 8, -1, 61);
DEFINE_QNODE(slv_mpm, MSM8974_CNOC_SLV_MPM, 8, -1, 62);
DEFINE_QNODE(slv_qdss_cfg, MSM8974_CNOC_SLV_QDSS_CFG, 8, -1, 63);
DEFINE_QNODE(slv_rbcpr_cfg, MSM8974_CNOC_SLV_RBCPR_CFG, 8, -1, 64);
DEFINE_QNODE(slv_rbcpr_qdss_apu_cfg, MSM8974_CNOC_SLV_RBCPR_QDSS_APU_CFG, 8, -1, 65);
DEFINE_QNODE(cnoc_to_snoc, MSM8974_CNOC_TO_SNOC, 8, 52, 75);
DEFINE_QNODE(slv_cnoc_onoc_cfg, MSM8974_CNOC_SLV_CNOC_ONOC_CFG, 8, -1, 68);
DEFINE_QNODE(slv_cnoc_mnoc_mmss_cfg, MSM8974_CNOC_SLV_CNOC_MNOC_MMSS_CFG, 8, -1, 58);
DEFINE_QNODE(slv_cnoc_mnoc_cfg, MSM8974_CNOC_SLV_CNOC_MNOC_CFG, 8, -1, 66);
DEFINE_QNODE(slv_pnoc_cfg, MSM8974_CNOC_SLV_PNOC_CFG, 8, -1, 69);
DEFINE_QNODE(slv_snoc_mpu_cfg, MSM8974_CNOC_SLV_SNOC_MPU_CFG, 8, -1, 67);
DEFINE_QNODE(slv_snoc_cfg, MSM8974_CNOC_SLV_SNOC_CFG, 8, -1, 70);
DEFINE_QNODE(slv_ebi1_dll_cfg, MSM8974_CNOC_SLV_EBI1_DLL_CFG, 8, -1, 71);
DEFINE_QNODE(slv_phy_apu_cfg, MSM8974_CNOC_SLV_PHY_APU_CFG, 8, -1, 72);
DEFINE_QNODE(slv_ebi1_phy_cfg, MSM8974_CNOC_SLV_EBI1_PHY_CFG, 8, -1, 73);
DEFINE_QNODE(slv_rpm, MSM8974_CNOC_SLV_RPM, 8, -1, 74);
DEFINE_QNODE(slv_service_cnoc, MSM8974_CNOC_SLV_SERVICE_CNOC, 8, -1, 76);
static struct msm8974_icc_node * const msm8974_cnoc_nodes[] = {
[CNOC_MAS_RPM_INST] = &mas_rpm_inst,
[CNOC_MAS_RPM_DATA] = &mas_rpm_data,
[CNOC_MAS_RPM_SYS] = &mas_rpm_sys,
[CNOC_MAS_DEHR] = &mas_dehr,
[CNOC_MAS_QDSS_DAP] = &mas_qdss_dap,
[CNOC_MAS_SPDM] = &mas_spdm,
[CNOC_MAS_TIC] = &mas_tic,
[CNOC_SLV_CLK_CTL] = &slv_clk_ctl,
[CNOC_SLV_CNOC_MSS] = &slv_cnoc_mss,
[CNOC_SLV_SECURITY] = &slv_security,
[CNOC_SLV_TCSR] = &slv_tcsr,
[CNOC_SLV_TLMM] = &slv_tlmm,
[CNOC_SLV_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
[CNOC_SLV_CRYPTO_1_CFG] = &slv_crypto_1_cfg,
[CNOC_SLV_IMEM_CFG] = &slv_imem_cfg,
[CNOC_SLV_MESSAGE_RAM] = &slv_message_ram,
[CNOC_SLV_BIMC_CFG] = &slv_bimc_cfg,
[CNOC_SLV_BOOT_ROM] = &slv_boot_rom,
[CNOC_SLV_PMIC_ARB] = &slv_pmic_arb,
[CNOC_SLV_SPDM_WRAPPER] = &slv_spdm_wrapper,
[CNOC_SLV_DEHR_CFG] = &slv_dehr_cfg,
[CNOC_SLV_MPM] = &slv_mpm,
[CNOC_SLV_QDSS_CFG] = &slv_qdss_cfg,
[CNOC_SLV_RBCPR_CFG] = &slv_rbcpr_cfg,
[CNOC_SLV_RBCPR_QDSS_APU_CFG] = &slv_rbcpr_qdss_apu_cfg,
[CNOC_TO_SNOC] = &cnoc_to_snoc,
[CNOC_SLV_CNOC_ONOC_CFG] = &slv_cnoc_onoc_cfg,
[CNOC_SLV_CNOC_MNOC_MMSS_CFG] = &slv_cnoc_mnoc_mmss_cfg,
[CNOC_SLV_CNOC_MNOC_CFG] = &slv_cnoc_mnoc_cfg,
[CNOC_SLV_PNOC_CFG] = &slv_pnoc_cfg,
[CNOC_SLV_SNOC_MPU_CFG] = &slv_snoc_mpu_cfg,
[CNOC_SLV_SNOC_CFG] = &slv_snoc_cfg,
[CNOC_SLV_EBI1_DLL_CFG] = &slv_ebi1_dll_cfg,
[CNOC_SLV_PHY_APU_CFG] = &slv_phy_apu_cfg,
[CNOC_SLV_EBI1_PHY_CFG] = &slv_ebi1_phy_cfg,
[CNOC_SLV_RPM] = &slv_rpm,
[CNOC_SLV_SERVICE_CNOC] = &slv_service_cnoc,
};
static const struct msm8974_icc_desc msm8974_cnoc = {
.nodes = msm8974_cnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8974_cnoc_nodes),
};
DEFINE_QNODE(mas_graphics_3d, MSM8974_MNOC_MAS_GRAPHICS_3D, 16, 6, -1, MSM8974_MNOC_TO_BIMC);
DEFINE_QNODE(mas_jpeg, MSM8974_MNOC_MAS_JPEG, 16, 7, -1, MSM8974_MNOC_TO_BIMC);
DEFINE_QNODE(mas_mdp_port0, MSM8974_MNOC_MAS_MDP_PORT0, 16, 8, -1, MSM8974_MNOC_TO_BIMC);
DEFINE_QNODE(mas_video_p0, MSM8974_MNOC_MAS_VIDEO_P0, 16, 9, -1);
DEFINE_QNODE(mas_video_p1, MSM8974_MNOC_MAS_VIDEO_P1, 16, 10, -1);
DEFINE_QNODE(mas_vfe, MSM8974_MNOC_MAS_VFE, 16, 11, -1, MSM8974_MNOC_TO_BIMC);
DEFINE_QNODE(mnoc_to_cnoc, MSM8974_MNOC_TO_CNOC, 16, 4, -1);
DEFINE_QNODE(mnoc_to_bimc, MSM8974_MNOC_TO_BIMC, 16, -1, 16, MSM8974_BIMC_TO_MNOC);
DEFINE_QNODE(slv_camera_cfg, MSM8974_MNOC_SLV_CAMERA_CFG, 16, -1, 3);
DEFINE_QNODE(slv_display_cfg, MSM8974_MNOC_SLV_DISPLAY_CFG, 16, -1, 4);
DEFINE_QNODE(slv_ocmem_cfg, MSM8974_MNOC_SLV_OCMEM_CFG, 16, -1, 5);
DEFINE_QNODE(slv_cpr_cfg, MSM8974_MNOC_SLV_CPR_CFG, 16, -1, 6);
DEFINE_QNODE(slv_cpr_xpu_cfg, MSM8974_MNOC_SLV_CPR_XPU_CFG, 16, -1, 7);
DEFINE_QNODE(slv_misc_cfg, MSM8974_MNOC_SLV_MISC_CFG, 16, -1, 8);
DEFINE_QNODE(slv_misc_xpu_cfg, MSM8974_MNOC_SLV_MISC_XPU_CFG, 16, -1, 9);
DEFINE_QNODE(slv_venus_cfg, MSM8974_MNOC_SLV_VENUS_CFG, 16, -1, 10);
DEFINE_QNODE(slv_graphics_3d_cfg, MSM8974_MNOC_SLV_GRAPHICS_3D_CFG, 16, -1, 11);
DEFINE_QNODE(slv_mmss_clk_cfg, MSM8974_MNOC_SLV_MMSS_CLK_CFG, 16, -1, 12);
DEFINE_QNODE(slv_mmss_clk_xpu_cfg, MSM8974_MNOC_SLV_MMSS_CLK_XPU_CFG, 16, -1, 13);
DEFINE_QNODE(slv_mnoc_mpu_cfg, MSM8974_MNOC_SLV_MNOC_MPU_CFG, 16, -1, 14);
DEFINE_QNODE(slv_onoc_mpu_cfg, MSM8974_MNOC_SLV_ONOC_MPU_CFG, 16, -1, 15);
DEFINE_QNODE(slv_service_mnoc, MSM8974_MNOC_SLV_SERVICE_MNOC, 16, -1, 17);
static struct msm8974_icc_node * const msm8974_mnoc_nodes[] = {
[MNOC_MAS_GRAPHICS_3D] = &mas_graphics_3d,
[MNOC_MAS_JPEG] = &mas_jpeg,
[MNOC_MAS_MDP_PORT0] = &mas_mdp_port0,
[MNOC_MAS_VIDEO_P0] = &mas_video_p0,
[MNOC_MAS_VIDEO_P1] = &mas_video_p1,
[MNOC_MAS_VFE] = &mas_vfe,
[MNOC_TO_CNOC] = &mnoc_to_cnoc,
[MNOC_TO_BIMC] = &mnoc_to_bimc,
[MNOC_SLV_CAMERA_CFG] = &slv_camera_cfg,
[MNOC_SLV_DISPLAY_CFG] = &slv_display_cfg,
[MNOC_SLV_OCMEM_CFG] = &slv_ocmem_cfg,
[MNOC_SLV_CPR_CFG] = &slv_cpr_cfg,
[MNOC_SLV_CPR_XPU_CFG] = &slv_cpr_xpu_cfg,
[MNOC_SLV_MISC_CFG] = &slv_misc_cfg,
[MNOC_SLV_MISC_XPU_CFG] = &slv_misc_xpu_cfg,
[MNOC_SLV_VENUS_CFG] = &slv_venus_cfg,
[MNOC_SLV_GRAPHICS_3D_CFG] = &slv_graphics_3d_cfg,
[MNOC_SLV_MMSS_CLK_CFG] = &slv_mmss_clk_cfg,
[MNOC_SLV_MMSS_CLK_XPU_CFG] = &slv_mmss_clk_xpu_cfg,
[MNOC_SLV_MNOC_MPU_CFG] = &slv_mnoc_mpu_cfg,
[MNOC_SLV_ONOC_MPU_CFG] = &slv_onoc_mpu_cfg,
[MNOC_SLV_SERVICE_MNOC] = &slv_service_mnoc,
};
static const struct msm8974_icc_desc msm8974_mnoc = {
.nodes = msm8974_mnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8974_mnoc_nodes),
};
DEFINE_QNODE(ocmem_noc_to_ocmem_vnoc, MSM8974_OCMEM_NOC_TO_OCMEM_VNOC, 16, 54, 78, MSM8974_OCMEM_SLV_OCMEM);
DEFINE_QNODE(mas_jpeg_ocmem, MSM8974_OCMEM_MAS_JPEG_OCMEM, 16, 13, -1);
DEFINE_QNODE(mas_mdp_ocmem, MSM8974_OCMEM_MAS_MDP_OCMEM, 16, 14, -1);
DEFINE_QNODE(mas_video_p0_ocmem, MSM8974_OCMEM_MAS_VIDEO_P0_OCMEM, 16, 15, -1);
DEFINE_QNODE(mas_video_p1_ocmem, MSM8974_OCMEM_MAS_VIDEO_P1_OCMEM, 16, 16, -1);
DEFINE_QNODE(mas_vfe_ocmem, MSM8974_OCMEM_MAS_VFE_OCMEM, 16, 17, -1);
DEFINE_QNODE(mas_cnoc_onoc_cfg, MSM8974_OCMEM_MAS_CNOC_ONOC_CFG, 16, 12, -1);
DEFINE_QNODE(slv_service_onoc, MSM8974_OCMEM_SLV_SERVICE_ONOC, 16, -1, 19);
DEFINE_QNODE(slv_ocmem, MSM8974_OCMEM_SLV_OCMEM, 16, -1, 18);
/* Virtual NoC is needed for connection to OCMEM */
DEFINE_QNODE(ocmem_vnoc_to_onoc, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC, 16, 56, 79, MSM8974_OCMEM_NOC_TO_OCMEM_VNOC);
DEFINE_QNODE(ocmem_vnoc_to_snoc, MSM8974_OCMEM_VNOC_TO_SNOC, 8, 57, 80);
DEFINE_QNODE(mas_v_ocmem_gfx3d, MSM8974_OCMEM_VNOC_MAS_GFX3D, 8, 55, -1, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC);
static struct msm8974_icc_node * const msm8974_onoc_nodes[] = {
[OCMEM_NOC_TO_OCMEM_VNOC] = &ocmem_noc_to_ocmem_vnoc,
[OCMEM_MAS_JPEG_OCMEM] = &mas_jpeg_ocmem,
[OCMEM_MAS_MDP_OCMEM] = &mas_mdp_ocmem,
[OCMEM_MAS_VIDEO_P0_OCMEM] = &mas_video_p0_ocmem,
[OCMEM_MAS_VIDEO_P1_OCMEM] = &mas_video_p1_ocmem,
[OCMEM_MAS_VFE_OCMEM] = &mas_vfe_ocmem,
[OCMEM_MAS_CNOC_ONOC_CFG] = &mas_cnoc_onoc_cfg,
[OCMEM_SLV_SERVICE_ONOC] = &slv_service_onoc,
[OCMEM_VNOC_TO_SNOC] = &ocmem_vnoc_to_snoc,
[OCMEM_VNOC_TO_OCMEM_NOC] = &ocmem_vnoc_to_onoc,
[OCMEM_VNOC_MAS_GFX3D] = &mas_v_ocmem_gfx3d,
[OCMEM_SLV_OCMEM] = &slv_ocmem,
};
static const struct msm8974_icc_desc msm8974_onoc = {
.nodes = msm8974_onoc_nodes,
.num_nodes = ARRAY_SIZE(msm8974_onoc_nodes),
};
DEFINE_QNODE(mas_pnoc_cfg, MSM8974_PNOC_MAS_PNOC_CFG, 8, 43, -1);
DEFINE_QNODE(mas_sdcc_1, MSM8974_PNOC_MAS_SDCC_1, 8, 33, -1, MSM8974_PNOC_TO_SNOC);
DEFINE_QNODE(mas_sdcc_3, MSM8974_PNOC_MAS_SDCC_3, 8, 34, -1, MSM8974_PNOC_TO_SNOC);
DEFINE_QNODE(mas_sdcc_4, MSM8974_PNOC_MAS_SDCC_4, 8, 36, -1, MSM8974_PNOC_TO_SNOC);
DEFINE_QNODE(mas_sdcc_2, MSM8974_PNOC_MAS_SDCC_2, 8, 35, -1, MSM8974_PNOC_TO_SNOC);
DEFINE_QNODE(mas_tsif, MSM8974_PNOC_MAS_TSIF, 8, 37, -1, MSM8974_PNOC_TO_SNOC);
DEFINE_QNODE(mas_bam_dma, MSM8974_PNOC_MAS_BAM_DMA, 8, 38, -1);
DEFINE_QNODE(mas_blsp_2, MSM8974_PNOC_MAS_BLSP_2, 8, 39, -1, MSM8974_PNOC_TO_SNOC);
DEFINE_QNODE(mas_usb_hsic, MSM8974_PNOC_MAS_USB_HSIC, 8, 40, -1, MSM8974_PNOC_TO_SNOC);
DEFINE_QNODE(mas_blsp_1, MSM8974_PNOC_MAS_BLSP_1, 8, 41, -1, MSM8974_PNOC_TO_SNOC);
DEFINE_QNODE(mas_usb_hs, MSM8974_PNOC_MAS_USB_HS, 8, 42, -1, MSM8974_PNOC_TO_SNOC);
DEFINE_QNODE(pnoc_to_snoc, MSM8974_PNOC_TO_SNOC, 8, 44, 45, MSM8974_SNOC_TO_PNOC, MSM8974_PNOC_SLV_PRNG);
DEFINE_QNODE(slv_sdcc_1, MSM8974_PNOC_SLV_SDCC_1, 8, -1, 31);
DEFINE_QNODE(slv_sdcc_3, MSM8974_PNOC_SLV_SDCC_3, 8, -1, 32);
DEFINE_QNODE(slv_sdcc_2, MSM8974_PNOC_SLV_SDCC_2, 8, -1, 33);
DEFINE_QNODE(slv_sdcc_4, MSM8974_PNOC_SLV_SDCC_4, 8, -1, 34);
DEFINE_QNODE(slv_tsif, MSM8974_PNOC_SLV_TSIF, 8, -1, 35);
DEFINE_QNODE(slv_bam_dma, MSM8974_PNOC_SLV_BAM_DMA, 8, -1, 36);
DEFINE_QNODE(slv_blsp_2, MSM8974_PNOC_SLV_BLSP_2, 8, -1, 37);
DEFINE_QNODE(slv_usb_hsic, MSM8974_PNOC_SLV_USB_HSIC, 8, -1, 38);
DEFINE_QNODE(slv_blsp_1, MSM8974_PNOC_SLV_BLSP_1, 8, -1, 39);
DEFINE_QNODE(slv_usb_hs, MSM8974_PNOC_SLV_USB_HS, 8, -1, 40);
DEFINE_QNODE(slv_pdm, MSM8974_PNOC_SLV_PDM, 8, -1, 41);
DEFINE_QNODE(slv_periph_apu_cfg, MSM8974_PNOC_SLV_PERIPH_APU_CFG, 8, -1, 42);
DEFINE_QNODE(slv_pnoc_mpu_cfg, MSM8974_PNOC_SLV_PNOC_MPU_CFG, 8, -1, 43);
DEFINE_QNODE(slv_prng, MSM8974_PNOC_SLV_PRNG, 8, -1, 44, MSM8974_PNOC_TO_SNOC);
DEFINE_QNODE(slv_service_pnoc, MSM8974_PNOC_SLV_SERVICE_PNOC, 8, -1, 46);
static struct msm8974_icc_node * const msm8974_pnoc_nodes[] = {
[PNOC_MAS_PNOC_CFG] = &mas_pnoc_cfg,
[PNOC_MAS_SDCC_1] = &mas_sdcc_1,
[PNOC_MAS_SDCC_3] = &mas_sdcc_3,
[PNOC_MAS_SDCC_4] = &mas_sdcc_4,
[PNOC_MAS_SDCC_2] = &mas_sdcc_2,
[PNOC_MAS_TSIF] = &mas_tsif,
[PNOC_MAS_BAM_DMA] = &mas_bam_dma,
[PNOC_MAS_BLSP_2] = &mas_blsp_2,
[PNOC_MAS_USB_HSIC] = &mas_usb_hsic,
[PNOC_MAS_BLSP_1] = &mas_blsp_1,
[PNOC_MAS_USB_HS] = &mas_usb_hs,
[PNOC_TO_SNOC] = &pnoc_to_snoc,
[PNOC_SLV_SDCC_1] = &slv_sdcc_1,
[PNOC_SLV_SDCC_3] = &slv_sdcc_3,
[PNOC_SLV_SDCC_2] = &slv_sdcc_2,
[PNOC_SLV_SDCC_4] = &slv_sdcc_4,
[PNOC_SLV_TSIF] = &slv_tsif,
[PNOC_SLV_BAM_DMA] = &slv_bam_dma,
[PNOC_SLV_BLSP_2] = &slv_blsp_2,
[PNOC_SLV_USB_HSIC] = &slv_usb_hsic,
[PNOC_SLV_BLSP_1] = &slv_blsp_1,
[PNOC_SLV_USB_HS] = &slv_usb_hs,
[PNOC_SLV_PDM] = &slv_pdm,
[PNOC_SLV_PERIPH_APU_CFG] = &slv_periph_apu_cfg,
[PNOC_SLV_PNOC_MPU_CFG] = &slv_pnoc_mpu_cfg,
[PNOC_SLV_PRNG] = &slv_prng,
[PNOC_SLV_SERVICE_PNOC] = &slv_service_pnoc,
};
static const struct msm8974_icc_desc msm8974_pnoc = {
.nodes = msm8974_pnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8974_pnoc_nodes),
};
DEFINE_QNODE(mas_lpass_ahb, MSM8974_SNOC_MAS_LPASS_AHB, 8, 18, -1);
DEFINE_QNODE(mas_qdss_bam, MSM8974_SNOC_MAS_QDSS_BAM, 8, 19, -1);
DEFINE_QNODE(mas_snoc_cfg, MSM8974_SNOC_MAS_SNOC_CFG, 8, 20, -1);
DEFINE_QNODE(snoc_to_bimc, MSM8974_SNOC_TO_BIMC, 8, 21, 24, MSM8974_BIMC_TO_SNOC);
DEFINE_QNODE(snoc_to_cnoc, MSM8974_SNOC_TO_CNOC, 8, 22, 25);
DEFINE_QNODE(snoc_to_pnoc, MSM8974_SNOC_TO_PNOC, 8, 29, 28, MSM8974_PNOC_TO_SNOC);
DEFINE_QNODE(snoc_to_ocmem_vnoc, MSM8974_SNOC_TO_OCMEM_VNOC, 8, 53, 77, MSM8974_OCMEM_VNOC_TO_OCMEM_NOC);
DEFINE_QNODE(mas_crypto_core0, MSM8974_SNOC_MAS_CRYPTO_CORE0, 8, 23, -1, MSM8974_SNOC_TO_BIMC);
DEFINE_QNODE(mas_crypto_core1, MSM8974_SNOC_MAS_CRYPTO_CORE1, 8, 24, -1);
DEFINE_QNODE(mas_lpass_proc, MSM8974_SNOC_MAS_LPASS_PROC, 8, 25, -1, MSM8974_SNOC_TO_OCMEM_VNOC);
DEFINE_QNODE(mas_mss, MSM8974_SNOC_MAS_MSS, 8, 26, -1);
DEFINE_QNODE(mas_mss_nav, MSM8974_SNOC_MAS_MSS_NAV, 8, 27, -1);
DEFINE_QNODE(mas_ocmem_dma, MSM8974_SNOC_MAS_OCMEM_DMA, 8, 28, -1);
DEFINE_QNODE(mas_wcss, MSM8974_SNOC_MAS_WCSS, 8, 30, -1);
DEFINE_QNODE(mas_qdss_etr, MSM8974_SNOC_MAS_QDSS_ETR, 8, 31, -1);
DEFINE_QNODE(mas_usb3, MSM8974_SNOC_MAS_USB3, 8, 32, -1, MSM8974_SNOC_TO_BIMC);
DEFINE_QNODE(slv_ampss, MSM8974_SNOC_SLV_AMPSS, 8, -1, 20);
DEFINE_QNODE(slv_lpass, MSM8974_SNOC_SLV_LPASS, 8, -1, 21);
DEFINE_QNODE(slv_usb3, MSM8974_SNOC_SLV_USB3, 8, -1, 22);
DEFINE_QNODE(slv_wcss, MSM8974_SNOC_SLV_WCSS, 8, -1, 23);
DEFINE_QNODE(slv_ocimem, MSM8974_SNOC_SLV_OCIMEM, 8, -1, 26);
DEFINE_QNODE(slv_snoc_ocmem, MSM8974_SNOC_SLV_SNOC_OCMEM, 8, -1, 27);
DEFINE_QNODE(slv_service_snoc, MSM8974_SNOC_SLV_SERVICE_SNOC, 8, -1, 29);
DEFINE_QNODE(slv_qdss_stm, MSM8974_SNOC_SLV_QDSS_STM, 8, -1, 30);
static struct msm8974_icc_node * const msm8974_snoc_nodes[] = {
[SNOC_MAS_LPASS_AHB] = &mas_lpass_ahb,
[SNOC_MAS_QDSS_BAM] = &mas_qdss_bam,
[SNOC_MAS_SNOC_CFG] = &mas_snoc_cfg,
[SNOC_TO_BIMC] = &snoc_to_bimc,
[SNOC_TO_CNOC] = &snoc_to_cnoc,
[SNOC_TO_PNOC] = &snoc_to_pnoc,
[SNOC_TO_OCMEM_VNOC] = &snoc_to_ocmem_vnoc,
[SNOC_MAS_CRYPTO_CORE0] = &mas_crypto_core0,
[SNOC_MAS_CRYPTO_CORE1] = &mas_crypto_core1,
[SNOC_MAS_LPASS_PROC] = &mas_lpass_proc,
[SNOC_MAS_MSS] = &mas_mss,
[SNOC_MAS_MSS_NAV] = &mas_mss_nav,
[SNOC_MAS_OCMEM_DMA] = &mas_ocmem_dma,
[SNOC_MAS_WCSS] = &mas_wcss,
[SNOC_MAS_QDSS_ETR] = &mas_qdss_etr,
[SNOC_MAS_USB3] = &mas_usb3,
[SNOC_SLV_AMPSS] = &slv_ampss,
[SNOC_SLV_LPASS] = &slv_lpass,
[SNOC_SLV_USB3] = &slv_usb3,
[SNOC_SLV_WCSS] = &slv_wcss,
[SNOC_SLV_OCIMEM] = &slv_ocimem,
[SNOC_SLV_SNOC_OCMEM] = &slv_snoc_ocmem,
[SNOC_SLV_SERVICE_SNOC] = &slv_service_snoc,
[SNOC_SLV_QDSS_STM] = &slv_qdss_stm,
};
static const struct msm8974_icc_desc msm8974_snoc = {
.nodes = msm8974_snoc_nodes,
.num_nodes = ARRAY_SIZE(msm8974_snoc_nodes),
};
static void msm8974_icc_rpm_smd_send(struct device *dev, int rsc_type,
char *name, int id, u64 val)
{
int ret;
if (id == -1)
return;
/*
	 * Setting the bandwidth requests for some nodes fails, and the same
	 * behavior occurs on the downstream MSM 3.4 kernel sources, which
	 * report errors like this:
*
* msm_rpm_get_error_from_ack(): RPM NACK Unsupported resource
* AXI: msm_bus_rpm_req(): RPM: Ack failed
* AXI: msm_bus_rpm_commit_arb(): RPM: Req fail: mas:32, bw:240000000
*
* Since there's no publicly available documentation for this hardware,
* and the bandwidth for some nodes in the path can be set properly,
* let's not return an error.
*/
ret = qcom_icc_rpm_smd_send(QCOM_SMD_RPM_ACTIVE_STATE, rsc_type, id,
val);
if (ret)
dev_dbg(dev, "Cannot set bandwidth for node %s (%d): %d\n",
name, id, ret);
}
static int msm8974_icc_set(struct icc_node *src, struct icc_node *dst)
{
struct msm8974_icc_node *src_qn, *dst_qn;
struct msm8974_icc_provider *qp;
u64 sum_bw, max_peak_bw, rate;
u32 agg_avg = 0, agg_peak = 0;
struct icc_provider *provider;
struct icc_node *n;
int ret, i;
src_qn = src->data;
dst_qn = dst->data;
provider = src->provider;
qp = to_msm8974_icc_provider(provider);
list_for_each_entry(n, &provider->nodes, node_list)
provider->aggregate(n, 0, n->avg_bw, n->peak_bw,
&agg_avg, &agg_peak);
sum_bw = icc_units_to_bps(agg_avg);
max_peak_bw = icc_units_to_bps(agg_peak);
/* Set bandwidth on source node */
msm8974_icc_rpm_smd_send(provider->dev, RPM_BUS_MASTER_REQ,
src_qn->name, src_qn->mas_rpm_id, sum_bw);
msm8974_icc_rpm_smd_send(provider->dev, RPM_BUS_SLAVE_REQ,
src_qn->name, src_qn->slv_rpm_id, sum_bw);
/* Set bandwidth on destination node */
msm8974_icc_rpm_smd_send(provider->dev, RPM_BUS_MASTER_REQ,
dst_qn->name, dst_qn->mas_rpm_id, sum_bw);
msm8974_icc_rpm_smd_send(provider->dev, RPM_BUS_SLAVE_REQ,
dst_qn->name, dst_qn->slv_rpm_id, sum_bw);
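	/*
	 * Derive the bus clock rate from the aggregated bandwidth:
	 * rate = max(avg, peak) / buswidth. For example, a 240 MB/s
	 * request on an 8-byte-wide bus works out to a 30 MHz clock.
	 */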
rate = max(sum_bw, max_peak_bw);
do_div(rate, src_qn->buswidth);
rate = min_t(u32, rate, INT_MAX);
if (src_qn->rate == rate)
return 0;
for (i = 0; i < qp->num_clks; i++) {
ret = clk_set_rate(qp->bus_clks[i].clk, rate);
if (ret) {
dev_err(provider->dev, "%s clk_set_rate error: %d\n",
qp->bus_clks[i].id, ret);
ret = 0;
}
}
src_qn->rate = rate;
return 0;
}
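/*
 * There is no way to read the current bandwidth votes back from the
 * hardware here, so report zero initial bandwidth for every node and
 * let consumer requests build the votes up from scratch.
 */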
static int msm8974_get_bw(struct icc_node *node, u32 *avg, u32 *peak)
{
*avg = 0;
*peak = 0;
return 0;
}
static int msm8974_icc_probe(struct platform_device *pdev)
{
const struct msm8974_icc_desc *desc;
struct msm8974_icc_node * const *qnodes;
struct msm8974_icc_provider *qp;
struct device *dev = &pdev->dev;
struct icc_onecell_data *data;
struct icc_provider *provider;
struct icc_node *node;
size_t num_nodes, i;
int ret;
/* wait for the RPM proxy */
if (!qcom_icc_rpm_smd_available())
return -EPROBE_DEFER;
desc = of_device_get_match_data(dev);
if (!desc)
return -EINVAL;
qnodes = desc->nodes;
num_nodes = desc->num_nodes;
qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL);
if (!qp)
return -ENOMEM;
data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
GFP_KERNEL);
if (!data)
return -ENOMEM;
data->num_nodes = num_nodes;
qp->bus_clks = devm_kmemdup(dev, msm8974_icc_bus_clocks,
sizeof(msm8974_icc_bus_clocks), GFP_KERNEL);
if (!qp->bus_clks)
return -ENOMEM;
qp->num_clks = ARRAY_SIZE(msm8974_icc_bus_clocks);
ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
if (ret)
return ret;
ret = clk_bulk_prepare_enable(qp->num_clks, qp->bus_clks);
if (ret)
return ret;
provider = &qp->provider;
provider->dev = dev;
provider->set = msm8974_icc_set;
provider->aggregate = icc_std_aggregate;
provider->xlate = of_icc_xlate_onecell;
provider->data = data;
provider->get_bw = msm8974_get_bw;
icc_provider_init(provider);
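	/*
	 * Create and link all nodes before registering the provider, so
	 * consumers never observe a half-populated graph.
	 */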
for (i = 0; i < num_nodes; i++) {
size_t j;
node = icc_node_create(qnodes[i]->id);
if (IS_ERR(node)) {
ret = PTR_ERR(node);
goto err_remove_nodes;
}
node->name = qnodes[i]->name;
node->data = qnodes[i];
icc_node_add(node, provider);
dev_dbg(dev, "registered node %s\n", node->name);
/* populate links */
for (j = 0; j < qnodes[i]->num_links; j++)
icc_link_create(node, qnodes[i]->links[j]);
data->nodes[i] = node;
}
ret = icc_provider_register(provider);
if (ret)
goto err_remove_nodes;
platform_set_drvdata(pdev, qp);
return 0;
err_remove_nodes:
icc_nodes_remove(provider);
clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
return ret;
}
static int msm8974_icc_remove(struct platform_device *pdev)
{
struct msm8974_icc_provider *qp = platform_get_drvdata(pdev);
icc_provider_deregister(&qp->provider);
icc_nodes_remove(&qp->provider);
clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks);
return 0;
}
static const struct of_device_id msm8974_noc_of_match[] = {
{ .compatible = "qcom,msm8974-bimc", .data = &msm8974_bimc},
{ .compatible = "qcom,msm8974-cnoc", .data = &msm8974_cnoc},
{ .compatible = "qcom,msm8974-mmssnoc", .data = &msm8974_mnoc},
{ .compatible = "qcom,msm8974-ocmemnoc", .data = &msm8974_onoc},
{ .compatible = "qcom,msm8974-pnoc", .data = &msm8974_pnoc},
{ .compatible = "qcom,msm8974-snoc", .data = &msm8974_snoc},
{ },
};
MODULE_DEVICE_TABLE(of, msm8974_noc_of_match);
static struct platform_driver msm8974_noc_driver = {
.probe = msm8974_icc_probe,
.remove = msm8974_icc_remove,
.driver = {
.name = "qnoc-msm8974",
.of_match_table = msm8974_noc_of_match,
.sync_state = icc_sync_state,
},
};
module_platform_driver(msm8974_noc_driver);
MODULE_DESCRIPTION("Qualcomm MSM8974 NoC driver");
MODULE_AUTHOR("Brian Masney <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/interconnect/qcom/msm8974.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Linaro Ltd.
*/
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "icc-common.h"
struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void *data)
{
struct icc_node_data *ndata;
struct icc_node *node;
node = of_icc_xlate_onecell(spec, data);
if (IS_ERR(node))
return ERR_CAST(node);
ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
if (!ndata)
return ERR_PTR(-ENOMEM);
ndata->node = node;
if (spec->args_count == 2)
ndata->tag = spec->args[1];
if (spec->args_count > 2)
pr_warn("%pOF: Too many arguments, path tag is not parsed\n", spec->np);
return ndata;
}
EXPORT_SYMBOL_GPL(qcom_icc_xlate_extended);
MODULE_LICENSE("GPL");
| linux-master | drivers/interconnect/qcom/icc-common.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*
*/
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sm8250.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sm8250.h"
static struct qcom_icc_node qhm_a1noc_cfg = {
.name = "qhm_a1noc_cfg",
.id = SM8250_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8250_SLAVE_SERVICE_A1NOC },
};
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
.id = SM8250_MASTER_QSPI_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8250_A1NOC_SNOC_SLV },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
.id = SM8250_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8250_A1NOC_SNOC_SLV },
};
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
.id = SM8250_MASTER_QUP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8250_A1NOC_SNOC_SLV },
};
static struct qcom_icc_node qhm_tsif = {
.name = "qhm_tsif",
.id = SM8250_MASTER_TSIF,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8250_A1NOC_SNOC_SLV },
};
static struct qcom_icc_node xm_pcie3_modem = {
.name = "xm_pcie3_modem",
.id = SM8250_MASTER_PCIE_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8250_SLAVE_ANOC_PCIE_GEM_NOC_1 },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
.id = SM8250_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8250_A1NOC_SNOC_SLV },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
.id = SM8250_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8250_A1NOC_SNOC_SLV },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
.id = SM8250_MASTER_USB3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8250_A1NOC_SNOC_SLV },
};
static struct qcom_icc_node xm_usb3_1 = {
.name = "xm_usb3_1",
.id = SM8250_MASTER_USB3_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8250_A1NOC_SNOC_SLV },
};
static struct qcom_icc_node qhm_a2noc_cfg = {
.name = "qhm_a2noc_cfg",
.id = SM8250_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8250_SLAVE_SERVICE_A2NOC },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
.id = SM8250_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8250_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
.id = SM8250_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8250_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node qnm_cnoc = {
.name = "qnm_cnoc",
.id = SM8250_MASTER_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8250_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
.id = SM8250_MASTER_CRYPTO_CORE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8250_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
.id = SM8250_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8250_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
.id = SM8250_MASTER_PCIE,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8250_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
.id = SM8250_MASTER_PCIE_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8250_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
.id = SM8250_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8250_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
.id = SM8250_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8250_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node xm_ufs_card = {
.name = "xm_ufs_card",
.id = SM8250_MASTER_UFS_CARD,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8250_A2NOC_SNOC_SLV },
};
static struct qcom_icc_node qnm_npu = {
.name = "qnm_npu",
.id = SM8250_MASTER_NPU,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8250_SLAVE_CDSP_MEM_NOC },
};
static struct qcom_icc_node qnm_snoc = {
.name = "qnm_snoc",
.id = SM8250_SNOC_CNOC_MAS,
.channels = 1,
.buswidth = 8,
.num_links = 49,
.links = { SM8250_SLAVE_CDSP_CFG,
SM8250_SLAVE_CAMERA_CFG,
SM8250_SLAVE_TLMM_SOUTH,
SM8250_SLAVE_TLMM_NORTH,
SM8250_SLAVE_SDCC_4,
SM8250_SLAVE_TLMM_WEST,
SM8250_SLAVE_SDCC_2,
SM8250_SLAVE_CNOC_MNOC_CFG,
SM8250_SLAVE_UFS_MEM_CFG,
SM8250_SLAVE_SNOC_CFG,
SM8250_SLAVE_PDM,
SM8250_SLAVE_CX_RDPM,
SM8250_SLAVE_PCIE_1_CFG,
SM8250_SLAVE_A2NOC_CFG,
SM8250_SLAVE_QDSS_CFG,
SM8250_SLAVE_DISPLAY_CFG,
SM8250_SLAVE_PCIE_2_CFG,
SM8250_SLAVE_TCSR,
SM8250_SLAVE_DCC_CFG,
SM8250_SLAVE_CNOC_DDRSS,
SM8250_SLAVE_IPC_ROUTER_CFG,
SM8250_SLAVE_PCIE_0_CFG,
SM8250_SLAVE_RBCPR_MMCX_CFG,
SM8250_SLAVE_NPU_CFG,
SM8250_SLAVE_AHB2PHY_SOUTH,
SM8250_SLAVE_AHB2PHY_NORTH,
SM8250_SLAVE_GRAPHICS_3D_CFG,
SM8250_SLAVE_VENUS_CFG,
SM8250_SLAVE_TSIF,
SM8250_SLAVE_IPA_CFG,
SM8250_SLAVE_IMEM_CFG,
SM8250_SLAVE_USB3,
SM8250_SLAVE_SERVICE_CNOC,
SM8250_SLAVE_UFS_CARD_CFG,
SM8250_SLAVE_USB3_1,
SM8250_SLAVE_LPASS,
SM8250_SLAVE_RBCPR_CX_CFG,
SM8250_SLAVE_A1NOC_CFG,
SM8250_SLAVE_AOSS,
SM8250_SLAVE_PRNG,
SM8250_SLAVE_VSENSE_CTRL_CFG,
SM8250_SLAVE_QSPI_0,
SM8250_SLAVE_CRYPTO_0_CFG,
SM8250_SLAVE_PIMEM_CFG,
SM8250_SLAVE_RBCPR_MX_CFG,
SM8250_SLAVE_QUP_0,
SM8250_SLAVE_QUP_1,
SM8250_SLAVE_QUP_2,
SM8250_SLAVE_CLK_CTL
},
};
static struct qcom_icc_node xm_qdss_dap = {
.name = "xm_qdss_dap",
.id = SM8250_MASTER_QDSS_DAP,
.channels = 1,
.buswidth = 8,
.num_links = 50,
.links = { SM8250_SLAVE_CDSP_CFG,
SM8250_SLAVE_CAMERA_CFG,
SM8250_SLAVE_TLMM_SOUTH,
SM8250_SLAVE_TLMM_NORTH,
SM8250_SLAVE_SDCC_4,
SM8250_SLAVE_TLMM_WEST,
SM8250_SLAVE_SDCC_2,
SM8250_SLAVE_CNOC_MNOC_CFG,
SM8250_SLAVE_UFS_MEM_CFG,
SM8250_SLAVE_SNOC_CFG,
SM8250_SLAVE_PDM,
SM8250_SLAVE_CX_RDPM,
SM8250_SLAVE_PCIE_1_CFG,
SM8250_SLAVE_A2NOC_CFG,
SM8250_SLAVE_QDSS_CFG,
SM8250_SLAVE_DISPLAY_CFG,
SM8250_SLAVE_PCIE_2_CFG,
SM8250_SLAVE_TCSR,
SM8250_SLAVE_DCC_CFG,
SM8250_SLAVE_CNOC_DDRSS,
SM8250_SLAVE_IPC_ROUTER_CFG,
SM8250_SLAVE_CNOC_A2NOC,
SM8250_SLAVE_PCIE_0_CFG,
SM8250_SLAVE_RBCPR_MMCX_CFG,
SM8250_SLAVE_NPU_CFG,
SM8250_SLAVE_AHB2PHY_SOUTH,
SM8250_SLAVE_AHB2PHY_NORTH,
SM8250_SLAVE_GRAPHICS_3D_CFG,
SM8250_SLAVE_VENUS_CFG,
SM8250_SLAVE_TSIF,
SM8250_SLAVE_IPA_CFG,
SM8250_SLAVE_IMEM_CFG,
SM8250_SLAVE_USB3,
SM8250_SLAVE_SERVICE_CNOC,
SM8250_SLAVE_UFS_CARD_CFG,
SM8250_SLAVE_USB3_1,
SM8250_SLAVE_LPASS,
SM8250_SLAVE_RBCPR_CX_CFG,
SM8250_SLAVE_A1NOC_CFG,
SM8250_SLAVE_AOSS,
SM8250_SLAVE_PRNG,
SM8250_SLAVE_VSENSE_CTRL_CFG,
SM8250_SLAVE_QSPI_0,
SM8250_SLAVE_CRYPTO_0_CFG,
SM8250_SLAVE_PIMEM_CFG,
SM8250_SLAVE_RBCPR_MX_CFG,
SM8250_SLAVE_QUP_0,
SM8250_SLAVE_QUP_1,
SM8250_SLAVE_QUP_2,
SM8250_SLAVE_CLK_CTL
},
};
static struct qcom_icc_node qhm_cnoc_dc_noc = {
.name = "qhm_cnoc_dc_noc",
.id = SM8250_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
.links = { SM8250_SLAVE_GEM_NOC_CFG,
SM8250_SLAVE_LLCC_CFG
},
};
static struct qcom_icc_node alm_gpu_tcu = {
.name = "alm_gpu_tcu",
.id = SM8250_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SM8250_SLAVE_LLCC,
SM8250_SLAVE_GEM_NOC_SNOC
},
};
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
.id = SM8250_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SM8250_SLAVE_LLCC,
SM8250_SLAVE_GEM_NOC_SNOC
},
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
.id = SM8250_MASTER_AMPSS_M0,
.channels = 2,
.buswidth = 32,
.num_links = 3,
.links = { SM8250_SLAVE_LLCC,
SM8250_SLAVE_GEM_NOC_SNOC,
SM8250_SLAVE_MEM_NOC_PCIE_SNOC
},
};
static struct qcom_icc_node qhm_gemnoc_cfg = {
.name = "qhm_gemnoc_cfg",
.id = SM8250_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 3,
.links = { SM8250_SLAVE_SERVICE_GEM_NOC_2,
SM8250_SLAVE_SERVICE_GEM_NOC_1,
SM8250_SLAVE_SERVICE_GEM_NOC
},
};
static struct qcom_icc_node qnm_cmpnoc = {
.name = "qnm_cmpnoc",
.id = SM8250_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SM8250_SLAVE_LLCC,
SM8250_SLAVE_GEM_NOC_SNOC
},
};
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
.id = SM8250_MASTER_GRAPHICS_3D,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SM8250_SLAVE_LLCC,
SM8250_SLAVE_GEM_NOC_SNOC },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
.id = SM8250_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8250_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
.id = SM8250_MASTER_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SM8250_SLAVE_LLCC,
SM8250_SLAVE_GEM_NOC_SNOC
},
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
.id = SM8250_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
.links = { SM8250_SLAVE_LLCC,
SM8250_SLAVE_GEM_NOC_SNOC
},
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
.id = SM8250_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8250_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
.id = SM8250_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
.links = { SM8250_SLAVE_LLCC,
SM8250_SLAVE_GEM_NOC_SNOC,
SM8250_SLAVE_MEM_NOC_PCIE_SNOC
},
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
.id = SM8250_MASTER_LLCC,
.channels = 4,
.buswidth = 4,
.num_links = 1,
.links = { SM8250_SLAVE_EBI_CH0 },
};
static struct qcom_icc_node qhm_mnoc_cfg = {
.name = "qhm_mnoc_cfg",
.id = SM8250_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8250_SLAVE_SERVICE_MNOC },
};
static struct qcom_icc_node qnm_camnoc_hf = {
.name = "qnm_camnoc_hf",
.id = SM8250_MASTER_CAMNOC_HF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8250_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qnm_camnoc_icp = {
.name = "qnm_camnoc_icp",
.id = SM8250_MASTER_CAMNOC_ICP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8250_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_camnoc_sf = {
.name = "qnm_camnoc_sf",
.id = SM8250_MASTER_CAMNOC_SF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8250_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_video0 = {
.name = "qnm_video0",
.id = SM8250_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8250_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_video1 = {
.name = "qnm_video1",
.id = SM8250_MASTER_VIDEO_P1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8250_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_video_cvp = {
.name = "qnm_video_cvp",
.id = SM8250_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8250_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_mdp0 = {
.name = "qxm_mdp0",
.id = SM8250_MASTER_MDP_PORT0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8250_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qxm_mdp1 = {
.name = "qxm_mdp1",
.id = SM8250_MASTER_MDP_PORT1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8250_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qxm_rot = {
.name = "qxm_rot",
.id = SM8250_MASTER_ROTATOR,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8250_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node amm_npu_sys = {
.name = "amm_npu_sys",
.id = SM8250_MASTER_NPU_SYS,
.channels = 4,
.buswidth = 32,
.num_links = 1,
.links = { SM8250_SLAVE_NPU_COMPUTE_NOC },
};
static struct qcom_icc_node amm_npu_sys_cdp_w = {
.name = "amm_npu_sys_cdp_w",
.id = SM8250_MASTER_NPU_CDP,
.channels = 2,
.buswidth = 16,
.num_links = 1,
.links = { SM8250_SLAVE_NPU_COMPUTE_NOC },
};
static struct qcom_icc_node qhm_cfg = {
.name = "qhm_cfg",
.id = SM8250_MASTER_NPU_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 9,
.links = { SM8250_SLAVE_SERVICE_NPU_NOC,
SM8250_SLAVE_ISENSE_CFG,
SM8250_SLAVE_NPU_LLM_CFG,
SM8250_SLAVE_NPU_INT_DMA_BWMON_CFG,
SM8250_SLAVE_NPU_CP,
SM8250_SLAVE_NPU_TCM,
SM8250_SLAVE_NPU_CAL_DP0,
SM8250_SLAVE_NPU_CAL_DP1,
SM8250_SLAVE_NPU_DPM
},
};
static struct qcom_icc_node qhm_snoc_cfg = {
.name = "qhm_snoc_cfg",
.id = SM8250_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8250_SLAVE_SERVICE_SNOC },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
.id = SM8250_A1NOC_SNOC_MAS,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8250_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
.id = SM8250_A2NOC_SNOC_MAS,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8250_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qnm_gemnoc = {
.name = "qnm_gemnoc",
.id = SM8250_MASTER_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 6,
.links = { SM8250_SLAVE_PIMEM,
SM8250_SLAVE_OCIMEM,
SM8250_SLAVE_APPSS,
SM8250_SNOC_CNOC_SLV,
SM8250_SLAVE_TCU,
SM8250_SLAVE_QDSS_STM
},
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
.id = SM8250_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 3,
.links = { SM8250_SLAVE_PCIE_2,
SM8250_SLAVE_PCIE_0,
SM8250_SLAVE_PCIE_1
},
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
.id = SM8250_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8250_SLAVE_SNOC_GEM_NOC_GC },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
.id = SM8250_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8250_SLAVE_SNOC_GEM_NOC_GC },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
.id = SM8250_A1NOC_SNOC_SLV,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8250_A1NOC_SNOC_MAS },
};
static struct qcom_icc_node qns_pcie_modem_mem_noc = {
.name = "qns_pcie_modem_mem_noc",
.id = SM8250_SLAVE_ANOC_PCIE_GEM_NOC_1,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8250_MASTER_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node srvc_aggre1_noc = {
.name = "srvc_aggre1_noc",
.id = SM8250_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
.id = SM8250_A2NOC_SNOC_SLV,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8250_A2NOC_SNOC_MAS },
};
static struct qcom_icc_node qns_pcie_mem_noc = {
.name = "qns_pcie_mem_noc",
.id = SM8250_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8250_MASTER_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
.id = SM8250_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_cdsp_mem_noc = {
.name = "qns_cdsp_mem_noc",
.id = SM8250_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8250_MASTER_COMPUTE_NOC },
};
static struct qcom_icc_node qhs_a1_noc_cfg = {
.name = "qhs_a1_noc_cfg",
.id = SM8250_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8250_MASTER_A1NOC_CFG },
};
static struct qcom_icc_node qhs_a2_noc_cfg = {
.name = "qhs_a2_noc_cfg",
.id = SM8250_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8250_MASTER_A2NOC_CFG },
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
.id = SM8250_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy1 = {
.name = "qhs_ahb2phy1",
.id = SM8250_SLAVE_AHB2PHY_NORTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
.id = SM8250_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
.id = SM8250_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
.id = SM8250_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_compute_dsp = {
.name = "qhs_compute_dsp",
.id = SM8250_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
.id = SM8250_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mmcx = {
.name = "qhs_cpr_mmcx",
.id = SM8250_SLAVE_RBCPR_MMCX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mx = {
.name = "qhs_cpr_mx",
.id = SM8250_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
.id = SM8250_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cx_rdpm = {
.name = "qhs_cx_rdpm",
.id = SM8250_SLAVE_CX_RDPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dcc_cfg = {
.name = "qhs_dcc_cfg",
.id = SM8250_SLAVE_DCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ddrss_cfg = {
.name = "qhs_ddrss_cfg",
.id = SM8250_SLAVE_CNOC_DDRSS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8250_MASTER_CNOC_DC_NOC },
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
.id = SM8250_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
.id = SM8250_SLAVE_GRAPHICS_3D_CFG,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
.id = SM8250_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
.id = SM8250_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
.id = SM8250_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_cfg = {
.name = "qhs_lpass_cfg",
.id = SM8250_SLAVE_LPASS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mnoc_cfg = {
.name = "qhs_mnoc_cfg",
.id = SM8250_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8250_MASTER_CNOC_MNOC_CFG },
};
static struct qcom_icc_node qhs_npu_cfg = {
.name = "qhs_npu_cfg",
.id = SM8250_SLAVE_NPU_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8250_MASTER_NPU_NOC_CFG },
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
.id = SM8250_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie1_cfg = {
.name = "qhs_pcie1_cfg",
.id = SM8250_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie_modem_cfg = {
.name = "qhs_pcie_modem_cfg",
.id = SM8250_SLAVE_PCIE_2_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
.id = SM8250_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
.id = SM8250_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
.id = SM8250_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
.id = SM8250_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
.id = SM8250_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
.id = SM8250_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
.id = SM8250_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup2 = {
.name = "qhs_qup2",
.id = SM8250_SLAVE_QUP_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
.id = SM8250_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
.id = SM8250_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_snoc_cfg = {
.name = "qhs_snoc_cfg",
.id = SM8250_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8250_MASTER_SNOC_CFG },
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
.id = SM8250_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm0 = {
.name = "qhs_tlmm0",
.id = SM8250_SLAVE_TLMM_NORTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm1 = {
.name = "qhs_tlmm1",
.id = SM8250_SLAVE_TLMM_SOUTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm2 = {
.name = "qhs_tlmm2",
.id = SM8250_SLAVE_TLMM_WEST,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tsif = {
.name = "qhs_tsif",
.id = SM8250_SLAVE_TSIF,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_card_cfg = {
.name = "qhs_ufs_card_cfg",
.id = SM8250_SLAVE_UFS_CARD_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
.id = SM8250_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
.id = SM8250_SLAVE_USB3,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_1 = {
.name = "qhs_usb3_1",
.id = SM8250_SLAVE_USB3_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
.id = SM8250_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
.id = SM8250_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_cnoc_a2noc = {
.name = "qns_cnoc_a2noc",
.id = SM8250_SLAVE_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8250_MASTER_CNOC_A2NOC },
};
static struct qcom_icc_node srvc_cnoc = {
.name = "srvc_cnoc",
.id = SM8250_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
};
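/* DC NoC slave nodes */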
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
.id = SM8250_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_memnoc = {
.name = "qhs_memnoc",
.id = SM8250_SLAVE_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8250_MASTER_GEM_NOC_CFG },
};
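/* GEM NoC slave nodes */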
static struct qcom_icc_node qns_gem_noc_snoc = {
.name = "qns_gem_noc_snoc",
.id = SM8250_SLAVE_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8250_MASTER_GEM_NOC_SNOC },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
.id = SM8250_SLAVE_LLCC,
.channels = 4,
.buswidth = 16,
.num_links = 1,
.links = { SM8250_MASTER_LLCC },
};
static struct qcom_icc_node qns_sys_pcie = {
.name = "qns_sys_pcie",
.id = SM8250_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8250_MASTER_GEM_NOC_PCIE_SNOC },
};
static struct qcom_icc_node srvc_even_gemnoc = {
.name = "srvc_even_gemnoc",
.id = SM8250_SLAVE_SERVICE_GEM_NOC_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_odd_gemnoc = {
.name = "srvc_odd_gemnoc",
.id = SM8250_SLAVE_SERVICE_GEM_NOC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_sys_gemnoc = {
.name = "srvc_sys_gemnoc",
.id = SM8250_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4,
};
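/* Memory controller (mc_virt) slave node */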
static struct qcom_icc_node ebi = {
.name = "ebi",
.id = SM8250_SLAVE_EBI_CH0,
.channels = 4,
.buswidth = 4,
};
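/* MMSS NoC slave nodes */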
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
.id = SM8250_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8250_MASTER_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
.id = SM8250_SLAVE_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8250_MASTER_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
.id = SM8250_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
};
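/* NPU NoC slave nodes */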
static struct qcom_icc_node qhs_cal_dp0 = {
.name = "qhs_cal_dp0",
.id = SM8250_SLAVE_NPU_CAL_DP0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cal_dp1 = {
.name = "qhs_cal_dp1",
.id = SM8250_SLAVE_NPU_CAL_DP1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cp = {
.name = "qhs_cp",
.id = SM8250_SLAVE_NPU_CP,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dma_bwmon = {
.name = "qhs_dma_bwmon",
.id = SM8250_SLAVE_NPU_INT_DMA_BWMON_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dpm = {
.name = "qhs_dpm",
.id = SM8250_SLAVE_NPU_DPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_isense = {
.name = "qhs_isense",
.id = SM8250_SLAVE_ISENSE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_llm = {
.name = "qhs_llm",
.id = SM8250_SLAVE_NPU_LLM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcm = {
.name = "qhs_tcm",
.id = SM8250_SLAVE_NPU_TCM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_npu_sys = {
.name = "qns_npu_sys",
.id = SM8250_SLAVE_NPU_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
};
static struct qcom_icc_node srvc_noc = {
.name = "srvc_noc",
.id = SM8250_SLAVE_SERVICE_NPU_NOC,
.channels = 1,
.buswidth = 4,
};
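/* System NoC slave nodes */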
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
.id = SM8250_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qns_cnoc = {
.name = "qns_cnoc",
.id = SM8250_SNOC_CNOC_SLV,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8250_SNOC_CNOC_MAS },
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
.id = SM8250_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8250_MASTER_SNOC_GC_MEM_NOC },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
.id = SM8250_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8250_MASTER_SNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
.id = SM8250_SLAVE_OCIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
.id = SM8250_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
.id = SM8250_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_pcie_0 = {
.name = "xs_pcie_0",
.id = SM8250_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_pcie_1 = {
.name = "xs_pcie_1",
.id = SM8250_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_pcie_modem = {
.name = "xs_pcie_modem",
.id = SM8250_SLAVE_PCIE_2,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
.id = SM8250_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
.id = SM8250_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
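/* QUP core clock virtual nodes */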
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
.id = SM8250_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8250_SLAVE_QUP_CORE_0 },
};
static struct qcom_icc_node qup1_core_master = {
.name = "qup1_core_master",
.id = SM8250_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8250_SLAVE_QUP_CORE_1 },
};
static struct qcom_icc_node qup2_core_master = {
.name = "qup2_core_master",
.id = SM8250_MASTER_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8250_SLAVE_QUP_CORE_2 },
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
.id = SM8250_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup1_core_slave = {
.name = "qup1_core_slave",
.id = SM8250_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup2_core_slave = {
.name = "qup2_core_slave",
.id = SM8250_SLAVE_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
};
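/* Bus Clock Manager (BCM) definitions, voted through the RPMh BCM voter */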
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
.keepalive = false,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_mc0 = {
.name = "MC0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_sh0 = {
.name = "SH0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_llcc },
};
static struct qcom_icc_bcm bcm_mm0 = {
.name = "MM0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxm_crypto },
};
static struct qcom_icc_bcm bcm_mm1 = {
.name = "MM1",
.keepalive = false,
.num_nodes = 3,
.nodes = { &qnm_camnoc_hf, &qxm_mdp0, &qxm_mdp1 },
};
static struct qcom_icc_bcm bcm_sh2 = {
.name = "SH2",
.keepalive = false,
.num_nodes = 2,
.nodes = { &alm_gpu_tcu, &alm_sys_tcu },
};
static struct qcom_icc_bcm bcm_mm2 = {
.name = "MM2",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_bcm bcm_qup0 = {
.name = "QUP0",
.keepalive = false,
.num_nodes = 3,
.nodes = { &qup0_core_master, &qup1_core_master, &qup2_core_master },
};
static struct qcom_icc_bcm bcm_sh3 = {
.name = "SH3",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_cmpnoc },
};
static struct qcom_icc_bcm bcm_mm3 = {
.name = "MM3",
.keepalive = false,
.num_nodes = 5,
.nodes = { &qnm_camnoc_icp, &qnm_camnoc_sf, &qnm_video0, &qnm_video1, &qnm_video_cvp },
};
static struct qcom_icc_bcm bcm_sh4 = {
.name = "SH4",
.keepalive = false,
.num_nodes = 1,
.nodes = { &chm_apps },
};
static struct qcom_icc_bcm bcm_sn0 = {
.name = "SN0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_bcm bcm_co0 = {
.name = "CO0",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_cdsp_mem_noc },
};
static struct qcom_icc_bcm bcm_cn0 = {
.name = "CN0",
.keepalive = true,
.num_nodes = 52,
.nodes = { &qnm_snoc,
&xm_qdss_dap,
&qhs_a1_noc_cfg,
&qhs_a2_noc_cfg,
&qhs_ahb2phy0,
&qhs_ahb2phy1,
&qhs_aoss,
&qhs_camera_cfg,
&qhs_clk_ctl,
&qhs_compute_dsp,
&qhs_cpr_cx,
&qhs_cpr_mmcx,
&qhs_cpr_mx,
&qhs_crypto0_cfg,
&qhs_cx_rdpm,
&qhs_dcc_cfg,
&qhs_ddrss_cfg,
&qhs_display_cfg,
&qhs_gpuss_cfg,
&qhs_imem_cfg,
&qhs_ipa,
&qhs_ipc_router,
&qhs_lpass_cfg,
&qhs_mnoc_cfg,
&qhs_npu_cfg,
&qhs_pcie0_cfg,
&qhs_pcie1_cfg,
&qhs_pcie_modem_cfg,
&qhs_pdm,
&qhs_pimem_cfg,
&qhs_prng,
&qhs_qdss_cfg,
&qhs_qspi,
&qhs_qup0,
&qhs_qup1,
&qhs_qup2,
&qhs_sdc2,
&qhs_sdc4,
&qhs_snoc_cfg,
&qhs_tcsr,
&qhs_tlmm0,
&qhs_tlmm1,
&qhs_tlmm2,
&qhs_tsif,
&qhs_ufs_card_cfg,
&qhs_ufs_mem_cfg,
&qhs_usb3_0,
&qhs_usb3_1,
&qhs_venus_cfg,
&qhs_vsense_ctrl_cfg,
&qns_cnoc_a2noc,
&srvc_cnoc
},
};
static struct qcom_icc_bcm bcm_sn1 = {
.name = "SN1",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxs_imem },
};
static struct qcom_icc_bcm bcm_sn2 = {
.name = "SN2",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_bcm bcm_co2 = {
.name = "CO2",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_npu },
};
static struct qcom_icc_bcm bcm_sn3 = {
.name = "SN3",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxs_pimem },
};
static struct qcom_icc_bcm bcm_sn4 = {
.name = "SN4",
.keepalive = false,
.num_nodes = 1,
.nodes = { &xs_qdss_stm },
};
static struct qcom_icc_bcm bcm_sn5 = {
.name = "SN5",
.keepalive = false,
.num_nodes = 1,
.nodes = { &xs_pcie_modem },
};
static struct qcom_icc_bcm bcm_sn6 = {
.name = "SN6",
.keepalive = false,
.num_nodes = 2,
.nodes = { &xs_pcie_0, &xs_pcie_1 },
};
static struct qcom_icc_bcm bcm_sn7 = {
.name = "SN7",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_bcm bcm_sn8 = {
.name = "SN8",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_bcm bcm_sn9 = {
.name = "SN9",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_gemnoc_pcie },
};
static struct qcom_icc_bcm bcm_sn11 = {
.name = "SN11",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_gemnoc },
};
static struct qcom_icc_bcm bcm_sn12 = {
.name = "SN12",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qns_pcie_modem_mem_noc, &qns_pcie_mem_noc },
};
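/* Per-NoC BCM/node tables and provider descriptors */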
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_sn12,
};
static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
[MASTER_QSPI_0] = &qhm_qspi,
[MASTER_QUP_1] = &qhm_qup1,
[MASTER_QUP_2] = &qhm_qup2,
[MASTER_TSIF] = &qhm_tsif,
[MASTER_PCIE_2] = &xm_pcie3_modem,
[MASTER_SDCC_4] = &xm_sdc4,
[MASTER_UFS_MEM] = &xm_ufs_mem,
[MASTER_USB3] = &xm_usb3_0,
[MASTER_USB3_1] = &xm_usb3_1,
[A1NOC_SNOC_SLV] = &qns_a1noc_snoc,
[SLAVE_ANOC_PCIE_GEM_NOC_1] = &qns_pcie_modem_mem_noc,
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
static const struct qcom_icc_desc sm8250_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
&bcm_sn12,
};
static struct qcom_icc_bcm * const qup_virt_bcms[] = {
&bcm_qup0,
};
static struct qcom_icc_node * const qup_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[MASTER_QUP_CORE_2] = &qup2_core_master,
[SLAVE_QUP_CORE_0] = &qup0_core_slave,
[SLAVE_QUP_CORE_1] = &qup1_core_slave,
[SLAVE_QUP_CORE_2] = &qup2_core_slave,
};
static const struct qcom_icc_desc sm8250_qup_virt = {
.nodes = qup_virt_nodes,
.num_nodes = ARRAY_SIZE(qup_virt_nodes),
.bcms = qup_virt_bcms,
.num_bcms = ARRAY_SIZE(qup_virt_bcms),
};
static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QUP_0] = &qhm_qup0,
[MASTER_CNOC_A2NOC] = &qnm_cnoc,
[MASTER_CRYPTO_CORE_0] = &qxm_crypto,
[MASTER_IPA] = &qxm_ipa,
[MASTER_PCIE] = &xm_pcie3_0,
[MASTER_PCIE_1] = &xm_pcie3_1,
[MASTER_QDSS_ETR] = &xm_qdss_etr,
[MASTER_SDCC_2] = &xm_sdc2,
[MASTER_UFS_CARD] = &xm_ufs_card,
[A2NOC_SNOC_SLV] = &qns_a2noc_snoc,
[SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
static const struct qcom_icc_desc sm8250_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
static struct qcom_icc_bcm * const compute_noc_bcms[] = {
&bcm_co0,
&bcm_co2,
};
static struct qcom_icc_node * const compute_noc_nodes[] = {
[MASTER_NPU] = &qnm_npu,
[SLAVE_CDSP_MEM_NOC] = &qns_cdsp_mem_noc,
};
static const struct qcom_icc_desc sm8250_compute_noc = {
.nodes = compute_noc_nodes,
.num_nodes = ARRAY_SIZE(compute_noc_nodes),
.bcms = compute_noc_bcms,
.num_bcms = ARRAY_SIZE(compute_noc_bcms),
};
static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
};
static struct qcom_icc_node * const config_noc_nodes[] = {
[SNOC_CNOC_MAS] = &qnm_snoc,
[MASTER_QDSS_DAP] = &xm_qdss_dap,
[SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
[SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
[SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
[SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
[SLAVE_AOSS] = &qhs_aoss,
[SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
[SLAVE_CLK_CTL] = &qhs_clk_ctl,
[SLAVE_CDSP_CFG] = &qhs_compute_dsp,
[SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
[SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx,
[SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
[SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
[SLAVE_CX_RDPM] = &qhs_cx_rdpm,
[SLAVE_DCC_CFG] = &qhs_dcc_cfg,
[SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
[SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
[SLAVE_GRAPHICS_3D_CFG] = &qhs_gpuss_cfg,
[SLAVE_IMEM_CFG] = &qhs_imem_cfg,
[SLAVE_IPA_CFG] = &qhs_ipa,
[SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
[SLAVE_LPASS] = &qhs_lpass_cfg,
[SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
[SLAVE_NPU_CFG] = &qhs_npu_cfg,
[SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
[SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
[SLAVE_PCIE_2_CFG] = &qhs_pcie_modem_cfg,
[SLAVE_PDM] = &qhs_pdm,
[SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
[SLAVE_PRNG] = &qhs_prng,
[SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
[SLAVE_QSPI_0] = &qhs_qspi,
[SLAVE_QUP_0] = &qhs_qup0,
[SLAVE_QUP_1] = &qhs_qup1,
[SLAVE_QUP_2] = &qhs_qup2,
[SLAVE_SDCC_2] = &qhs_sdc2,
[SLAVE_SDCC_4] = &qhs_sdc4,
[SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
[SLAVE_TCSR] = &qhs_tcsr,
[SLAVE_TLMM_NORTH] = &qhs_tlmm0,
[SLAVE_TLMM_SOUTH] = &qhs_tlmm1,
[SLAVE_TLMM_WEST] = &qhs_tlmm2,
[SLAVE_TSIF] = &qhs_tsif,
[SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg,
[SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
[SLAVE_USB3] = &qhs_usb3_0,
[SLAVE_USB3_1] = &qhs_usb3_1,
[SLAVE_VENUS_CFG] = &qhs_venus_cfg,
[SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
[SLAVE_CNOC_A2NOC] = &qns_cnoc_a2noc,
[SLAVE_SERVICE_CNOC] = &srvc_cnoc,
};
static const struct qcom_icc_desc sm8250_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
static struct qcom_icc_bcm * const dc_noc_bcms[] = {
};
static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
[SLAVE_GEM_NOC_CFG] = &qhs_memnoc,
};
static const struct qcom_icc_desc sm8250_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
.bcms = dc_noc_bcms,
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
&bcm_sh3,
&bcm_sh4,
};
static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_GPU_TCU] = &alm_gpu_tcu,
[MASTER_SYS_TCU] = &alm_sys_tcu,
[MASTER_AMPSS_M0] = &chm_apps,
[MASTER_GEM_NOC_CFG] = &qhm_gemnoc_cfg,
[MASTER_COMPUTE_NOC] = &qnm_cmpnoc,
[MASTER_GRAPHICS_3D] = &qnm_gpu,
[MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
[MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
[MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
[MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
[MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
[SLAVE_GEM_NOC_SNOC] = &qns_gem_noc_snoc,
[SLAVE_LLCC] = &qns_llcc,
[SLAVE_MEM_NOC_PCIE_SNOC] = &qns_sys_pcie,
[SLAVE_SERVICE_GEM_NOC_1] = &srvc_even_gemnoc,
[SLAVE_SERVICE_GEM_NOC_2] = &srvc_odd_gemnoc,
[SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc,
};
static const struct qcom_icc_desc sm8250_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
};
static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI_CH0] = &ebi,
};
static const struct qcom_icc_desc sm8250_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm2,
&bcm_mm3,
};
static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
[MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
[MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
[MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
[MASTER_VIDEO_P0] = &qnm_video0,
[MASTER_VIDEO_P1] = &qnm_video1,
[MASTER_VIDEO_PROC] = &qnm_video_cvp,
[MASTER_MDP_PORT0] = &qxm_mdp0,
[MASTER_MDP_PORT1] = &qxm_mdp1,
[MASTER_ROTATOR] = &qxm_rot,
[SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
[SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
};
static const struct qcom_icc_desc sm8250_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
static struct qcom_icc_bcm * const npu_noc_bcms[] = {
};
static struct qcom_icc_node * const npu_noc_nodes[] = {
[MASTER_NPU_SYS] = &amm_npu_sys,
[MASTER_NPU_CDP] = &amm_npu_sys_cdp_w,
[MASTER_NPU_NOC_CFG] = &qhm_cfg,
[SLAVE_NPU_CAL_DP0] = &qhs_cal_dp0,
[SLAVE_NPU_CAL_DP1] = &qhs_cal_dp1,
[SLAVE_NPU_CP] = &qhs_cp,
[SLAVE_NPU_INT_DMA_BWMON_CFG] = &qhs_dma_bwmon,
[SLAVE_NPU_DPM] = &qhs_dpm,
[SLAVE_ISENSE_CFG] = &qhs_isense,
[SLAVE_NPU_LLM_CFG] = &qhs_llm,
[SLAVE_NPU_TCM] = &qhs_tcm,
[SLAVE_NPU_COMPUTE_NOC] = &qns_npu_sys,
[SLAVE_SERVICE_NPU_NOC] = &srvc_noc,
};
static const struct qcom_icc_desc sm8250_npu_noc = {
.nodes = npu_noc_nodes,
.num_nodes = ARRAY_SIZE(npu_noc_nodes),
.bcms = npu_noc_bcms,
.num_bcms = ARRAY_SIZE(npu_noc_bcms),
};
static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn11,
&bcm_sn2,
&bcm_sn3,
&bcm_sn4,
&bcm_sn5,
&bcm_sn6,
&bcm_sn7,
&bcm_sn8,
&bcm_sn9,
};
static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_SNOC_CFG] = &qhm_snoc_cfg,
[A1NOC_SNOC_MAS] = &qnm_aggre1_noc,
[A2NOC_SNOC_MAS] = &qnm_aggre2_noc,
[MASTER_GEM_NOC_SNOC] = &qnm_gemnoc,
[MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
[MASTER_PIMEM] = &qxm_pimem,
[MASTER_GIC] = &xm_gic,
[SLAVE_APPSS] = &qhs_apss,
[SNOC_CNOC_SLV] = &qns_cnoc,
[SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
[SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
[SLAVE_OCIMEM] = &qxs_imem,
[SLAVE_PIMEM] = &qxs_pimem,
[SLAVE_SERVICE_SNOC] = &srvc_snoc,
[SLAVE_PCIE_0] = &xs_pcie_0,
[SLAVE_PCIE_1] = &xs_pcie_1,
[SLAVE_PCIE_2] = &xs_pcie_modem,
[SLAVE_QDSS_STM] = &xs_qdss_stm,
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
static const struct qcom_icc_desc sm8250_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sm8250-aggre1-noc",
.data = &sm8250_aggre1_noc},
{ .compatible = "qcom,sm8250-aggre2-noc",
.data = &sm8250_aggre2_noc},
{ .compatible = "qcom,sm8250-compute-noc",
.data = &sm8250_compute_noc},
{ .compatible = "qcom,sm8250-config-noc",
.data = &sm8250_config_noc},
{ .compatible = "qcom,sm8250-dc-noc",
.data = &sm8250_dc_noc},
{ .compatible = "qcom,sm8250-gem-noc",
.data = &sm8250_gem_noc},
{ .compatible = "qcom,sm8250-mc-virt",
.data = &sm8250_mc_virt},
{ .compatible = "qcom,sm8250-mmss-noc",
.data = &sm8250_mmss_noc},
{ .compatible = "qcom,sm8250-npu-noc",
.data = &sm8250_npu_noc},
{ .compatible = "qcom,sm8250-qup-virt",
.data = &sm8250_qup_virt },
{ .compatible = "qcom,sm8250-system-noc",
.data = &sm8250_system_noc},
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
.remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sm8250",
.of_match_table = qnoc_of_match,
},
};
module_platform_driver(qnoc_driver);
MODULE_DESCRIPTION("Qualcomm SM8250 NoC driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/interconnect/qcom/sm8250.c |
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2023 Linaro Ltd
*/
#include <linux/soc/qcom/smd-rpm.h>
#include "icc-rpm.h"
const struct rpm_clk_resource aggre1_clk = {
.resource_type = QCOM_SMD_RPM_AGGR_CLK,
.clock_id = 1,
};
EXPORT_SYMBOL_GPL(aggre1_clk);
const struct rpm_clk_resource aggre2_clk = {
.resource_type = QCOM_SMD_RPM_AGGR_CLK,
.clock_id = 2,
};
EXPORT_SYMBOL_GPL(aggre2_clk);
const struct rpm_clk_resource bimc_clk = {
.resource_type = QCOM_SMD_RPM_MEM_CLK,
.clock_id = 0,
};
EXPORT_SYMBOL_GPL(bimc_clk);
const struct rpm_clk_resource bus_0_clk = {
.resource_type = QCOM_SMD_RPM_BUS_CLK,
.clock_id = 0,
};
EXPORT_SYMBOL_GPL(bus_0_clk);
const struct rpm_clk_resource bus_1_clk = {
.resource_type = QCOM_SMD_RPM_BUS_CLK,
.clock_id = 1,
};
EXPORT_SYMBOL_GPL(bus_1_clk);
const struct rpm_clk_resource bus_2_clk = {
.resource_type = QCOM_SMD_RPM_BUS_CLK,
.clock_id = 2,
};
EXPORT_SYMBOL_GPL(bus_2_clk);
const struct rpm_clk_resource mmaxi_0_clk = {
.resource_type = QCOM_SMD_RPM_MMAXI_CLK,
.clock_id = 0,
};
EXPORT_SYMBOL_GPL(mmaxi_0_clk);
const struct rpm_clk_resource mmaxi_1_clk = {
.resource_type = QCOM_SMD_RPM_MMAXI_CLK,
.clock_id = 1,
};
EXPORT_SYMBOL_GPL(mmaxi_1_clk);
const struct rpm_clk_resource qup_clk = {
.resource_type = QCOM_SMD_RPM_QUP_CLK,
.clock_id = 0,
};
EXPORT_SYMBOL_GPL(qup_clk);
/* Branch clocks */
const struct rpm_clk_resource aggre1_branch_clk = {
.resource_type = QCOM_SMD_RPM_AGGR_CLK,
.clock_id = 1,
.branch = true,
};
EXPORT_SYMBOL_GPL(aggre1_branch_clk);
const struct rpm_clk_resource aggre2_branch_clk = {
.resource_type = QCOM_SMD_RPM_AGGR_CLK,
.clock_id = 2,
.branch = true,
};
EXPORT_SYMBOL_GPL(aggre2_branch_clk);
| linux-master | drivers/interconnect/qcom/icc-rpm-clocks.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2021, Linaro Limited
*
*/
#include <linux/interconnect-provider.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sm8350.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sm8350.h"
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
.id = SM8350_MASTER_QSPI_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8350_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
.id = SM8350_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8350_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
.id = SM8350_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8350_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
.id = SM8350_MASTER_QUP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8350_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qnm_a1noc_cfg = {
.name = "qnm_a1noc_cfg",
.id = SM8350_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8350_SLAVE_SERVICE_A1NOC },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
.id = SM8350_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8350_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
.id = SM8350_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8350_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
.id = SM8350_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8350_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node xm_usb3_1 = {
.name = "xm_usb3_1",
.id = SM8350_MASTER_USB3_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8350_SLAVE_A1NOC_SNOC },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
.id = SM8350_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8350_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qnm_a2noc_cfg = {
.name = "qnm_a2noc_cfg",
.id = SM8350_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8350_SLAVE_SERVICE_A2NOC },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
.id = SM8350_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8350_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
.id = SM8350_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8350_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
.id = SM8350_MASTER_PCIE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8350_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
.id = SM8350_MASTER_PCIE_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8350_SLAVE_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
.id = SM8350_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8350_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
.id = SM8350_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8350_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node xm_ufs_card = {
.name = "xm_ufs_card",
.id = SM8350_MASTER_UFS_CARD,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8350_SLAVE_A2NOC_SNOC },
};
static struct qcom_icc_node qnm_gemnoc_cnoc = {
.name = "qnm_gemnoc_cnoc",
.id = SM8350_MASTER_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 56,
.links = { SM8350_SLAVE_AHB2PHY_SOUTH,
SM8350_SLAVE_AHB2PHY_NORTH,
SM8350_SLAVE_AOSS,
SM8350_SLAVE_APPSS,
SM8350_SLAVE_CAMERA_CFG,
SM8350_SLAVE_CLK_CTL,
SM8350_SLAVE_CDSP_CFG,
SM8350_SLAVE_RBCPR_CX_CFG,
SM8350_SLAVE_RBCPR_MMCX_CFG,
SM8350_SLAVE_RBCPR_MX_CFG,
SM8350_SLAVE_CRYPTO_0_CFG,
SM8350_SLAVE_CX_RDPM,
SM8350_SLAVE_DCC_CFG,
SM8350_SLAVE_DISPLAY_CFG,
SM8350_SLAVE_GFX3D_CFG,
SM8350_SLAVE_HWKM,
SM8350_SLAVE_IMEM_CFG,
SM8350_SLAVE_IPA_CFG,
SM8350_SLAVE_IPC_ROUTER_CFG,
SM8350_SLAVE_LPASS,
SM8350_SLAVE_CNOC_MSS,
SM8350_SLAVE_MX_RDPM,
SM8350_SLAVE_PCIE_0_CFG,
SM8350_SLAVE_PCIE_1_CFG,
SM8350_SLAVE_PDM,
SM8350_SLAVE_PIMEM_CFG,
SM8350_SLAVE_PKA_WRAPPER_CFG,
SM8350_SLAVE_PMU_WRAPPER_CFG,
SM8350_SLAVE_QDSS_CFG,
SM8350_SLAVE_QSPI_0,
SM8350_SLAVE_QUP_0,
SM8350_SLAVE_QUP_1,
SM8350_SLAVE_QUP_2,
SM8350_SLAVE_SDCC_2,
SM8350_SLAVE_SDCC_4,
SM8350_SLAVE_SECURITY,
SM8350_SLAVE_SPSS_CFG,
SM8350_SLAVE_TCSR,
SM8350_SLAVE_TLMM,
SM8350_SLAVE_UFS_CARD_CFG,
SM8350_SLAVE_UFS_MEM_CFG,
SM8350_SLAVE_USB3_0,
SM8350_SLAVE_USB3_1,
SM8350_SLAVE_VENUS_CFG,
SM8350_SLAVE_VSENSE_CTRL_CFG,
SM8350_SLAVE_A1NOC_CFG,
SM8350_SLAVE_A2NOC_CFG,
SM8350_SLAVE_DDRSS_CFG,
SM8350_SLAVE_CNOC_MNOC_CFG,
SM8350_SLAVE_SNOC_CFG,
SM8350_SLAVE_BOOT_IMEM,
SM8350_SLAVE_IMEM,
SM8350_SLAVE_PIMEM,
SM8350_SLAVE_SERVICE_CNOC,
SM8350_SLAVE_QDSS_STM,
SM8350_SLAVE_TCU
},
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
.id = SM8350_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SM8350_SLAVE_PCIE_0,
SM8350_SLAVE_PCIE_1
},
};
static struct qcom_icc_node xm_qdss_dap = {
.name = "xm_qdss_dap",
.id = SM8350_MASTER_QDSS_DAP,
.channels = 1,
.buswidth = 8,
.num_links = 56,
.links = { SM8350_SLAVE_AHB2PHY_SOUTH,
SM8350_SLAVE_AHB2PHY_NORTH,
SM8350_SLAVE_AOSS,
SM8350_SLAVE_APPSS,
SM8350_SLAVE_CAMERA_CFG,
SM8350_SLAVE_CLK_CTL,
SM8350_SLAVE_CDSP_CFG,
SM8350_SLAVE_RBCPR_CX_CFG,
SM8350_SLAVE_RBCPR_MMCX_CFG,
SM8350_SLAVE_RBCPR_MX_CFG,
SM8350_SLAVE_CRYPTO_0_CFG,
SM8350_SLAVE_CX_RDPM,
SM8350_SLAVE_DCC_CFG,
SM8350_SLAVE_DISPLAY_CFG,
SM8350_SLAVE_GFX3D_CFG,
SM8350_SLAVE_HWKM,
SM8350_SLAVE_IMEM_CFG,
SM8350_SLAVE_IPA_CFG,
SM8350_SLAVE_IPC_ROUTER_CFG,
SM8350_SLAVE_LPASS,
SM8350_SLAVE_CNOC_MSS,
SM8350_SLAVE_MX_RDPM,
SM8350_SLAVE_PCIE_0_CFG,
SM8350_SLAVE_PCIE_1_CFG,
SM8350_SLAVE_PDM,
SM8350_SLAVE_PIMEM_CFG,
SM8350_SLAVE_PKA_WRAPPER_CFG,
SM8350_SLAVE_PMU_WRAPPER_CFG,
SM8350_SLAVE_QDSS_CFG,
SM8350_SLAVE_QSPI_0,
SM8350_SLAVE_QUP_0,
SM8350_SLAVE_QUP_1,
SM8350_SLAVE_QUP_2,
SM8350_SLAVE_SDCC_2,
SM8350_SLAVE_SDCC_4,
SM8350_SLAVE_SECURITY,
SM8350_SLAVE_SPSS_CFG,
SM8350_SLAVE_TCSR,
SM8350_SLAVE_TLMM,
SM8350_SLAVE_UFS_CARD_CFG,
SM8350_SLAVE_UFS_MEM_CFG,
SM8350_SLAVE_USB3_0,
SM8350_SLAVE_USB3_1,
SM8350_SLAVE_VENUS_CFG,
SM8350_SLAVE_VSENSE_CTRL_CFG,
SM8350_SLAVE_A1NOC_CFG,
SM8350_SLAVE_A2NOC_CFG,
SM8350_SLAVE_DDRSS_CFG,
SM8350_SLAVE_CNOC_MNOC_CFG,
SM8350_SLAVE_SNOC_CFG,
SM8350_SLAVE_BOOT_IMEM,
SM8350_SLAVE_IMEM,
SM8350_SLAVE_PIMEM,
SM8350_SLAVE_SERVICE_CNOC,
SM8350_SLAVE_QDSS_STM,
SM8350_SLAVE_TCU
},
};
static struct qcom_icc_node qnm_cnoc_dc_noc = {
.name = "qnm_cnoc_dc_noc",
.id = SM8350_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
.links = { SM8350_SLAVE_LLCC_CFG,
SM8350_SLAVE_GEM_NOC_CFG
},
};
static struct qcom_icc_node alm_gpu_tcu = {
.name = "alm_gpu_tcu",
.id = SM8350_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SM8350_SLAVE_GEM_NOC_CNOC,
SM8350_SLAVE_LLCC
},
};
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
.id = SM8350_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
.links = { SM8350_SLAVE_GEM_NOC_CNOC,
SM8350_SLAVE_LLCC
},
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
.id = SM8350_MASTER_APPSS_PROC,
.channels = 2,
.buswidth = 32,
.num_links = 3,
.links = { SM8350_SLAVE_GEM_NOC_CNOC,
SM8350_SLAVE_LLCC,
SM8350_SLAVE_MEM_NOC_PCIE_SNOC
},
};
static struct qcom_icc_node qnm_cmpnoc = {
.name = "qnm_cmpnoc",
.id = SM8350_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SM8350_SLAVE_GEM_NOC_CNOC,
SM8350_SLAVE_LLCC
},
};
static struct qcom_icc_node qnm_gemnoc_cfg = {
.name = "qnm_gemnoc_cfg",
.id = SM8350_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 5,
.links = { SM8350_SLAVE_MSS_PROC_MS_MPU_CFG,
SM8350_SLAVE_MCDMA_MS_MPU_CFG,
SM8350_SLAVE_SERVICE_GEM_NOC_1,
SM8350_SLAVE_SERVICE_GEM_NOC_2,
SM8350_SLAVE_SERVICE_GEM_NOC
},
};
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
.id = SM8350_MASTER_GFX3D,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SM8350_SLAVE_GEM_NOC_CNOC,
SM8350_SLAVE_LLCC
},
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
.id = SM8350_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8350_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
.id = SM8350_MASTER_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
.links = { SM8350_SLAVE_GEM_NOC_CNOC,
SM8350_SLAVE_LLCC
},
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
.id = SM8350_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
.links = { SM8350_SLAVE_GEM_NOC_CNOC,
SM8350_SLAVE_LLCC
},
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
.id = SM8350_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8350_SLAVE_LLCC },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
.id = SM8350_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
.links = { SM8350_SLAVE_GEM_NOC_CNOC,
SM8350_SLAVE_LLCC,
SM8350_SLAVE_MEM_NOC_PCIE_SNOC
},
};
static struct qcom_icc_node qhm_config_noc = {
.name = "qhm_config_noc",
.id = SM8350_MASTER_CNOC_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 6,
.links = { SM8350_SLAVE_LPASS_CORE_CFG,
SM8350_SLAVE_LPASS_LPI_CFG,
SM8350_SLAVE_LPASS_MPU_CFG,
SM8350_SLAVE_LPASS_TOP_CFG,
SM8350_SLAVE_SERVICES_LPASS_AML_NOC,
SM8350_SLAVE_SERVICE_LPASS_AG_NOC
},
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
.id = SM8350_MASTER_LLCC,
.channels = 4,
.buswidth = 4,
.num_links = 1,
.links = { SM8350_SLAVE_EBI1 },
};
static struct qcom_icc_node qnm_camnoc_hf = {
.name = "qnm_camnoc_hf",
.id = SM8350_MASTER_CAMNOC_HF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8350_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qnm_camnoc_icp = {
.name = "qnm_camnoc_icp",
.id = SM8350_MASTER_CAMNOC_ICP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8350_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_camnoc_sf = {
.name = "qnm_camnoc_sf",
.id = SM8350_MASTER_CAMNOC_SF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8350_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_mnoc_cfg = {
.name = "qnm_mnoc_cfg",
.id = SM8350_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8350_SLAVE_SERVICE_MNOC },
};
static struct qcom_icc_node qnm_video0 = {
.name = "qnm_video0",
.id = SM8350_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8350_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_video1 = {
.name = "qnm_video1",
.id = SM8350_MASTER_VIDEO_P1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8350_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qnm_video_cvp = {
.name = "qnm_video_cvp",
.id = SM8350_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8350_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qxm_mdp0 = {
.name = "qxm_mdp0",
.id = SM8350_MASTER_MDP0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8350_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qxm_mdp1 = {
.name = "qxm_mdp1",
.id = SM8350_MASTER_MDP1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8350_SLAVE_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qxm_rot = {
.name = "qxm_rot",
.id = SM8350_MASTER_ROTATOR,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8350_SLAVE_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node qhm_nsp_noc_config = {
.name = "qhm_nsp_noc_config",
.id = SM8350_MASTER_CDSP_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8350_SLAVE_SERVICE_NSP_NOC },
};
static struct qcom_icc_node qxm_nsp = {
.name = "qxm_nsp",
.id = SM8350_MASTER_CDSP_PROC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8350_SLAVE_CDSP_MEM_NOC },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
.id = SM8350_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8350_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
.id = SM8350_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8350_SLAVE_SNOC_GEM_NOC_SF },
};
static struct qcom_icc_node qnm_snoc_cfg = {
.name = "qnm_snoc_cfg",
.id = SM8350_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8350_SLAVE_SERVICE_SNOC },
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
.id = SM8350_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8350_SLAVE_SNOC_GEM_NOC_GC },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
.id = SM8350_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8350_SLAVE_SNOC_GEM_NOC_GC },
};
static struct qcom_icc_node qnm_mnoc_hf_disp = {
.name = "qnm_mnoc_hf_disp",
.id = SM8350_MASTER_MNOC_HF_MEM_NOC_DISP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8350_SLAVE_LLCC_DISP },
};
static struct qcom_icc_node qnm_mnoc_sf_disp = {
.name = "qnm_mnoc_sf_disp",
.id = SM8350_MASTER_MNOC_SF_MEM_NOC_DISP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8350_SLAVE_LLCC_DISP },
};
static struct qcom_icc_node llcc_mc_disp = {
.name = "llcc_mc_disp",
.id = SM8350_MASTER_LLCC_DISP,
.channels = 4,
.buswidth = 4,
.num_links = 1,
.links = { SM8350_SLAVE_EBI1_DISP },
};
static struct qcom_icc_node qxm_mdp0_disp = {
.name = "qxm_mdp0_disp",
.id = SM8350_MASTER_MDP0_DISP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP },
};
static struct qcom_icc_node qxm_mdp1_disp = {
.name = "qxm_mdp1_disp",
.id = SM8350_MASTER_MDP1_DISP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP },
};
static struct qcom_icc_node qxm_rot_disp = {
.name = "qxm_rot_disp",
.id = SM8350_MASTER_ROTATOR_DISP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
.links = { SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP },
};
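/* Slave nodes */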
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
.id = SM8350_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8350_MASTER_A1NOC_SNOC },
};
static struct qcom_icc_node srvc_aggre1_noc = {
.name = "srvc_aggre1_noc",
.id = SM8350_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
.id = SM8350_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8350_MASTER_A2NOC_SNOC },
};
static struct qcom_icc_node qns_pcie_mem_noc = {
.name = "qns_pcie_mem_noc",
.id = SM8350_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8350_MASTER_ANOC_PCIE_GEM_NOC },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
.id = SM8350_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
.id = SM8350_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy1 = {
.name = "qhs_ahb2phy1",
.id = SM8350_SLAVE_AHB2PHY_NORTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
.id = SM8350_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
.id = SM8350_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
.id = SM8350_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
.id = SM8350_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_compute_cfg = {
.name = "qhs_compute_cfg",
.id = SM8350_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
.id = SM8350_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mmcx = {
.name = "qhs_cpr_mmcx",
.id = SM8350_SLAVE_RBCPR_MMCX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mx = {
.name = "qhs_cpr_mx",
.id = SM8350_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
.id = SM8350_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cx_rdpm = {
.name = "qhs_cx_rdpm",
.id = SM8350_SLAVE_CX_RDPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dcc_cfg = {
.name = "qhs_dcc_cfg",
.id = SM8350_SLAVE_DCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
.id = SM8350_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
.id = SM8350_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_hwkm = {
.name = "qhs_hwkm",
.id = SM8350_SLAVE_HWKM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
.id = SM8350_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
.id = SM8350_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
.id = SM8350_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_cfg = {
.name = "qhs_lpass_cfg",
.id = SM8350_SLAVE_LPASS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
.links = { SM8350_MASTER_CNOC_LPASS_AG_NOC },
};
static struct qcom_icc_node qhs_mss_cfg = {
.name = "qhs_mss_cfg",
.id = SM8350_SLAVE_CNOC_MSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mx_rdpm = {
.name = "qhs_mx_rdpm",
.id = SM8350_SLAVE_MX_RDPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
.id = SM8350_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie1_cfg = {
.name = "qhs_pcie1_cfg",
.id = SM8350_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
.id = SM8350_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
.id = SM8350_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pka_wrapper_cfg = {
.name = "qhs_pka_wrapper_cfg",
.id = SM8350_SLAVE_PKA_WRAPPER_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pmu_wrapper_cfg = {
.name = "qhs_pmu_wrapper_cfg",
.id = SM8350_SLAVE_PMU_WRAPPER_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
.id = SM8350_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
.id = SM8350_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
.id = SM8350_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
.id = SM8350_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup2 = {
.name = "qhs_qup2",
.id = SM8350_SLAVE_QUP_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
.id = SM8350_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
.id = SM8350_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_security = {
.name = "qhs_security",
.id = SM8350_SLAVE_SECURITY,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_spss_cfg = {
.name = "qhs_spss_cfg",
.id = SM8350_SLAVE_SPSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
.id = SM8350_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
.id = SM8350_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_card_cfg = {
.name = "qhs_ufs_card_cfg",
.id = SM8350_SLAVE_UFS_CARD_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
.id = SM8350_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
.id = SM8350_SLAVE_USB3_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_1 = {
.name = "qhs_usb3_1",
.id = SM8350_SLAVE_USB3_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
.id = SM8350_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
.id = SM8350_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_a1_noc_cfg = {
.name = "qns_a1_noc_cfg",
.id = SM8350_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_a2_noc_cfg = {
.name = "qns_a2_noc_cfg",
.id = SM8350_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_ddrss_cfg = {
.name = "qns_ddrss_cfg",
.id = SM8350_SLAVE_DDRSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_mnoc_cfg = {
.name = "qns_mnoc_cfg",
.id = SM8350_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_snoc_cfg = {
.name = "qns_snoc_cfg",
.id = SM8350_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qxs_boot_imem = {
.name = "qxs_boot_imem",
.id = SM8350_SLAVE_BOOT_IMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
.id = SM8350_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
.id = SM8350_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_cnoc = {
.name = "srvc_cnoc",
.id = SM8350_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_pcie_0 = {
.name = "xs_pcie_0",
.id = SM8350_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_pcie_1 = {
.name = "xs_pcie_1",
.id = SM8350_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
.id = SM8350_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
.id = SM8350_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
.id = SM8350_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_gemnoc = {
.name = "qns_gemnoc",
.id = SM8350_SLAVE_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
.name = "qhs_mdsp_ms_mpu_cfg",
.id = SM8350_SLAVE_MSS_PROC_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_modem_ms_mpu_cfg = {
.name = "qhs_modem_ms_mpu_cfg",
.id = SM8350_SLAVE_MCDMA_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_gem_noc_cnoc = {
.name = "qns_gem_noc_cnoc",
.id = SM8350_SLAVE_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8350_MASTER_GEM_NOC_CNOC },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
.id = SM8350_SLAVE_LLCC,
.channels = 4,
.buswidth = 16,
.num_links = 1,
.links = { SM8350_MASTER_LLCC },
};
static struct qcom_icc_node qns_pcie = {
.name = "qns_pcie",
.id = SM8350_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_even_gemnoc = {
.name = "srvc_even_gemnoc",
.id = SM8350_SLAVE_SERVICE_GEM_NOC_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_odd_gemnoc = {
.name = "srvc_odd_gemnoc",
.id = SM8350_SLAVE_SERVICE_GEM_NOC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_sys_gemnoc = {
.name = "srvc_sys_gemnoc",
.id = SM8350_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_core = {
.name = "qhs_lpass_core",
.id = SM8350_SLAVE_LPASS_CORE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_lpi = {
.name = "qhs_lpass_lpi",
.id = SM8350_SLAVE_LPASS_LPI_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_mpu = {
.name = "qhs_lpass_mpu",
.id = SM8350_SLAVE_LPASS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_top = {
.name = "qhs_lpass_top",
.id = SM8350_SLAVE_LPASS_TOP_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_niu_aml_noc = {
.name = "srvc_niu_aml_noc",
.id = SM8350_SLAVE_SERVICES_LPASS_AML_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_niu_lpass_agnoc = {
.name = "srvc_niu_lpass_agnoc",
.id = SM8350_SLAVE_SERVICE_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
.id = SM8350_SLAVE_EBI1,
.channels = 4,
.buswidth = 4,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
.id = SM8350_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8350_MASTER_MNOC_HF_MEM_NOC },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
.id = SM8350_SLAVE_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8350_MASTER_MNOC_SF_MEM_NOC },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
.id = SM8350_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_nsp_gemnoc = {
.name = "qns_nsp_gemnoc",
.id = SM8350_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8350_MASTER_COMPUTE_NOC },
};
static struct qcom_icc_node service_nsp_noc = {
.name = "service_nsp_noc",
.id = SM8350_SLAVE_SERVICE_NSP_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
.id = SM8350_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
.links = { SM8350_MASTER_SNOC_GC_MEM_NOC },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
.id = SM8350_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
.links = { SM8350_MASTER_SNOC_SF_MEM_NOC },
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
.id = SM8350_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_llcc_disp = {
.name = "qns_llcc_disp",
.id = SM8350_SLAVE_LLCC_DISP,
.channels = 4,
.buswidth = 16,
.num_links = 1,
.links = { SM8350_MASTER_LLCC_DISP },
};
static struct qcom_icc_node ebi_disp = {
.name = "ebi_disp",
.id = SM8350_SLAVE_EBI1_DISP,
.channels = 4,
.buswidth = 4,
};
static struct qcom_icc_node qns_mem_noc_hf_disp = {
.name = "qns_mem_noc_hf_disp",
.id = SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8350_MASTER_MNOC_HF_MEM_NOC_DISP },
};
static struct qcom_icc_node qns_mem_noc_sf_disp = {
.name = "qns_mem_noc_sf_disp",
.id = SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
.links = { SM8350_MASTER_MNOC_SF_MEM_NOC_DISP },
};
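/* Bus Clock Manager (BCM) definitions */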
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
.keepalive = false,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_ce0 = {
.name = "CE0",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxm_crypto },
};
static struct qcom_icc_bcm bcm_cn0 = {
.name = "CN0",
.keepalive = true,
.num_nodes = 2,
.nodes = { &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie },
};
static struct qcom_icc_bcm bcm_cn1 = {
.name = "CN1",
.keepalive = false,
.num_nodes = 47,
.nodes = { &xm_qdss_dap,
&qhs_ahb2phy0,
&qhs_ahb2phy1,
&qhs_aoss,
&qhs_apss,
&qhs_camera_cfg,
&qhs_clk_ctl,
&qhs_compute_cfg,
&qhs_cpr_cx,
&qhs_cpr_mmcx,
&qhs_cpr_mx,
&qhs_crypto0_cfg,
&qhs_cx_rdpm,
&qhs_dcc_cfg,
&qhs_display_cfg,
&qhs_gpuss_cfg,
&qhs_hwkm,
&qhs_imem_cfg,
&qhs_ipa,
&qhs_ipc_router,
&qhs_mss_cfg,
&qhs_mx_rdpm,
&qhs_pcie0_cfg,
&qhs_pcie1_cfg,
&qhs_pimem_cfg,
&qhs_pka_wrapper_cfg,
&qhs_pmu_wrapper_cfg,
&qhs_qdss_cfg,
&qhs_qup0,
&qhs_qup1,
&qhs_qup2,
&qhs_security,
&qhs_spss_cfg,
&qhs_tcsr,
&qhs_tlmm,
&qhs_ufs_card_cfg,
&qhs_ufs_mem_cfg,
&qhs_usb3_0,
&qhs_usb3_1,
&qhs_venus_cfg,
&qhs_vsense_ctrl_cfg,
&qns_a1_noc_cfg,
&qns_a2_noc_cfg,
&qns_ddrss_cfg,
&qns_mnoc_cfg,
&qns_snoc_cfg,
&srvc_cnoc
},
};
static struct qcom_icc_bcm bcm_cn2 = {
.name = "CN2",
.keepalive = false,
.num_nodes = 5,
.nodes = { &qhs_lpass_cfg, &qhs_pdm, &qhs_qspi, &qhs_sdc2, &qhs_sdc4 },
};
static struct qcom_icc_bcm bcm_co0 = {
.name = "CO0",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_nsp_gemnoc },
};
static struct qcom_icc_bcm bcm_co3 = {
.name = "CO3",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxm_nsp },
};
static struct qcom_icc_bcm bcm_mc0 = {
.name = "MC0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &ebi },
};
static struct qcom_icc_bcm bcm_mm0 = {
.name = "MM0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_bcm bcm_mm1 = {
.name = "MM1",
.keepalive = false,
.num_nodes = 3,
.nodes = { &qnm_camnoc_hf, &qxm_mdp0, &qxm_mdp1 },
};
static struct qcom_icc_bcm bcm_mm4 = {
.name = "MM4",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_bcm bcm_mm5 = {
.name = "MM5",
.keepalive = false,
.num_nodes = 6,
.nodes = { &qnm_camnoc_icp,
&qnm_camnoc_sf,
&qnm_video0,
&qnm_video1,
&qnm_video_cvp,
&qxm_rot
},
};
static struct qcom_icc_bcm bcm_sh0 = {
.name = "SH0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_llcc },
};
static struct qcom_icc_bcm bcm_sh2 = {
.name = "SH2",
.keepalive = false,
.num_nodes = 2,
.nodes = { &alm_gpu_tcu, &alm_sys_tcu },
};
static struct qcom_icc_bcm bcm_sh3 = {
.name = "SH3",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_cmpnoc },
};
static struct qcom_icc_bcm bcm_sh4 = {
.name = "SH4",
.keepalive = false,
.num_nodes = 1,
.nodes = { &chm_apps },
};
static struct qcom_icc_bcm bcm_sn0 = {
.name = "SN0",
.keepalive = true,
.num_nodes = 1,
.nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_bcm bcm_sn2 = {
.name = "SN2",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_bcm bcm_sn3 = {
.name = "SN3",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxs_pimem },
};
static struct qcom_icc_bcm bcm_sn4 = {
.name = "SN4",
.keepalive = false,
.num_nodes = 1,
.nodes = { &xs_qdss_stm },
};
static struct qcom_icc_bcm bcm_sn5 = {
.name = "SN5",
.keepalive = false,
.num_nodes = 1,
.nodes = { &xm_pcie3_0 },
};
static struct qcom_icc_bcm bcm_sn6 = {
.name = "SN6",
.keepalive = false,
.num_nodes = 1,
.nodes = { &xm_pcie3_1 },
};
static struct qcom_icc_bcm bcm_sn7 = {
.name = "SN7",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_bcm bcm_sn8 = {
.name = "SN8",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_bcm bcm_sn14 = {
.name = "SN14",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_pcie_mem_noc },
};
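/* Display-path (_disp) counterparts of the memory/multimedia BCMs */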
static struct qcom_icc_bcm bcm_acv_disp = {
.name = "ACV",
.keepalive = false,
.num_nodes = 1,
.nodes = { &ebi_disp },
};
static struct qcom_icc_bcm bcm_mc0_disp = {
.name = "MC0",
.keepalive = false,
.num_nodes = 1,
.nodes = { &ebi_disp },
};
static struct qcom_icc_bcm bcm_mm0_disp = {
.name = "MM0",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_mem_noc_hf_disp },
};
static struct qcom_icc_bcm bcm_mm1_disp = {
.name = "MM1",
.keepalive = false,
.num_nodes = 2,
.nodes = { &qxm_mdp0_disp, &qxm_mdp1_disp },
};
static struct qcom_icc_bcm bcm_mm4_disp = {
.name = "MM4",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_mem_noc_sf_disp },
};
static struct qcom_icc_bcm bcm_mm5_disp = {
.name = "MM5",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qxm_rot_disp },
};
static struct qcom_icc_bcm bcm_sh0_disp = {
.name = "SH0",
.keepalive = false,
.num_nodes = 1,
.nodes = { &qns_llcc_disp },
};
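/*
 * Per-NoC descriptors: each NoC instance pairs a node table with the
 * BCMs voted from that NoC. Empty BCM arrays are kept so the common
 * RPMh probe code can iterate them unconditionally.
 */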
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
};
static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_QSPI_0] = &qhm_qspi,
[MASTER_QUP_1] = &qhm_qup1,
[MASTER_A1NOC_CFG] = &qnm_a1noc_cfg,
[MASTER_SDCC_4] = &xm_sdc4,
[MASTER_UFS_MEM] = &xm_ufs_mem,
[MASTER_USB3_0] = &xm_usb3_0,
[MASTER_USB3_1] = &xm_usb3_1,
[SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
static const struct qcom_icc_desc sm8350_aggre1_noc = {
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
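/*
 * The PCIe masters sit on aggre2, so the SN5/SN6/SN14 BCMs that scale
 * them are attached to this NoC's BCM list.
 */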
static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
&bcm_sn5,
&bcm_sn6,
&bcm_sn14,
};
static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QUP_0] = &qhm_qup0,
[MASTER_QUP_2] = &qhm_qup2,
[MASTER_A2NOC_CFG] = &qnm_a2noc_cfg,
[MASTER_CRYPTO] = &qxm_crypto,
[MASTER_IPA] = &qxm_ipa,
[MASTER_PCIE_0] = &xm_pcie3_0,
[MASTER_PCIE_1] = &xm_pcie3_1,
[MASTER_QDSS_ETR] = &xm_qdss_etr,
[MASTER_SDCC_2] = &xm_sdc2,
[MASTER_UFS_CARD] = &xm_ufs_card,
[SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
[SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
static const struct qcom_icc_desc sm8350_aggre2_noc = {
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
&bcm_cn1,
&bcm_cn2,
&bcm_sn3,
&bcm_sn4,
};
static struct qcom_icc_node * const config_noc_nodes[] = {
[MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
[MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
[MASTER_QDSS_DAP] = &xm_qdss_dap,
[SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
[SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
[SLAVE_AOSS] = &qhs_aoss,
[SLAVE_APPSS] = &qhs_apss,
[SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
[SLAVE_CLK_CTL] = &qhs_clk_ctl,
[SLAVE_CDSP_CFG] = &qhs_compute_cfg,
[SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
[SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx,
[SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
[SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
[SLAVE_CX_RDPM] = &qhs_cx_rdpm,
[SLAVE_DCC_CFG] = &qhs_dcc_cfg,
[SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
[SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
[SLAVE_HWKM] = &qhs_hwkm,
[SLAVE_IMEM_CFG] = &qhs_imem_cfg,
[SLAVE_IPA_CFG] = &qhs_ipa,
[SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
[SLAVE_LPASS] = &qhs_lpass_cfg,
[SLAVE_CNOC_MSS] = &qhs_mss_cfg,
[SLAVE_MX_RDPM] = &qhs_mx_rdpm,
[SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
[SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
[SLAVE_PDM] = &qhs_pdm,
[SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
[SLAVE_PKA_WRAPPER_CFG] = &qhs_pka_wrapper_cfg,
[SLAVE_PMU_WRAPPER_CFG] = &qhs_pmu_wrapper_cfg,
[SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
[SLAVE_QSPI_0] = &qhs_qspi,
[SLAVE_QUP_0] = &qhs_qup0,
[SLAVE_QUP_1] = &qhs_qup1,
[SLAVE_QUP_2] = &qhs_qup2,
[SLAVE_SDCC_2] = &qhs_sdc2,
[SLAVE_SDCC_4] = &qhs_sdc4,
[SLAVE_SECURITY] = &qhs_security,
[SLAVE_SPSS_CFG] = &qhs_spss_cfg,
[SLAVE_TCSR] = &qhs_tcsr,
[SLAVE_TLMM] = &qhs_tlmm,
[SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg,
[SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
[SLAVE_USB3_0] = &qhs_usb3_0,
[SLAVE_USB3_1] = &qhs_usb3_1,
[SLAVE_VENUS_CFG] = &qhs_venus_cfg,
[SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
[SLAVE_A1NOC_CFG] = &qns_a1_noc_cfg,
[SLAVE_A2NOC_CFG] = &qns_a2_noc_cfg,
[SLAVE_DDRSS_CFG] = &qns_ddrss_cfg,
[SLAVE_CNOC_MNOC_CFG] = &qns_mnoc_cfg,
[SLAVE_SNOC_CFG] = &qns_snoc_cfg,
[SLAVE_BOOT_IMEM] = &qxs_boot_imem,
[SLAVE_IMEM] = &qxs_imem,
[SLAVE_PIMEM] = &qxs_pimem,
[SLAVE_SERVICE_CNOC] = &srvc_cnoc,
[SLAVE_PCIE_0] = &xs_pcie_0,
[SLAVE_PCIE_1] = &xs_pcie_1,
[SLAVE_QDSS_STM] = &xs_qdss_stm,
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
static const struct qcom_icc_desc sm8350_config_noc = {
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
static struct qcom_icc_bcm * const dc_noc_bcms[] = {
};
static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
[SLAVE_GEM_NOC_CFG] = &qns_gemnoc,
};
static const struct qcom_icc_desc sm8350_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
.bcms = dc_noc_bcms,
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
&bcm_sh3,
&bcm_sh4,
&bcm_sh0_disp,
};
static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_GPU_TCU] = &alm_gpu_tcu,
[MASTER_SYS_TCU] = &alm_sys_tcu,
[MASTER_APPSS_PROC] = &chm_apps,
[MASTER_COMPUTE_NOC] = &qnm_cmpnoc,
[MASTER_GEM_NOC_CFG] = &qnm_gemnoc_cfg,
[MASTER_GFX3D] = &qnm_gpu,
[MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
[MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
[MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
[MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
[MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
[SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
[SLAVE_MCDMA_MS_MPU_CFG] = &qhs_modem_ms_mpu_cfg,
[SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
[SLAVE_LLCC] = &qns_llcc,
[SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
[SLAVE_SERVICE_GEM_NOC_1] = &srvc_even_gemnoc,
[SLAVE_SERVICE_GEM_NOC_2] = &srvc_odd_gemnoc,
[SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc,
[MASTER_MNOC_HF_MEM_NOC_DISP] = &qnm_mnoc_hf_disp,
[MASTER_MNOC_SF_MEM_NOC_DISP] = &qnm_mnoc_sf_disp,
[SLAVE_LLCC_DISP] = &qns_llcc_disp,
};
static const struct qcom_icc_desc sm8350_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
};
static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
[MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
[SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
[SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi,
[SLAVE_LPASS_MPU_CFG] = &qhs_lpass_mpu,
[SLAVE_LPASS_TOP_CFG] = &qhs_lpass_top,
[SLAVE_SERVICES_LPASS_AML_NOC] = &srvc_niu_aml_noc,
[SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
};
static const struct qcom_icc_desc sm8350_lpass_ag_noc = {
.nodes = lpass_ag_noc_nodes,
.num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
.bcms = lpass_ag_noc_bcms,
.num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
};
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
&bcm_acv_disp,
&bcm_mc0_disp,
};
static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
[MASTER_LLCC_DISP] = &llcc_mc_disp,
[SLAVE_EBI1_DISP] = &ebi_disp,
};
static const struct qcom_icc_desc sm8350_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm4,
&bcm_mm5,
&bcm_mm0_disp,
&bcm_mm1_disp,
&bcm_mm4_disp,
&bcm_mm5_disp,
};
static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
[MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
[MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
[MASTER_CNOC_MNOC_CFG] = &qnm_mnoc_cfg,
[MASTER_VIDEO_P0] = &qnm_video0,
[MASTER_VIDEO_P1] = &qnm_video1,
[MASTER_VIDEO_PROC] = &qnm_video_cvp,
[MASTER_MDP0] = &qxm_mdp0,
[MASTER_MDP1] = &qxm_mdp1,
[MASTER_ROTATOR] = &qxm_rot,
[SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
[SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
[MASTER_MDP0_DISP] = &qxm_mdp0_disp,
[MASTER_MDP1_DISP] = &qxm_mdp1_disp,
[MASTER_ROTATOR_DISP] = &qxm_rot_disp,
[SLAVE_MNOC_HF_MEM_NOC_DISP] = &qns_mem_noc_hf_disp,
[SLAVE_MNOC_SF_MEM_NOC_DISP] = &qns_mem_noc_sf_disp,
};
static const struct qcom_icc_desc sm8350_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
&bcm_co0,
&bcm_co3,
};
static struct qcom_icc_node * const nsp_noc_nodes[] = {
[MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
[MASTER_CDSP_PROC] = &qxm_nsp,
[SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
[SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
};
static const struct qcom_icc_desc sm8350_compute_noc = {
.nodes = nsp_noc_nodes,
.num_nodes = ARRAY_SIZE(nsp_noc_nodes),
.bcms = nsp_noc_bcms,
.num_bcms = ARRAY_SIZE(nsp_noc_bcms),
};
static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn2,
&bcm_sn7,
&bcm_sn8,
};
static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
[MASTER_SNOC_CFG] = &qnm_snoc_cfg,
[MASTER_PIMEM] = &qxm_pimem,
[MASTER_GIC] = &xm_gic,
[SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
[SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
[SLAVE_SERVICE_SNOC] = &srvc_snoc,
};
static const struct qcom_icc_desc sm8350_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sm8350-aggre1-noc", .data = &sm8350_aggre1_noc},
{ .compatible = "qcom,sm8350-aggre2-noc", .data = &sm8350_aggre2_noc},
{ .compatible = "qcom,sm8350-config-noc", .data = &sm8350_config_noc},
{ .compatible = "qcom,sm8350-dc-noc", .data = &sm8350_dc_noc},
{ .compatible = "qcom,sm8350-gem-noc", .data = &sm8350_gem_noc},
{ .compatible = "qcom,sm8350-lpass-ag-noc", .data = &sm8350_lpass_ag_noc},
{ .compatible = "qcom,sm8350-mc-virt", .data = &sm8350_mc_virt},
{ .compatible = "qcom,sm8350-mmss-noc", .data = &sm8350_mmss_noc},
{ .compatible = "qcom,sm8350-compute-noc", .data = &sm8350_compute_noc},
{ .compatible = "qcom,sm8350-system-noc", .data = &sm8350_system_noc},
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
.remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sm8350",
.of_match_table = qnoc_of_match,
},
};
module_platform_driver(qnoc_driver);
MODULE_DESCRIPTION("SM8350 NoC driver");
MODULE_LICENSE("GPL v2");
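/*
 * Consumer-side sketch (illustrative only, not part of this driver): a
 * peripheral driver on SM8350 votes on one of the paths declared above
 * through the generic interconnect API. The connection name "dma-mem"
 * and the bandwidth figures are hypothetical.
 *
 *	struct icc_path *path = devm_of_icc_get(dev, "dma-mem");
 *
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	icc_set_bw(path, 100000, 200000);	// avg, peak in kBps
 */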
| linux-master | drivers/interconnect/qcom/sm8350.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,osm-l3.h>
#define LUT_MAX_ENTRIES 40U
#define LUT_SRC GENMASK(31, 30)
#define LUT_L_VAL GENMASK(7, 0)
#define CLK_HW_DIV 2
/* OSM Register offsets */
#define REG_ENABLE 0x0
#define OSM_LUT_ROW_SIZE 32
#define OSM_REG_FREQ_LUT 0x110
#define OSM_REG_PERF_STATE 0x920
/* EPSS Register offsets */
#define EPSS_LUT_ROW_SIZE 4
#define EPSS_REG_L3_VOTE 0x90
#define EPSS_REG_FREQ_LUT 0x100
#define EPSS_REG_PERF_STATE 0x320
#define OSM_L3_MAX_LINKS 1
#define to_osm_l3_provider(_provider) \
container_of(_provider, struct qcom_osm_l3_icc_provider, provider)
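/**
 * struct qcom_osm_l3_icc_provider - OSM/EPSS L3 interconnect provider
 * @base: base address of the OSM/EPSS register block
 * @max_state: number of valid rows read from the frequency LUT
 * @reg_perf_state: offset of the perf-state (or L3 vote) register
 * @lut_tables: frequencies decoded from the LUT, assumed ascending by
 *		qcom_osm_l3_set()
 * @provider: generic interconnect provider
 */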
struct qcom_osm_l3_icc_provider {
void __iomem *base;
unsigned int max_state;
unsigned int reg_perf_state;
unsigned long lut_tables[LUT_MAX_ENTRIES];
struct icc_provider provider;
};
/**
* struct qcom_osm_l3_node - Qualcomm specific interconnect nodes
* @name: the node name used in debugfs
 * @links: ids of the nodes that can be reached next while traversing
* @id: a unique node identifier
* @num_links: the total number of @links
* @buswidth: width of the interconnect between a node and the bus
*/
struct qcom_osm_l3_node {
const char *name;
u16 links[OSM_L3_MAX_LINKS];
u16 id;
u16 num_links;
u16 buswidth;
};
struct qcom_osm_l3_desc {
const struct qcom_osm_l3_node * const *nodes;
size_t num_nodes;
unsigned int lut_row_size;
unsigned int reg_freq_lut;
unsigned int reg_perf_state;
};
enum {
OSM_L3_MASTER_NODE = 10000,
OSM_L3_SLAVE_NODE,
};
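/*
 * DEFINE_QNODE() derives .num_links at compile time by applying
 * ARRAY_SIZE() to a compound literal built from the variadic link list,
 * so the count can never drift from the initializer.
 */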
#define DEFINE_QNODE(_name, _id, _buswidth, ...) \
static const struct qcom_osm_l3_node _name = { \
.name = #_name, \
.id = _id, \
.buswidth = _buswidth, \
.num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
.links = { __VA_ARGS__ }, \
}
DEFINE_QNODE(osm_l3_master, OSM_L3_MASTER_NODE, 16, OSM_L3_SLAVE_NODE);
DEFINE_QNODE(osm_l3_slave, OSM_L3_SLAVE_NODE, 16);
static const struct qcom_osm_l3_node * const osm_l3_nodes[] = {
[MASTER_OSM_L3_APPS] = &osm_l3_master,
[SLAVE_OSM_L3] = &osm_l3_slave,
};
DEFINE_QNODE(epss_l3_master, OSM_L3_MASTER_NODE, 32, OSM_L3_SLAVE_NODE);
DEFINE_QNODE(epss_l3_slave, OSM_L3_SLAVE_NODE, 32);
static const struct qcom_osm_l3_node * const epss_l3_nodes[] = {
[MASTER_EPSS_L3_APPS] = &epss_l3_master,
[SLAVE_EPSS_L3_SHARED] = &epss_l3_slave,
};
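/*
 * Three descriptors cover the supported register layouts: classic OSM,
 * EPSS voting through its perf-state register, and EPSS voting through
 * the dedicated L3 vote register. They differ only in bus width, LUT
 * row size and register offsets.
 */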
static const struct qcom_osm_l3_desc osm_l3 = {
.nodes = osm_l3_nodes,
.num_nodes = ARRAY_SIZE(osm_l3_nodes),
.lut_row_size = OSM_LUT_ROW_SIZE,
.reg_freq_lut = OSM_REG_FREQ_LUT,
.reg_perf_state = OSM_REG_PERF_STATE,
};
static const struct qcom_osm_l3_desc epss_l3_perf_state = {
.nodes = epss_l3_nodes,
.num_nodes = ARRAY_SIZE(epss_l3_nodes),
.lut_row_size = EPSS_LUT_ROW_SIZE,
.reg_freq_lut = EPSS_REG_FREQ_LUT,
.reg_perf_state = EPSS_REG_PERF_STATE,
};
static const struct qcom_osm_l3_desc epss_l3_l3_vote = {
.nodes = epss_l3_nodes,
.num_nodes = ARRAY_SIZE(epss_l3_nodes),
.lut_row_size = EPSS_LUT_ROW_SIZE,
.reg_freq_lut = EPSS_REG_FREQ_LUT,
.reg_perf_state = EPSS_REG_L3_VOTE,
};
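/*
 * Translate the aggregated peak bandwidth into an L3 frequency request:
 * kBps is converted to bytes per second, divided by the interface
 * width, and the first LUT entry at or above the result is programmed
 * (falling back to the highest entry). For example, 9600000 kBps over a
 * 16-byte interface needs 9.6e9 / 16 = 600 MHz.
 */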
static int qcom_osm_l3_set(struct icc_node *src, struct icc_node *dst)
{
struct qcom_osm_l3_icc_provider *qp;
struct icc_provider *provider;
const struct qcom_osm_l3_node *qn;
unsigned int index;
u64 rate;
qn = src->data;
provider = src->provider;
qp = to_osm_l3_provider(provider);
rate = icc_units_to_bps(dst->peak_bw);
do_div(rate, qn->buswidth);
for (index = 0; index < qp->max_state - 1; index++) {
if (qp->lut_tables[index] >= rate)
break;
}
writel_relaxed(index, qp->base + qp->reg_perf_state);
return 0;
}
static int qcom_osm_l3_remove(struct platform_device *pdev)
{
struct qcom_osm_l3_icc_provider *qp = platform_get_drvdata(pdev);
icc_provider_deregister(&qp->provider);
icc_nodes_remove(&qp->provider);
return 0;
}
static int qcom_osm_l3_probe(struct platform_device *pdev)
{
u32 info, src, lval, i, prev_freq = 0, freq;
static unsigned long hw_rate, xo_rate;
struct qcom_osm_l3_icc_provider *qp;
const struct qcom_osm_l3_desc *desc;
struct icc_onecell_data *data;
struct icc_provider *provider;
const struct qcom_osm_l3_node * const *qnodes;
struct icc_node *node;
size_t num_nodes;
struct clk *clk;
int ret;
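	/*
	 * The "xo" and "alternate" clocks are only needed to decode the
	 * frequency LUT; their rates are sampled and the references
	 * dropped immediately.
	 */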
clk = clk_get(&pdev->dev, "xo");
if (IS_ERR(clk))
return PTR_ERR(clk);
xo_rate = clk_get_rate(clk);
clk_put(clk);
clk = clk_get(&pdev->dev, "alternate");
if (IS_ERR(clk))
return PTR_ERR(clk);
hw_rate = clk_get_rate(clk) / CLK_HW_DIV;
clk_put(clk);
qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
if (!qp)
return -ENOMEM;
qp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(qp->base))
return PTR_ERR(qp->base);
/* HW should be in enabled state to proceed */
if (!(readl_relaxed(qp->base + REG_ENABLE) & 0x1)) {
dev_err(&pdev->dev, "error hardware not enabled\n");
return -ENODEV;
}
desc = device_get_match_data(&pdev->dev);
if (!desc)
return -EINVAL;
qp->reg_perf_state = desc->reg_perf_state;
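	/*
	 * Decode the frequency LUT: each row carries a source select and
	 * an L value. PLL-sourced rows run at xo_rate * lval (with the
	 * usual 19.2 MHz XO, L=50 decodes to 960 MHz); otherwise the
	 * divided alternate rate applies.
	 */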
for (i = 0; i < LUT_MAX_ENTRIES; i++) {
info = readl_relaxed(qp->base + desc->reg_freq_lut +
i * desc->lut_row_size);
src = FIELD_GET(LUT_SRC, info);
lval = FIELD_GET(LUT_L_VAL, info);
if (src)
freq = xo_rate * lval;
else
freq = hw_rate;
/* Two identical consecutive frequencies mark the end of the table */
if (i > 0 && prev_freq == freq)
break;
dev_dbg(&pdev->dev, "index=%d freq=%d\n", i, freq);
qp->lut_tables[i] = freq;
prev_freq = freq;
}
qp->max_state = i;
qnodes = desc->nodes;
num_nodes = desc->num_nodes;
data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->num_nodes = num_nodes;
provider = &qp->provider;
provider->dev = &pdev->dev;
provider->set = qcom_osm_l3_set;
provider->aggregate = icc_std_aggregate;
provider->xlate = of_icc_xlate_onecell;
provider->data = data;
icc_provider_init(provider);
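	/*
	 * Create an icc node per descriptor entry and wire up its links;
	 * any failure unwinds everything created so far through
	 * icc_nodes_remove() at the err label.
	 */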
for (i = 0; i < num_nodes; i++) {
size_t j;
node = icc_node_create(qnodes[i]->id);
if (IS_ERR(node)) {
ret = PTR_ERR(node);
goto err;
}
node->name = qnodes[i]->name;
/* Cast away const and add it back in qcom_osm_l3_set() */
node->data = (void *)qnodes[i];
icc_node_add(node, provider);
for (j = 0; j < qnodes[i]->num_links; j++)
icc_link_create(node, qnodes[i]->links[j]);
data->nodes[i] = node;
}
ret = icc_provider_register(provider);
if (ret)
goto err;
platform_set_drvdata(pdev, qp);
return 0;
err:
icc_nodes_remove(provider);
return ret;
}
static const struct of_device_id osm_l3_of_match[] = {
{ .compatible = "qcom,epss-l3", .data = &epss_l3_l3_vote },
{ .compatible = "qcom,osm-l3", .data = &osm_l3 },
{ .compatible = "qcom,sc7180-osm-l3", .data = &osm_l3 },
{ .compatible = "qcom,sc7280-epss-l3", .data = &epss_l3_perf_state },
{ .compatible = "qcom,sdm845-osm-l3", .data = &osm_l3 },
{ .compatible = "qcom,sm8150-osm-l3", .data = &osm_l3 },
{ .compatible = "qcom,sc8180x-osm-l3", .data = &osm_l3 },
{ .compatible = "qcom,sm8250-epss-l3", .data = &epss_l3_perf_state },
{ }
};
MODULE_DEVICE_TABLE(of, osm_l3_of_match);
static struct platform_driver osm_l3_driver = {
.probe = qcom_osm_l3_probe,
.remove = qcom_osm_l3_remove,
.driver = {
.name = "osm-l3",
.of_match_table = osm_l3_of_match,
.sync_state = icc_sync_state,
},
};
module_platform_driver(osm_l3_driver);
MODULE_DESCRIPTION("Qualcomm OSM L3 interconnect driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/interconnect/qcom/osm-l3.c |