Columns: python_code (string, lengths 0 to 1.8M); repo_name (7 distinct values); file_path (string, lengths 5 to 99)
// SPDX-License-Identifier: GPL-2.0-or-later /* * kmsg dumper that ensures the OPAL console fully flushes panic messages * * Author: Russell Currey <[email protected]> * * Copyright 2015 IBM Corporation. */ #include <linux/kmsg_dump.h> #include <asm/opal.h> #include <asm/opal-api.h> /* * Console output is controlled by OPAL firmware. The kernel regularly calls * OPAL_POLL_EVENTS, which flushes some console output. In a panic state, * however, the kernel no longer calls OPAL_POLL_EVENTS and the panic message * may not be completely printed. This function does not actually dump the * message, it just ensures that OPAL completely flushes the console buffer. */ static void kmsg_dump_opal_console_flush(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason) { /* * Outside of a panic context the pollers will continue to run, * so we don't need to do any special flushing. */ if (reason != KMSG_DUMP_PANIC) return; opal_flush_console(0); } static struct kmsg_dumper opal_kmsg_dumper = { .dump = kmsg_dump_opal_console_flush }; void __init opal_kmsg_init(void) { int rc; /* Add our dumper to the list */ rc = kmsg_dump_register(&opal_kmsg_dumper); if (rc != 0) pr_err("opal: kmsg_dump_register failed; returned %d\n", rc); }
linux-master
arch/powerpc/platforms/powernv/opal-kmsg.c
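The opal-kmsg row above uses the generic kmsg_dump_register() pattern with the two-argument dump callback. As a rough illustration only (not taken from this repository), a minimal out-of-tree module wiring up the same callback shape could look like the sketch below; the module and function names are invented for the example.

#include <linux/module.h>
#include <linux/kmsg_dump.h>

/* Hypothetical dumper: reacts only to panics, like the OPAL console flush. */
static void example_panic_dump(struct kmsg_dumper *dumper,
			       enum kmsg_dump_reason reason)
{
	if (reason != KMSG_DUMP_PANIC)
		return;
	/* Persist or flush whatever backing store this dumper fronts. */
}

static struct kmsg_dumper example_dumper = {
	.dump = example_panic_dump,
};

static int __init example_dumper_init(void)
{
	return kmsg_dump_register(&example_dumper);
}

static void __exit example_dumper_exit(void)
{
	kmsg_dump_unregister(&example_dumper);
}

module_init(example_dumper_init);
module_exit(example_dumper_exit);
MODULE_LICENSE("GPL");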
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerNV OPAL Power-Shift-Ratio interface * * Copyright 2017 IBM Corp. */ #define pr_fmt(fmt) "opal-psr: " fmt #include <linux/of.h> #include <linux/kobject.h> #include <linux/slab.h> #include <asm/opal.h> static DEFINE_MUTEX(psr_mutex); static struct kobject *psr_kobj; static struct psr_attr { u32 handle; struct kobj_attribute attr; } *psr_attrs; static ssize_t psr_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct psr_attr *psr_attr = container_of(attr, struct psr_attr, attr); struct opal_msg msg; int psr, ret, token; token = opal_async_get_token_interruptible(); if (token < 0) { pr_devel("Failed to get token\n"); return token; } ret = mutex_lock_interruptible(&psr_mutex); if (ret) goto out_token; ret = opal_get_power_shift_ratio(psr_attr->handle, token, (u32 *)__pa(&psr)); switch (ret) { case OPAL_ASYNC_COMPLETION: ret = opal_async_wait_response(token, &msg); if (ret) { pr_devel("Failed to wait for the async response\n"); ret = -EIO; goto out; } ret = opal_error_code(opal_get_async_rc(msg)); if (!ret) { ret = sprintf(buf, "%u\n", be32_to_cpu(psr)); if (ret < 0) ret = -EIO; } break; case OPAL_SUCCESS: ret = sprintf(buf, "%u\n", be32_to_cpu(psr)); if (ret < 0) ret = -EIO; break; default: ret = opal_error_code(ret); } out: mutex_unlock(&psr_mutex); out_token: opal_async_release_token(token); return ret; } static ssize_t psr_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { struct psr_attr *psr_attr = container_of(attr, struct psr_attr, attr); struct opal_msg msg; int psr, ret, token; ret = kstrtoint(buf, 0, &psr); if (ret) return ret; token = opal_async_get_token_interruptible(); if (token < 0) { pr_devel("Failed to get token\n"); return token; } ret = mutex_lock_interruptible(&psr_mutex); if (ret) goto out_token; ret = opal_set_power_shift_ratio(psr_attr->handle, token, psr); switch (ret) { case OPAL_ASYNC_COMPLETION: ret = opal_async_wait_response(token, &msg); if (ret) { pr_devel("Failed to wait for the async response\n"); ret = -EIO; goto out; } ret = opal_error_code(opal_get_async_rc(msg)); if (!ret) ret = count; break; case OPAL_SUCCESS: ret = count; break; default: ret = opal_error_code(ret); } out: mutex_unlock(&psr_mutex); out_token: opal_async_release_token(token); return ret; } void __init opal_psr_init(void) { struct device_node *psr, *node; int i = 0; psr = of_find_compatible_node(NULL, NULL, "ibm,opal-power-shift-ratio"); if (!psr) { pr_devel("Power-shift-ratio node not found\n"); return; } psr_attrs = kcalloc(of_get_child_count(psr), sizeof(*psr_attrs), GFP_KERNEL); if (!psr_attrs) goto out_put_psr; psr_kobj = kobject_create_and_add("psr", opal_kobj); if (!psr_kobj) { pr_warn("Failed to create psr kobject\n"); goto out; } for_each_child_of_node(psr, node) { if (of_property_read_u32(node, "handle", &psr_attrs[i].handle)) goto out_kobj; sysfs_attr_init(&psr_attrs[i].attr.attr); if (of_property_read_string(node, "label", &psr_attrs[i].attr.attr.name)) goto out_kobj; psr_attrs[i].attr.attr.mode = 0664; psr_attrs[i].attr.show = psr_show; psr_attrs[i].attr.store = psr_store; if (sysfs_create_file(psr_kobj, &psr_attrs[i].attr.attr)) { pr_devel("Failed to create psr sysfs file %s\n", psr_attrs[i].attr.attr.name); goto out_kobj; } i++; } of_node_put(psr); return; out_kobj: of_node_put(node); kobject_put(psr_kobj); out: kfree(psr_attrs); out_put_psr: of_node_put(psr); }
linux-master
arch/powerpc/platforms/powernv/opal-psr.c
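opal-psr.c above creates one read/write sysfs attribute per power-shift-ratio handle under a "psr" kobject beneath the OPAL firmware kobject, which normally surfaces as /sys/firmware/opal/psr. A small userspace sketch that walks that directory and prints each ratio might look like the following; the path and error handling are assumptions for illustration, not part of the kernel source.

#include <stdio.h>
#include <dirent.h>

/* Assumed sysfs location for the attributes created by opal_psr_init(). */
#define PSR_DIR "/sys/firmware/opal/psr"

int main(void)
{
	DIR *dir = opendir(PSR_DIR);
	struct dirent *de;
	char path[512], buf[32];
	FILE *f;

	if (!dir) {
		perror("opendir " PSR_DIR);
		return 1;
	}
	while ((de = readdir(dir)) != NULL) {
		if (de->d_name[0] == '.')
			continue;
		snprintf(path, sizeof(path), PSR_DIR "/%s", de->d_name);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (fgets(buf, sizeof(buf), f))
			printf("%s: %s", de->d_name, buf); /* psr_show() prints "%u\n" */
		fclose(f);
	}
	closedir(dir);
	return 0;
}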
// SPDX-License-Identifier: GPL-2.0-only /* * OPAL Runtime Diagnostics interface driver * Supported on POWERNV platform * * Copyright IBM Corporation 2015 */ #define pr_fmt(fmt) "opal-prd: " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/miscdevice.h> #include <linux/fs.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/poll.h> #include <linux/mm.h> #include <linux/slab.h> #include <asm/opal-prd.h> #include <asm/opal.h> #include <asm/io.h> #include <linux/uaccess.h> struct opal_prd_msg { union { struct opal_prd_msg_header header; DECLARE_FLEX_ARRAY(u8, data); }; }; /* * The msg member must be at the end of the struct, as it's followed by the * message data. */ struct opal_prd_msg_queue_item { struct list_head list; struct opal_prd_msg msg; }; static struct device_node *prd_node; static LIST_HEAD(opal_prd_msg_queue); static DEFINE_SPINLOCK(opal_prd_msg_queue_lock); static DECLARE_WAIT_QUEUE_HEAD(opal_prd_msg_wait); static atomic_t prd_usage; static bool opal_prd_range_is_valid(uint64_t addr, uint64_t size) { struct device_node *parent, *node; bool found; if (addr + size < addr) return false; parent = of_find_node_by_path("/reserved-memory"); if (!parent) return false; found = false; for_each_child_of_node(parent, node) { uint64_t range_addr, range_size, range_end; const __be32 *addrp; const char *label; addrp = of_get_address(node, 0, &range_size, NULL); range_addr = of_read_number(addrp, 2); range_end = range_addr + range_size; label = of_get_property(node, "ibm,prd-label", NULL); /* PRD ranges need a label */ if (!label) continue; if (range_end <= range_addr) continue; if (addr >= range_addr && addr + size <= range_end) { found = true; of_node_put(node); break; } } of_node_put(parent); return found; } static int opal_prd_open(struct inode *inode, struct file *file) { /* * Prevent multiple (separate) processes from concurrent interactions * with the FW PRD channel */ if (atomic_xchg(&prd_usage, 1) == 1) return -EBUSY; return 0; } /* * opal_prd_mmap - maps firmware-provided ranges into userspace * @file: file structure for the device * @vma: VMA to map the registers into */ static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma) { size_t addr, size; pgprot_t page_prot; pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n", vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_flags); addr = vma->vm_pgoff << PAGE_SHIFT; size = vma->vm_end - vma->vm_start; /* ensure we're mapping within one of the allowable ranges */ if (!opal_prd_range_is_valid(addr, size)) return -EINVAL; page_prot = phys_mem_access_prot(file, vma->vm_pgoff, size, vma->vm_page_prot); return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size, page_prot); } static bool opal_msg_queue_empty(void) { unsigned long flags; bool ret; spin_lock_irqsave(&opal_prd_msg_queue_lock, flags); ret = list_empty(&opal_prd_msg_queue); spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags); return ret; } static __poll_t opal_prd_poll(struct file *file, struct poll_table_struct *wait) { poll_wait(file, &opal_prd_msg_wait, wait); if (!opal_msg_queue_empty()) return EPOLLIN | EPOLLRDNORM; return 0; } static ssize_t opal_prd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct opal_prd_msg_queue_item *item; unsigned long flags; ssize_t size, err; int rc; /* we need at least a header's worth of data */ if (count < sizeof(item->msg.header)) return -EINVAL; if (*ppos) return -ESPIPE; item = NULL; for (;;) { 
spin_lock_irqsave(&opal_prd_msg_queue_lock, flags); if (!list_empty(&opal_prd_msg_queue)) { item = list_first_entry(&opal_prd_msg_queue, struct opal_prd_msg_queue_item, list); list_del(&item->list); } spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags); if (item) break; if (file->f_flags & O_NONBLOCK) return -EAGAIN; rc = wait_event_interruptible(opal_prd_msg_wait, !opal_msg_queue_empty()); if (rc) return -EINTR; } size = be16_to_cpu(item->msg.header.size); if (size > count) { err = -EINVAL; goto err_requeue; } rc = copy_to_user(buf, &item->msg, size); if (rc) { err = -EFAULT; goto err_requeue; } kfree(item); return size; err_requeue: /* eep! re-queue at the head of the list */ spin_lock_irqsave(&opal_prd_msg_queue_lock, flags); list_add(&item->list, &opal_prd_msg_queue); spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags); return err; } static ssize_t opal_prd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct opal_prd_msg_header hdr; struct opal_prd_msg *msg; ssize_t size; int rc; size = sizeof(hdr); if (count < size) return -EINVAL; /* grab the header */ rc = copy_from_user(&hdr, buf, sizeof(hdr)); if (rc) return -EFAULT; size = be16_to_cpu(hdr.size); msg = memdup_user(buf, size); if (IS_ERR(msg)) return PTR_ERR(msg); rc = opal_prd_msg(msg); if (rc) { pr_warn("write: opal_prd_msg returned %d\n", rc); size = -EIO; } kfree(msg); return size; } static int opal_prd_release(struct inode *inode, struct file *file) { struct opal_prd_msg msg; msg.header.size = cpu_to_be16(sizeof(msg)); msg.header.type = OPAL_PRD_MSG_TYPE_FINI; opal_prd_msg(&msg); atomic_xchg(&prd_usage, 0); return 0; } static long opal_prd_ioctl(struct file *file, unsigned int cmd, unsigned long param) { struct opal_prd_info info; struct opal_prd_scom scom; int rc = 0; switch (cmd) { case OPAL_PRD_GET_INFO: memset(&info, 0, sizeof(info)); info.version = OPAL_PRD_KERNEL_VERSION; rc = copy_to_user((void __user *)param, &info, sizeof(info)); if (rc) return -EFAULT; break; case OPAL_PRD_SCOM_READ: rc = copy_from_user(&scom, (void __user *)param, sizeof(scom)); if (rc) return -EFAULT; scom.rc = opal_xscom_read(scom.chip, scom.addr, (__be64 *)&scom.data); scom.data = be64_to_cpu(scom.data); pr_devel("ioctl SCOM_READ: chip %llx addr %016llx data %016llx rc %lld\n", scom.chip, scom.addr, scom.data, scom.rc); rc = copy_to_user((void __user *)param, &scom, sizeof(scom)); if (rc) return -EFAULT; break; case OPAL_PRD_SCOM_WRITE: rc = copy_from_user(&scom, (void __user *)param, sizeof(scom)); if (rc) return -EFAULT; scom.rc = opal_xscom_write(scom.chip, scom.addr, scom.data); pr_devel("ioctl SCOM_WRITE: chip %llx addr %016llx data %016llx rc %lld\n", scom.chip, scom.addr, scom.data, scom.rc); rc = copy_to_user((void __user *)param, &scom, sizeof(scom)); if (rc) return -EFAULT; break; default: rc = -EINVAL; } return rc; } static const struct file_operations opal_prd_fops = { .open = opal_prd_open, .mmap = opal_prd_mmap, .poll = opal_prd_poll, .read = opal_prd_read, .write = opal_prd_write, .unlocked_ioctl = opal_prd_ioctl, .release = opal_prd_release, .owner = THIS_MODULE, }; static struct miscdevice opal_prd_dev = { .minor = MISC_DYNAMIC_MINOR, .name = "opal-prd", .fops = &opal_prd_fops, }; /* opal interface */ static int opal_prd_msg_notifier(struct notifier_block *nb, unsigned long msg_type, void *_msg) { struct opal_prd_msg_queue_item *item; struct opal_prd_msg_header *hdr; struct opal_msg *msg = _msg; int msg_size, item_size; unsigned long flags; if (msg_type != OPAL_MSG_PRD && msg_type != 
OPAL_MSG_PRD2) return 0; /* Calculate total size of the message and item we need to store. The * 'size' field in the header includes the header itself. */ hdr = (void *)msg->params; msg_size = be16_to_cpu(hdr->size); item_size = msg_size + sizeof(*item) - sizeof(item->msg); item = kzalloc(item_size, GFP_ATOMIC); if (!item) return -ENOMEM; memcpy(&item->msg.data, msg->params, msg_size); spin_lock_irqsave(&opal_prd_msg_queue_lock, flags); list_add_tail(&item->list, &opal_prd_msg_queue); spin_unlock_irqrestore(&opal_prd_msg_queue_lock, flags); wake_up_interruptible(&opal_prd_msg_wait); return 0; } static struct notifier_block opal_prd_event_nb = { .notifier_call = opal_prd_msg_notifier, .next = NULL, .priority = 0, }; static struct notifier_block opal_prd_event_nb2 = { .notifier_call = opal_prd_msg_notifier, .next = NULL, .priority = 0, }; static int opal_prd_probe(struct platform_device *pdev) { int rc; if (!pdev || !pdev->dev.of_node) return -ENODEV; /* We should only have one prd driver instance per machine; ensure * that we only get a valid probe on a single OF node. */ if (prd_node) return -EBUSY; prd_node = pdev->dev.of_node; rc = opal_message_notifier_register(OPAL_MSG_PRD, &opal_prd_event_nb); if (rc) { pr_err("Couldn't register event notifier\n"); return rc; } rc = opal_message_notifier_register(OPAL_MSG_PRD2, &opal_prd_event_nb2); if (rc) { pr_err("Couldn't register PRD2 event notifier\n"); opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb); return rc; } rc = misc_register(&opal_prd_dev); if (rc) { pr_err("failed to register miscdev\n"); opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb); opal_message_notifier_unregister(OPAL_MSG_PRD2, &opal_prd_event_nb2); return rc; } return 0; } static int opal_prd_remove(struct platform_device *pdev) { misc_deregister(&opal_prd_dev); opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb); opal_message_notifier_unregister(OPAL_MSG_PRD2, &opal_prd_event_nb2); return 0; } static const struct of_device_id opal_prd_match[] = { { .compatible = "ibm,opal-prd" }, { }, }; static struct platform_driver opal_prd_driver = { .driver = { .name = "opal-prd", .of_match_table = opal_prd_match, }, .probe = opal_prd_probe, .remove = opal_prd_remove, }; module_platform_driver(opal_prd_driver); MODULE_DEVICE_TABLE(of, opal_prd_match); MODULE_DESCRIPTION("PowerNV OPAL runtime diagnostic driver"); MODULE_LICENSE("GPL");
linux-master
arch/powerpc/platforms/powernv/opal-prd.c
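The opal-prd driver above is consumed from userspace through the /dev/opal-prd miscdevice: open() takes exclusive ownership, poll() signals queued firmware messages, and read() hands back one message at a time provided the count covers at least the message header. A hedged sketch of that read loop follows; the buffer size is arbitrary, and a real consumer (such as an opal-prd daemon) would use the header definitions from <asm/opal-prd.h> rather than raw bytes.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>

int main(void)
{
	/* open() enforces a single PRD client via the prd_usage atomic. */
	int fd = open("/dev/opal-prd", O_RDWR);
	unsigned char buf[16384];	/* arbitrary, >= message header size */
	struct pollfd pfd;
	ssize_t len;

	if (fd < 0) {
		perror("open /dev/opal-prd");
		return 1;
	}
	pfd.fd = fd;
	pfd.events = POLLIN;
	for (;;) {
		if (poll(&pfd, 1, -1) < 0)
			break;
		len = read(fd, buf, sizeof(buf));	/* one queued message */
		if (len < 0)
			break;
		/* First byte is assumed to be the header's type field. */
		printf("received %zd-byte PRD message, type 0x%02x\n",
		       len, buf[0]);
	}
	close(fd);
	return 0;
}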
// SPDX-License-Identifier: GPL-2.0 /* * Ultravisor high level interfaces * * Copyright 2019, IBM Corporation. * */ #include <linux/init.h> #include <linux/printk.h> #include <linux/of_fdt.h> #include <linux/of.h> #include <asm/ultravisor.h> #include <asm/firmware.h> #include <asm/machdep.h> #include "powernv.h" static struct kobject *ultravisor_kobj; int __init early_init_dt_scan_ultravisor(unsigned long node, const char *uname, int depth, void *data) { if (!of_flat_dt_is_compatible(node, "ibm,ultravisor")) return 0; powerpc_firmware_features |= FW_FEATURE_ULTRAVISOR; pr_debug("Ultravisor detected!\n"); return 1; } static struct memcons *uv_memcons; static ssize_t uv_msglog_read(struct file *file, struct kobject *kobj, struct bin_attribute *bin_attr, char *to, loff_t pos, size_t count) { return memcons_copy(uv_memcons, to, pos, count); } static struct bin_attribute uv_msglog_attr = { .attr = {.name = "msglog", .mode = 0400}, .read = uv_msglog_read }; static int __init uv_init(void) { struct device_node *node; if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR)) return 0; node = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware"); if (!node) return -ENODEV; uv_memcons = memcons_init(node, "memcons"); of_node_put(node); if (!uv_memcons) return -ENOENT; uv_msglog_attr.size = memcons_get_size(uv_memcons); ultravisor_kobj = kobject_create_and_add("ultravisor", firmware_kobj); if (!ultravisor_kobj) return -ENOMEM; return sysfs_create_bin_file(ultravisor_kobj, &uv_msglog_attr); } machine_subsys_initcall(powernv, uv_init);
linux-master
arch/powerpc/platforms/powernv/ultravisor.c
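ultravisor.c above exports the ultravisor memory console as a read-only binary sysfs file under the firmware kobject, normally visible as /sys/firmware/ultravisor/msglog. A minimal userspace dump of that log could look like the sketch below; the path is an assumption derived from the kobject and attribute names in the code.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Assumed location of the "msglog" bin_attribute created in uv_init(). */
	int fd = open("/sys/firmware/ultravisor/msglog", O_RDONLY);
	char buf[4096];
	ssize_t len;

	if (fd < 0) {
		perror("open msglog");
		return 1;
	}
	while ((len = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, len, stdout);
	close(fd);
	return 0;
}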
// SPDX-License-Identifier: GPL-2.0-or-later /* * Firmware-Assisted Dump support on POWER platform (OPAL). * * Copyright 2019, Hari Bathini, IBM Corporation. */ #define pr_fmt(fmt) "opal fadump: " fmt #include <linux/string.h> #include <linux/seq_file.h> #include <linux/of.h> #include <linux/of_fdt.h> #include <linux/libfdt.h> #include <linux/mm.h> #include <linux/crash_dump.h> #include <asm/page.h> #include <asm/opal.h> #include <asm/fadump-internal.h> #include "opal-fadump.h" #ifdef CONFIG_PRESERVE_FA_DUMP /* * When dump is active but PRESERVE_FA_DUMP is enabled on the kernel, * ensure crash data is preserved in hope that the subsequent memory * preserving kernel boot is going to process this crash data. */ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) { const struct opal_fadump_mem_struct *opal_fdm_active; const __be32 *prop; unsigned long dn; u64 addr = 0; s64 ret; dn = of_get_flat_dt_subnode_by_name(node, "dump"); if (dn == -FDT_ERR_NOTFOUND) return; /* * Check if dump has been initiated on last reboot. */ prop = of_get_flat_dt_prop(dn, "mpipl-boot", NULL); if (!prop) return; ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &addr); if ((ret != OPAL_SUCCESS) || !addr) { pr_debug("Could not get Kernel metadata (%lld)\n", ret); return; } /* * Preserve memory only if kernel memory regions are registered * with f/w for MPIPL. */ addr = be64_to_cpu(addr); pr_debug("Kernel metadata addr: %llx\n", addr); opal_fdm_active = (void *)addr; if (be16_to_cpu(opal_fdm_active->registered_regions) == 0) return; ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_BOOT_MEM, &addr); if ((ret != OPAL_SUCCESS) || !addr) { pr_err("Failed to get boot memory tag (%lld)\n", ret); return; } /* * Memory below this address can be used for booting a * capture kernel or petitboot kernel. Preserve everything * above this address for processing crashdump. */ fadump_conf->boot_mem_top = be64_to_cpu(addr); pr_debug("Preserve everything above %llx\n", fadump_conf->boot_mem_top); pr_info("Firmware-assisted dump is active.\n"); fadump_conf->dump_active = 1; } #else /* CONFIG_PRESERVE_FA_DUMP */ static const struct opal_fadump_mem_struct *opal_fdm_active; static const struct opal_mpipl_fadump *opal_cpu_metadata; static struct opal_fadump_mem_struct *opal_fdm; #ifdef CONFIG_OPAL_CORE extern bool kernel_initiated; #endif static int opal_fadump_unregister(struct fw_dump *fadump_conf); static void opal_fadump_update_config(struct fw_dump *fadump_conf, const struct opal_fadump_mem_struct *fdm) { pr_debug("Boot memory regions count: %d\n", be16_to_cpu(fdm->region_cnt)); /* * The destination address of the first boot memory region is the * destination address of boot memory regions. */ fadump_conf->boot_mem_dest_addr = be64_to_cpu(fdm->rgn[0].dest); pr_debug("Destination address of boot memory regions: %#016llx\n", fadump_conf->boot_mem_dest_addr); fadump_conf->fadumphdr_addr = be64_to_cpu(fdm->fadumphdr_addr); } /* * This function is called in the capture kernel to get configuration details * from metadata setup by the first kernel. 
*/ static void __init opal_fadump_get_config(struct fw_dump *fadump_conf, const struct opal_fadump_mem_struct *fdm) { unsigned long base, size, last_end, hole_size; int i; if (!fadump_conf->dump_active) return; last_end = 0; hole_size = 0; fadump_conf->boot_memory_size = 0; pr_debug("Boot memory regions:\n"); for (i = 0; i < be16_to_cpu(fdm->region_cnt); i++) { base = be64_to_cpu(fdm->rgn[i].src); size = be64_to_cpu(fdm->rgn[i].size); pr_debug("\t[%03d] base: 0x%lx, size: 0x%lx\n", i, base, size); fadump_conf->boot_mem_addr[i] = base; fadump_conf->boot_mem_sz[i] = size; fadump_conf->boot_memory_size += size; hole_size += (base - last_end); last_end = base + size; } /* * Start address of reserve dump area (permanent reservation) for * re-registering FADump after dump capture. */ fadump_conf->reserve_dump_area_start = be64_to_cpu(fdm->rgn[0].dest); /* * Rarely, but it can so happen that system crashes before all * boot memory regions are registered for MPIPL. In such * cases, warn that the vmcore may not be accurate and proceed * anyway as that is the best bet considering free pages, cache * pages, user pages, etc are usually filtered out. * * Hope the memory that could not be preserved only has pages * that are usually filtered out while saving the vmcore. */ if (be16_to_cpu(fdm->region_cnt) > be16_to_cpu(fdm->registered_regions)) { pr_warn("Not all memory regions were saved!!!\n"); pr_warn(" Unsaved memory regions:\n"); i = be16_to_cpu(fdm->registered_regions); while (i < be16_to_cpu(fdm->region_cnt)) { pr_warn("\t[%03d] base: 0x%llx, size: 0x%llx\n", i, be64_to_cpu(fdm->rgn[i].src), be64_to_cpu(fdm->rgn[i].size)); i++; } pr_warn("If the unsaved regions only contain pages that are filtered out (eg. free/user pages), the vmcore should still be usable.\n"); pr_warn("WARNING: If the unsaved regions contain kernel pages, the vmcore will be corrupted.\n"); } fadump_conf->boot_mem_top = (fadump_conf->boot_memory_size + hole_size); fadump_conf->boot_mem_regs_cnt = be16_to_cpu(fdm->region_cnt); opal_fadump_update_config(fadump_conf, fdm); } /* Initialize kernel metadata */ static void opal_fadump_init_metadata(struct opal_fadump_mem_struct *fdm) { fdm->version = OPAL_FADUMP_VERSION; fdm->region_cnt = cpu_to_be16(0); fdm->registered_regions = cpu_to_be16(0); fdm->fadumphdr_addr = cpu_to_be64(0); } static u64 opal_fadump_init_mem_struct(struct fw_dump *fadump_conf) { u64 addr = fadump_conf->reserve_dump_area_start; u16 reg_cnt; int i; opal_fdm = __va(fadump_conf->kernel_metadata); opal_fadump_init_metadata(opal_fdm); /* Boot memory regions */ reg_cnt = be16_to_cpu(opal_fdm->region_cnt); for (i = 0; i < fadump_conf->boot_mem_regs_cnt; i++) { opal_fdm->rgn[i].src = cpu_to_be64(fadump_conf->boot_mem_addr[i]); opal_fdm->rgn[i].dest = cpu_to_be64(addr); opal_fdm->rgn[i].size = cpu_to_be64(fadump_conf->boot_mem_sz[i]); reg_cnt++; addr += fadump_conf->boot_mem_sz[i]; } opal_fdm->region_cnt = cpu_to_be16(reg_cnt); /* * Kernel metadata is passed to f/w and retrieved in capture kernel. * So, use it to save fadump header address instead of calculating it. */ opal_fdm->fadumphdr_addr = cpu_to_be64(be64_to_cpu(opal_fdm->rgn[0].dest) + fadump_conf->boot_memory_size); opal_fadump_update_config(fadump_conf, opal_fdm); return addr; } static u64 opal_fadump_get_metadata_size(void) { return PAGE_ALIGN(sizeof(struct opal_fadump_mem_struct)); } static int opal_fadump_setup_metadata(struct fw_dump *fadump_conf) { int err = 0; s64 ret; /* * Use the last page(s) in FADump memory reservation for * kernel metadata. 
*/ fadump_conf->kernel_metadata = (fadump_conf->reserve_dump_area_start + fadump_conf->reserve_dump_area_size - opal_fadump_get_metadata_size()); pr_info("Kernel metadata addr: %llx\n", fadump_conf->kernel_metadata); /* Initialize kernel metadata before registering the address with f/w */ opal_fdm = __va(fadump_conf->kernel_metadata); opal_fadump_init_metadata(opal_fdm); /* * Register metadata address with f/w. Can be retrieved in * the capture kernel. */ ret = opal_mpipl_register_tag(OPAL_MPIPL_TAG_KERNEL, fadump_conf->kernel_metadata); if (ret != OPAL_SUCCESS) { pr_err("Failed to set kernel metadata tag!\n"); err = -EPERM; } /* * Register boot memory top address with f/w. Should be retrieved * by a kernel that intends to preserve crash'ed kernel's memory. */ ret = opal_mpipl_register_tag(OPAL_MPIPL_TAG_BOOT_MEM, fadump_conf->boot_mem_top); if (ret != OPAL_SUCCESS) { pr_err("Failed to set boot memory tag!\n"); err = -EPERM; } return err; } static u64 opal_fadump_get_bootmem_min(void) { return OPAL_FADUMP_MIN_BOOT_MEM; } static int opal_fadump_register(struct fw_dump *fadump_conf) { s64 rc = OPAL_PARAMETER; u16 registered_regs; int i, err = -EIO; registered_regs = be16_to_cpu(opal_fdm->registered_regions); for (i = 0; i < be16_to_cpu(opal_fdm->region_cnt); i++) { rc = opal_mpipl_update(OPAL_MPIPL_ADD_RANGE, be64_to_cpu(opal_fdm->rgn[i].src), be64_to_cpu(opal_fdm->rgn[i].dest), be64_to_cpu(opal_fdm->rgn[i].size)); if (rc != OPAL_SUCCESS) break; registered_regs++; } opal_fdm->registered_regions = cpu_to_be16(registered_regs); switch (rc) { case OPAL_SUCCESS: pr_info("Registration is successful!\n"); fadump_conf->dump_registered = 1; err = 0; break; case OPAL_RESOURCE: /* If MAX regions limit in f/w is hit, warn and proceed. */ pr_warn("%d regions could not be registered for MPIPL as MAX limit is reached!\n", (be16_to_cpu(opal_fdm->region_cnt) - be16_to_cpu(opal_fdm->registered_regions))); fadump_conf->dump_registered = 1; err = 0; break; case OPAL_PARAMETER: pr_err("Failed to register. Parameter Error(%lld).\n", rc); break; case OPAL_HARDWARE: pr_err("Support not available.\n"); fadump_conf->fadump_supported = 0; fadump_conf->fadump_enabled = 0; break; default: pr_err("Failed to register. Unknown Error(%lld).\n", rc); break; } /* * If some regions were registered before OPAL_MPIPL_ADD_RANGE * OPAL call failed, unregister all regions. */ if ((err < 0) && (be16_to_cpu(opal_fdm->registered_regions) > 0)) opal_fadump_unregister(fadump_conf); return err; } static int opal_fadump_unregister(struct fw_dump *fadump_conf) { s64 rc; rc = opal_mpipl_update(OPAL_MPIPL_REMOVE_ALL, 0, 0, 0); if (rc) { pr_err("Failed to un-register - unexpected Error(%lld).\n", rc); return -EIO; } opal_fdm->registered_regions = cpu_to_be16(0); fadump_conf->dump_registered = 0; return 0; } static int opal_fadump_invalidate(struct fw_dump *fadump_conf) { s64 rc; rc = opal_mpipl_update(OPAL_MPIPL_FREE_PRESERVED_MEMORY, 0, 0, 0); if (rc) { pr_err("Failed to invalidate - unexpected Error(%lld).\n", rc); return -EIO; } fadump_conf->dump_active = 0; opal_fdm_active = NULL; return 0; } static void opal_fadump_cleanup(struct fw_dump *fadump_conf) { s64 ret; ret = opal_mpipl_register_tag(OPAL_MPIPL_TAG_KERNEL, 0); if (ret != OPAL_SUCCESS) pr_warn("Could not reset (%llu) kernel metadata tag!\n", ret); } /* * Verify if CPU state data is available. If available, do a bit of sanity * checking before processing this data. 
*/ static bool __init is_opal_fadump_cpu_data_valid(struct fw_dump *fadump_conf) { if (!opal_cpu_metadata) return false; fadump_conf->cpu_state_data_version = be32_to_cpu(opal_cpu_metadata->cpu_data_version); fadump_conf->cpu_state_entry_size = be32_to_cpu(opal_cpu_metadata->cpu_data_size); fadump_conf->cpu_state_dest_vaddr = (u64)__va(be64_to_cpu(opal_cpu_metadata->region[0].dest)); fadump_conf->cpu_state_data_size = be64_to_cpu(opal_cpu_metadata->region[0].size); if (fadump_conf->cpu_state_data_version != HDAT_FADUMP_CPU_DATA_VER) { pr_warn("Supported CPU state data version: %u, found: %d!\n", HDAT_FADUMP_CPU_DATA_VER, fadump_conf->cpu_state_data_version); pr_warn("WARNING: F/W using newer CPU state data format!!\n"); } if ((fadump_conf->cpu_state_dest_vaddr == 0) || (fadump_conf->cpu_state_entry_size == 0) || (fadump_conf->cpu_state_entry_size > fadump_conf->cpu_state_data_size)) { pr_err("CPU state data is invalid. Ignoring!\n"); return false; } return true; } /* * Convert CPU state data saved at the time of crash into ELF notes. * * While the crashing CPU's register data is saved by the kernel, CPU state * data for all CPUs is saved by f/w. In CPU state data provided by f/w, * each register entry is of 16 bytes, a numerical identifier along with * a GPR/SPR flag in the first 8 bytes and the register value in the next * 8 bytes. For more details refer to F/W documentation. If this data is * missing or in unsupported format, append crashing CPU's register data * saved by the kernel in the PT_NOTE, to have something to work with in * the vmcore file. */ static int __init opal_fadump_build_cpu_notes(struct fw_dump *fadump_conf, struct fadump_crash_info_header *fdh) { u32 thread_pir, size_per_thread, regs_offset, regs_cnt, reg_esize; struct hdat_fadump_thread_hdr *thdr; bool is_cpu_data_valid = false; u32 num_cpus = 1, *note_buf; struct pt_regs regs; char *bufp; int rc, i; if (is_opal_fadump_cpu_data_valid(fadump_conf)) { size_per_thread = fadump_conf->cpu_state_entry_size; num_cpus = (fadump_conf->cpu_state_data_size / size_per_thread); bufp = __va(fadump_conf->cpu_state_dest_vaddr); is_cpu_data_valid = true; } rc = fadump_setup_cpu_notes_buf(num_cpus); if (rc != 0) return rc; note_buf = (u32 *)fadump_conf->cpu_notes_buf_vaddr; if (!is_cpu_data_valid) goto out; /* * Offset for register entries, entry size and registers count is * duplicated in every thread header in keeping with HDAT format. * Use these values from the first thread header. */ thdr = (struct hdat_fadump_thread_hdr *)bufp; regs_offset = (offsetof(struct hdat_fadump_thread_hdr, offset) + be32_to_cpu(thdr->offset)); reg_esize = be32_to_cpu(thdr->esize); regs_cnt = be32_to_cpu(thdr->ecnt); pr_debug("--------CPU State Data------------\n"); pr_debug("NumCpus : %u\n", num_cpus); pr_debug("\tOffset: %u, Entry size: %u, Cnt: %u\n", regs_offset, reg_esize, regs_cnt); for (i = 0; i < num_cpus; i++, bufp += size_per_thread) { thdr = (struct hdat_fadump_thread_hdr *)bufp; thread_pir = be32_to_cpu(thdr->pir); pr_debug("[%04d] PIR: 0x%x, core state: 0x%02x\n", i, thread_pir, thdr->core_state); /* * If this is kernel initiated crash, crashing_cpu would be set * appropriately and register data of the crashing CPU saved by * crashing kernel. Add this saved register data of crashing CPU * to elf notes and populate the pt_regs for the remaining CPUs * from register state data provided by firmware. 
*/ if (fdh->crashing_cpu == thread_pir) { note_buf = fadump_regs_to_elf_notes(note_buf, &fdh->regs); pr_debug("Crashing CPU PIR: 0x%x - R1 : 0x%lx, NIP : 0x%lx\n", fdh->crashing_cpu, fdh->regs.gpr[1], fdh->regs.nip); continue; } /* * Register state data of MAX cores is provided by firmware, * but some of this cores may not be active. So, while * processing register state data, check core state and * skip threads that belong to inactive cores. */ if (thdr->core_state == HDAT_FADUMP_CORE_INACTIVE) continue; opal_fadump_read_regs((bufp + regs_offset), regs_cnt, reg_esize, true, &regs); note_buf = fadump_regs_to_elf_notes(note_buf, &regs); pr_debug("CPU PIR: 0x%x - R1 : 0x%lx, NIP : 0x%lx\n", thread_pir, regs.gpr[1], regs.nip); } out: /* * CPU state data is invalid/unsupported. Try appending crashing CPU's * register data, if it is saved by the kernel. */ if (fadump_conf->cpu_notes_buf_vaddr == (u64)note_buf) { if (fdh->crashing_cpu == FADUMP_CPU_UNKNOWN) { fadump_free_cpu_notes_buf(); return -ENODEV; } pr_warn("WARNING: appending only crashing CPU's register data\n"); note_buf = fadump_regs_to_elf_notes(note_buf, &(fdh->regs)); } final_note(note_buf); pr_debug("Updating elfcore header (%llx) with cpu notes\n", fdh->elfcorehdr_addr); fadump_update_elfcore_header(__va(fdh->elfcorehdr_addr)); return 0; } static int __init opal_fadump_process(struct fw_dump *fadump_conf) { struct fadump_crash_info_header *fdh; int rc = -EINVAL; if (!opal_fdm_active || !fadump_conf->fadumphdr_addr) return rc; /* Validate the fadump crash info header */ fdh = __va(fadump_conf->fadumphdr_addr); if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) { pr_err("Crash info header is not valid.\n"); return rc; } #ifdef CONFIG_OPAL_CORE /* * If this is a kernel initiated crash, crashing_cpu would be set * appropriately and register data of the crashing CPU saved by * crashing kernel. Add this saved register data of crashing CPU * to elf notes and populate the pt_regs for the remaining CPUs * from register state data provided by firmware. */ if (fdh->crashing_cpu != FADUMP_CPU_UNKNOWN) kernel_initiated = true; #endif rc = opal_fadump_build_cpu_notes(fadump_conf, fdh); if (rc) return rc; /* * We are done validating dump info and elfcore header is now ready * to be exported. set elfcorehdr_addr so that vmcore module will * export the elfcore header through '/proc/vmcore'. */ elfcorehdr_addr = fdh->elfcorehdr_addr; return rc; } static void opal_fadump_region_show(struct fw_dump *fadump_conf, struct seq_file *m) { const struct opal_fadump_mem_struct *fdm_ptr; u64 dumped_bytes = 0; int i; if (fadump_conf->dump_active) fdm_ptr = opal_fdm_active; else fdm_ptr = opal_fdm; for (i = 0; i < be16_to_cpu(fdm_ptr->region_cnt); i++) { /* * Only regions that are registered for MPIPL * would have dump data. */ if ((fadump_conf->dump_active) && (i < be16_to_cpu(fdm_ptr->registered_regions))) dumped_bytes = be64_to_cpu(fdm_ptr->rgn[i].size); seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ", be64_to_cpu(fdm_ptr->rgn[i].src), be64_to_cpu(fdm_ptr->rgn[i].dest)); seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n", be64_to_cpu(fdm_ptr->rgn[i].size), dumped_bytes); } /* Dump is active. Show preserved area start address. 
*/ if (fadump_conf->dump_active) { seq_printf(m, "\nMemory above %#016llx is reserved for saving crash dump\n", fadump_conf->boot_mem_top); } } static void opal_fadump_trigger(struct fadump_crash_info_header *fdh, const char *msg) { int rc; /* * Unlike on pSeries platform, logical CPU number is not provided * with architected register state data. So, store the crashing * CPU's PIR instead to plug the appropriate register data for * crashing CPU in the vmcore file. */ fdh->crashing_cpu = (u32)mfspr(SPRN_PIR); rc = opal_cec_reboot2(OPAL_REBOOT_MPIPL, msg); if (rc == OPAL_UNSUPPORTED) { pr_emerg("Reboot type %d not supported.\n", OPAL_REBOOT_MPIPL); } else if (rc == OPAL_HARDWARE) pr_emerg("No backend support for MPIPL!\n"); } static struct fadump_ops opal_fadump_ops = { .fadump_init_mem_struct = opal_fadump_init_mem_struct, .fadump_get_metadata_size = opal_fadump_get_metadata_size, .fadump_setup_metadata = opal_fadump_setup_metadata, .fadump_get_bootmem_min = opal_fadump_get_bootmem_min, .fadump_register = opal_fadump_register, .fadump_unregister = opal_fadump_unregister, .fadump_invalidate = opal_fadump_invalidate, .fadump_cleanup = opal_fadump_cleanup, .fadump_process = opal_fadump_process, .fadump_region_show = opal_fadump_region_show, .fadump_trigger = opal_fadump_trigger, }; void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node) { const __be32 *prop; unsigned long dn; __be64 be_addr; u64 addr = 0; int i, len; s64 ret; /* * Check if Firmware-Assisted Dump is supported. if yes, check * if dump has been initiated on last reboot. */ dn = of_get_flat_dt_subnode_by_name(node, "dump"); if (dn == -FDT_ERR_NOTFOUND) { pr_debug("FADump support is missing!\n"); return; } if (!of_flat_dt_is_compatible(dn, "ibm,opal-dump")) { pr_err("Support missing for this f/w version!\n"); return; } prop = of_get_flat_dt_prop(dn, "fw-load-area", &len); if (prop) { /* * Each f/w load area is an (address,size) pair, * 2 cells each, totalling 4 cells per range. */ for (i = 0; i < len / (sizeof(*prop) * 4); i++) { u64 base, end; base = of_read_number(prop + (i * 4) + 0, 2); end = base; end += of_read_number(prop + (i * 4) + 2, 2); if (end > OPAL_FADUMP_MIN_BOOT_MEM) { pr_err("F/W load area: 0x%llx-0x%llx\n", base, end); pr_err("F/W version not supported!\n"); return; } } } fadump_conf->ops = &opal_fadump_ops; fadump_conf->fadump_supported = 1; /* * Firmware supports 32-bit field for size. Align it to PAGE_SIZE * and request firmware to copy multiple kernel boot memory regions. */ fadump_conf->max_copy_size = ALIGN_DOWN(U32_MAX, PAGE_SIZE); /* * Check if dump has been initiated on last reboot. */ prop = of_get_flat_dt_prop(dn, "mpipl-boot", NULL); if (!prop) return; ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &be_addr); if ((ret != OPAL_SUCCESS) || !be_addr) { pr_err("Failed to get Kernel metadata (%lld)\n", ret); return; } addr = be64_to_cpu(be_addr); pr_debug("Kernel metadata addr: %llx\n", addr); opal_fdm_active = __va(addr); if (opal_fdm_active->version != OPAL_FADUMP_VERSION) { pr_warn("Supported kernel metadata version: %u, found: %d!\n", OPAL_FADUMP_VERSION, opal_fdm_active->version); pr_warn("WARNING: Kernel metadata format mismatch identified! 
Core file maybe corrupted..\n"); } /* Kernel regions not registered with f/w for MPIPL */ if (be16_to_cpu(opal_fdm_active->registered_regions) == 0) { opal_fdm_active = NULL; return; } ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &be_addr); if (be_addr) { addr = be64_to_cpu(be_addr); pr_debug("CPU metadata addr: %llx\n", addr); opal_cpu_metadata = __va(addr); } pr_info("Firmware-assisted dump is active.\n"); fadump_conf->dump_active = 1; opal_fadump_get_config(fadump_conf, opal_fdm_active); } #endif /* !CONFIG_PRESERVE_FA_DUMP */
linux-master
arch/powerpc/platforms/powernv/opal-fadump.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Support PCI/PCIe on PowerNV platforms * * Copyright 2011 Benjamin Herrenschmidt, IBM Corp. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/msi.h> #include <linux/iommu.h> #include <linux/sched/mm.h> #include <asm/sections.h> #include <asm/io.h> #include <asm/pci-bridge.h> #include <asm/machdep.h> #include <asm/msi_bitmap.h> #include <asm/ppc-pci.h> #include <asm/pnv-pci.h> #include <asm/opal.h> #include <asm/iommu.h> #include <asm/tce.h> #include <asm/firmware.h> #include <asm/eeh_event.h> #include <asm/eeh.h> #include "powernv.h" #include "pci.h" static DEFINE_MUTEX(tunnel_mutex); int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id) { struct device_node *node = np; u32 bdfn; u64 phbid; int ret; ret = of_property_read_u32(np, "reg", &bdfn); if (ret) return -ENXIO; bdfn = ((bdfn & 0x00ffff00) >> 8); for (node = np; node; node = of_get_parent(node)) { if (!PCI_DN(node)) { of_node_put(node); break; } if (!of_device_is_compatible(node, "ibm,ioda2-phb") && !of_device_is_compatible(node, "ibm,ioda3-phb") && !of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb")) { of_node_put(node); continue; } ret = of_property_read_u64(node, "ibm,opal-phbid", &phbid); if (ret) { of_node_put(node); return -ENXIO; } if (of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb")) *id = PCI_PHB_SLOT_ID(phbid); else *id = PCI_SLOT_ID(phbid, bdfn); return 0; } return -ENODEV; } EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id); int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len) { int64_t rc; if (!opal_check_token(OPAL_GET_DEVICE_TREE)) return -ENXIO; rc = opal_get_device_tree(phandle, (uint64_t)buf, len); if (rc < OPAL_SUCCESS) return -EIO; return rc; } EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree); int pnv_pci_get_presence_state(uint64_t id, uint8_t *state) { int64_t rc; if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE)) return -ENXIO; rc = opal_pci_get_presence_state(id, (uint64_t)state); if (rc != OPAL_SUCCESS) return -EIO; return 0; } EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state); int pnv_pci_get_power_state(uint64_t id, uint8_t *state) { int64_t rc; if (!opal_check_token(OPAL_PCI_GET_POWER_STATE)) return -ENXIO; rc = opal_pci_get_power_state(id, (uint64_t)state); if (rc != OPAL_SUCCESS) return -EIO; return 0; } EXPORT_SYMBOL_GPL(pnv_pci_get_power_state); int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg) { struct opal_msg m; int token, ret; int64_t rc; if (!opal_check_token(OPAL_PCI_SET_POWER_STATE)) return -ENXIO; token = opal_async_get_token_interruptible(); if (unlikely(token < 0)) return token; rc = opal_pci_set_power_state(token, id, (uint64_t)&state); if (rc == OPAL_SUCCESS) { ret = 0; goto exit; } else if (rc != OPAL_ASYNC_COMPLETION) { ret = -EIO; goto exit; } ret = opal_async_wait_response(token, &m); if (ret < 0) goto exit; if (msg) { ret = 1; memcpy(msg, &m, sizeof(m)); } exit: opal_async_release_token(token); return ret; } EXPORT_SYMBOL_GPL(pnv_pci_set_power_state); /* Nicely print the contents of the PE State Tables (PEST). 
*/ static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size) { __be64 prevA = ULONG_MAX, prevB = ULONG_MAX; bool dup = false; int i; for (i = 0; i < pest_size; i++) { __be64 peA = be64_to_cpu(pestA[i]); __be64 peB = be64_to_cpu(pestB[i]); if (peA != prevA || peB != prevB) { if (dup) { pr_info("PE[..%03x] A/B: as above\n", i-1); dup = false; } prevA = peA; prevB = peB; if (peA & PNV_IODA_STOPPED_STATE || peB & PNV_IODA_STOPPED_STATE) pr_info("PE[%03x] A/B: %016llx %016llx\n", i, peA, peB); } else if (!dup && (peA & PNV_IODA_STOPPED_STATE || peB & PNV_IODA_STOPPED_STATE)) { dup = true; } } } static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose, struct OpalIoPhbErrorCommon *common) { struct OpalIoP7IOCPhbErrorData *data; data = (struct OpalIoP7IOCPhbErrorData *)common; pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n", hose->global_number, be32_to_cpu(common->version)); if (data->brdgCtl) pr_info("brdgCtl: %08x\n", be32_to_cpu(data->brdgCtl)); if (data->portStatusReg || data->rootCmplxStatus || data->busAgentStatus) pr_info("UtlSts: %08x %08x %08x\n", be32_to_cpu(data->portStatusReg), be32_to_cpu(data->rootCmplxStatus), be32_to_cpu(data->busAgentStatus)); if (data->deviceStatus || data->slotStatus || data->linkStatus || data->devCmdStatus || data->devSecStatus) pr_info("RootSts: %08x %08x %08x %08x %08x\n", be32_to_cpu(data->deviceStatus), be32_to_cpu(data->slotStatus), be32_to_cpu(data->linkStatus), be32_to_cpu(data->devCmdStatus), be32_to_cpu(data->devSecStatus)); if (data->rootErrorStatus || data->uncorrErrorStatus || data->corrErrorStatus) pr_info("RootErrSts: %08x %08x %08x\n", be32_to_cpu(data->rootErrorStatus), be32_to_cpu(data->uncorrErrorStatus), be32_to_cpu(data->corrErrorStatus)); if (data->tlpHdr1 || data->tlpHdr2 || data->tlpHdr3 || data->tlpHdr4) pr_info("RootErrLog: %08x %08x %08x %08x\n", be32_to_cpu(data->tlpHdr1), be32_to_cpu(data->tlpHdr2), be32_to_cpu(data->tlpHdr3), be32_to_cpu(data->tlpHdr4)); if (data->sourceId || data->errorClass || data->correlator) pr_info("RootErrLog1: %08x %016llx %016llx\n", be32_to_cpu(data->sourceId), be64_to_cpu(data->errorClass), be64_to_cpu(data->correlator)); if (data->p7iocPlssr || data->p7iocCsr) pr_info("PhbSts: %016llx %016llx\n", be64_to_cpu(data->p7iocPlssr), be64_to_cpu(data->p7iocCsr)); if (data->lemFir) pr_info("Lem: %016llx %016llx %016llx\n", be64_to_cpu(data->lemFir), be64_to_cpu(data->lemErrorMask), be64_to_cpu(data->lemWOF)); if (data->phbErrorStatus) pr_info("PhbErr: %016llx %016llx %016llx %016llx\n", be64_to_cpu(data->phbErrorStatus), be64_to_cpu(data->phbFirstErrorStatus), be64_to_cpu(data->phbErrorLog0), be64_to_cpu(data->phbErrorLog1)); if (data->mmioErrorStatus) pr_info("OutErr: %016llx %016llx %016llx %016llx\n", be64_to_cpu(data->mmioErrorStatus), be64_to_cpu(data->mmioFirstErrorStatus), be64_to_cpu(data->mmioErrorLog0), be64_to_cpu(data->mmioErrorLog1)); if (data->dma0ErrorStatus) pr_info("InAErr: %016llx %016llx %016llx %016llx\n", be64_to_cpu(data->dma0ErrorStatus), be64_to_cpu(data->dma0FirstErrorStatus), be64_to_cpu(data->dma0ErrorLog0), be64_to_cpu(data->dma0ErrorLog1)); if (data->dma1ErrorStatus) pr_info("InBErr: %016llx %016llx %016llx %016llx\n", be64_to_cpu(data->dma1ErrorStatus), be64_to_cpu(data->dma1FirstErrorStatus), be64_to_cpu(data->dma1ErrorLog0), be64_to_cpu(data->dma1ErrorLog1)); pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_P7IOC_NUM_PEST_REGS); } static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose, struct OpalIoPhbErrorCommon *common) { struct 
OpalIoPhb3ErrorData *data; data = (struct OpalIoPhb3ErrorData*)common; pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n", hose->global_number, be32_to_cpu(common->version)); if (data->brdgCtl) pr_info("brdgCtl: %08x\n", be32_to_cpu(data->brdgCtl)); if (data->portStatusReg || data->rootCmplxStatus || data->busAgentStatus) pr_info("UtlSts: %08x %08x %08x\n", be32_to_cpu(data->portStatusReg), be32_to_cpu(data->rootCmplxStatus), be32_to_cpu(data->busAgentStatus)); if (data->deviceStatus || data->slotStatus || data->linkStatus || data->devCmdStatus || data->devSecStatus) pr_info("RootSts: %08x %08x %08x %08x %08x\n", be32_to_cpu(data->deviceStatus), be32_to_cpu(data->slotStatus), be32_to_cpu(data->linkStatus), be32_to_cpu(data->devCmdStatus), be32_to_cpu(data->devSecStatus)); if (data->rootErrorStatus || data->uncorrErrorStatus || data->corrErrorStatus) pr_info("RootErrSts: %08x %08x %08x\n", be32_to_cpu(data->rootErrorStatus), be32_to_cpu(data->uncorrErrorStatus), be32_to_cpu(data->corrErrorStatus)); if (data->tlpHdr1 || data->tlpHdr2 || data->tlpHdr3 || data->tlpHdr4) pr_info("RootErrLog: %08x %08x %08x %08x\n", be32_to_cpu(data->tlpHdr1), be32_to_cpu(data->tlpHdr2), be32_to_cpu(data->tlpHdr3), be32_to_cpu(data->tlpHdr4)); if (data->sourceId || data->errorClass || data->correlator) pr_info("RootErrLog1: %08x %016llx %016llx\n", be32_to_cpu(data->sourceId), be64_to_cpu(data->errorClass), be64_to_cpu(data->correlator)); if (data->nFir) pr_info("nFir: %016llx %016llx %016llx\n", be64_to_cpu(data->nFir), be64_to_cpu(data->nFirMask), be64_to_cpu(data->nFirWOF)); if (data->phbPlssr || data->phbCsr) pr_info("PhbSts: %016llx %016llx\n", be64_to_cpu(data->phbPlssr), be64_to_cpu(data->phbCsr)); if (data->lemFir) pr_info("Lem: %016llx %016llx %016llx\n", be64_to_cpu(data->lemFir), be64_to_cpu(data->lemErrorMask), be64_to_cpu(data->lemWOF)); if (data->phbErrorStatus) pr_info("PhbErr: %016llx %016llx %016llx %016llx\n", be64_to_cpu(data->phbErrorStatus), be64_to_cpu(data->phbFirstErrorStatus), be64_to_cpu(data->phbErrorLog0), be64_to_cpu(data->phbErrorLog1)); if (data->mmioErrorStatus) pr_info("OutErr: %016llx %016llx %016llx %016llx\n", be64_to_cpu(data->mmioErrorStatus), be64_to_cpu(data->mmioFirstErrorStatus), be64_to_cpu(data->mmioErrorLog0), be64_to_cpu(data->mmioErrorLog1)); if (data->dma0ErrorStatus) pr_info("InAErr: %016llx %016llx %016llx %016llx\n", be64_to_cpu(data->dma0ErrorStatus), be64_to_cpu(data->dma0FirstErrorStatus), be64_to_cpu(data->dma0ErrorLog0), be64_to_cpu(data->dma0ErrorLog1)); if (data->dma1ErrorStatus) pr_info("InBErr: %016llx %016llx %016llx %016llx\n", be64_to_cpu(data->dma1ErrorStatus), be64_to_cpu(data->dma1FirstErrorStatus), be64_to_cpu(data->dma1ErrorLog0), be64_to_cpu(data->dma1ErrorLog1)); pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB3_NUM_PEST_REGS); } static void pnv_pci_dump_phb4_diag_data(struct pci_controller *hose, struct OpalIoPhbErrorCommon *common) { struct OpalIoPhb4ErrorData *data; data = (struct OpalIoPhb4ErrorData*)common; pr_info("PHB4 PHB#%d Diag-data (Version: %d)\n", hose->global_number, be32_to_cpu(common->version)); if (data->brdgCtl) pr_info("brdgCtl: %08x\n", be32_to_cpu(data->brdgCtl)); if (data->deviceStatus || data->slotStatus || data->linkStatus || data->devCmdStatus || data->devSecStatus) pr_info("RootSts: %08x %08x %08x %08x %08x\n", be32_to_cpu(data->deviceStatus), be32_to_cpu(data->slotStatus), be32_to_cpu(data->linkStatus), be32_to_cpu(data->devCmdStatus), be32_to_cpu(data->devSecStatus)); if (data->rootErrorStatus || 
data->uncorrErrorStatus || data->corrErrorStatus) pr_info("RootErrSts: %08x %08x %08x\n", be32_to_cpu(data->rootErrorStatus), be32_to_cpu(data->uncorrErrorStatus), be32_to_cpu(data->corrErrorStatus)); if (data->tlpHdr1 || data->tlpHdr2 || data->tlpHdr3 || data->tlpHdr4) pr_info("RootErrLog: %08x %08x %08x %08x\n", be32_to_cpu(data->tlpHdr1), be32_to_cpu(data->tlpHdr2), be32_to_cpu(data->tlpHdr3), be32_to_cpu(data->tlpHdr4)); if (data->sourceId) pr_info("sourceId: %08x\n", be32_to_cpu(data->sourceId)); if (data->nFir) pr_info("nFir: %016llx %016llx %016llx\n", be64_to_cpu(data->nFir), be64_to_cpu(data->nFirMask), be64_to_cpu(data->nFirWOF)); if (data->phbPlssr || data->phbCsr) pr_info("PhbSts: %016llx %016llx\n", be64_to_cpu(data->phbPlssr), be64_to_cpu(data->phbCsr)); if (data->lemFir) pr_info("Lem: %016llx %016llx %016llx\n", be64_to_cpu(data->lemFir), be64_to_cpu(data->lemErrorMask), be64_to_cpu(data->lemWOF)); if (data->phbErrorStatus) pr_info("PhbErr: %016llx %016llx %016llx %016llx\n", be64_to_cpu(data->phbErrorStatus), be64_to_cpu(data->phbFirstErrorStatus), be64_to_cpu(data->phbErrorLog0), be64_to_cpu(data->phbErrorLog1)); if (data->phbTxeErrorStatus) pr_info("PhbTxeErr: %016llx %016llx %016llx %016llx\n", be64_to_cpu(data->phbTxeErrorStatus), be64_to_cpu(data->phbTxeFirstErrorStatus), be64_to_cpu(data->phbTxeErrorLog0), be64_to_cpu(data->phbTxeErrorLog1)); if (data->phbRxeArbErrorStatus) pr_info("RxeArbErr: %016llx %016llx %016llx %016llx\n", be64_to_cpu(data->phbRxeArbErrorStatus), be64_to_cpu(data->phbRxeArbFirstErrorStatus), be64_to_cpu(data->phbRxeArbErrorLog0), be64_to_cpu(data->phbRxeArbErrorLog1)); if (data->phbRxeMrgErrorStatus) pr_info("RxeMrgErr: %016llx %016llx %016llx %016llx\n", be64_to_cpu(data->phbRxeMrgErrorStatus), be64_to_cpu(data->phbRxeMrgFirstErrorStatus), be64_to_cpu(data->phbRxeMrgErrorLog0), be64_to_cpu(data->phbRxeMrgErrorLog1)); if (data->phbRxeTceErrorStatus) pr_info("RxeTceErr: %016llx %016llx %016llx %016llx\n", be64_to_cpu(data->phbRxeTceErrorStatus), be64_to_cpu(data->phbRxeTceFirstErrorStatus), be64_to_cpu(data->phbRxeTceErrorLog0), be64_to_cpu(data->phbRxeTceErrorLog1)); if (data->phbPblErrorStatus) pr_info("PblErr: %016llx %016llx %016llx %016llx\n", be64_to_cpu(data->phbPblErrorStatus), be64_to_cpu(data->phbPblFirstErrorStatus), be64_to_cpu(data->phbPblErrorLog0), be64_to_cpu(data->phbPblErrorLog1)); if (data->phbPcieDlpErrorStatus) pr_info("PcieDlp: %016llx %016llx %016llx\n", be64_to_cpu(data->phbPcieDlpErrorLog1), be64_to_cpu(data->phbPcieDlpErrorLog2), be64_to_cpu(data->phbPcieDlpErrorStatus)); if (data->phbRegbErrorStatus) pr_info("RegbErr: %016llx %016llx %016llx %016llx\n", be64_to_cpu(data->phbRegbErrorStatus), be64_to_cpu(data->phbRegbFirstErrorStatus), be64_to_cpu(data->phbRegbErrorLog0), be64_to_cpu(data->phbRegbErrorLog1)); pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB4_NUM_PEST_REGS); } void pnv_pci_dump_phb_diag_data(struct pci_controller *hose, unsigned char *log_buff) { struct OpalIoPhbErrorCommon *common; if (!hose || !log_buff) return; common = (struct OpalIoPhbErrorCommon *)log_buff; switch (be32_to_cpu(common->ioType)) { case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: pnv_pci_dump_p7ioc_diag_data(hose, common); break; case OPAL_PHB_ERROR_DATA_TYPE_PHB3: pnv_pci_dump_phb3_diag_data(hose, common); break; case OPAL_PHB_ERROR_DATA_TYPE_PHB4: pnv_pci_dump_phb4_diag_data(hose, common); break; default: pr_warn("%s: Unrecognized ioType %d\n", __func__, be32_to_cpu(common->ioType)); } } static void pnv_pci_handle_eeh_config(struct pnv_phb 
*phb, u32 pe_no) { unsigned long flags, rc; int has_diag, ret = 0; spin_lock_irqsave(&phb->lock, flags); /* Fetch PHB diag-data */ rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data, phb->diag_data_size); has_diag = (rc == OPAL_SUCCESS); /* If PHB supports compound PE, to handle it */ if (phb->unfreeze_pe) { ret = phb->unfreeze_pe(phb, pe_no, OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); } else { rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); if (rc) { pr_warn("%s: Failure %ld clearing frozen " "PHB#%x-PE#%x\n", __func__, rc, phb->hose->global_number, pe_no); ret = -EIO; } } /* * For now, let's only display the diag buffer when we fail to clear * the EEH status. We'll do more sensible things later when we have * proper EEH support. We need to make sure we don't pollute ourselves * with the normal errors generated when probing empty slots */ if (has_diag && ret) pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data); spin_unlock_irqrestore(&phb->lock, flags); } static void pnv_pci_config_check_eeh(struct pci_dn *pdn) { struct pnv_phb *phb = pdn->phb->private_data; u8 fstate = 0; __be16 pcierr = 0; unsigned int pe_no; s64 rc; /* * Get the PE#. During the PCI probe stage, we might not * setup that yet. So all ER errors should be mapped to * reserved PE. */ pe_no = pdn->pe_number; if (pe_no == IODA_INVALID_PE) { pe_no = phb->ioda.reserved_pe_idx; } /* * Fetch frozen state. If the PHB support compound PE, * we need handle that case. */ if (phb->get_pe_state) { fstate = phb->get_pe_state(phb, pe_no); } else { rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no, &fstate, &pcierr, NULL); if (rc) { pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n", __func__, rc, phb->hose->global_number, pe_no); return; } } pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n", (pdn->busno << 8) | (pdn->devfn), pe_no, fstate); /* Clear the frozen state if applicable */ if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE || fstate == OPAL_EEH_STOPPED_DMA_FREEZE || fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) { /* * If PHB supports compound PE, freeze it for * consistency. */ if (phb->freeze_pe) phb->freeze_pe(phb, pe_no); pnv_pci_handle_eeh_config(phb, pe_no); } } int pnv_pci_cfg_read(struct pci_dn *pdn, int where, int size, u32 *val) { struct pnv_phb *phb = pdn->phb->private_data; u32 bdfn = (pdn->busno << 8) | pdn->devfn; s64 rc; switch (size) { case 1: { u8 v8; rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8); *val = (rc == OPAL_SUCCESS) ? v8 : 0xff; break; } case 2: { __be16 v16; rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where, &v16); *val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff; break; } case 4: { __be32 v32; rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32); *val = (rc == OPAL_SUCCESS) ? 
be32_to_cpu(v32) : 0xffffffff; break; } default: return PCIBIOS_FUNC_NOT_SUPPORTED; } pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n", __func__, pdn->busno, pdn->devfn, where, size, *val); return PCIBIOS_SUCCESSFUL; } int pnv_pci_cfg_write(struct pci_dn *pdn, int where, int size, u32 val) { struct pnv_phb *phb = pdn->phb->private_data; u32 bdfn = (pdn->busno << 8) | pdn->devfn; pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n", __func__, pdn->busno, pdn->devfn, where, size, val); switch (size) { case 1: opal_pci_config_write_byte(phb->opal_id, bdfn, where, val); break; case 2: opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val); break; case 4: opal_pci_config_write_word(phb->opal_id, bdfn, where, val); break; default: return PCIBIOS_FUNC_NOT_SUPPORTED; } return PCIBIOS_SUCCESSFUL; } #ifdef CONFIG_EEH static bool pnv_pci_cfg_check(struct pci_dn *pdn) { struct eeh_dev *edev = NULL; struct pnv_phb *phb = pdn->phb->private_data; /* EEH not enabled ? */ if (!(phb->flags & PNV_PHB_FLAG_EEH)) return true; /* PE reset or device removed ? */ edev = pdn->edev; if (edev) { if (edev->pe && (edev->pe->state & EEH_PE_CFG_BLOCKED)) return false; if (edev->mode & EEH_DEV_REMOVED) return false; } return true; } #else static inline pnv_pci_cfg_check(struct pci_dn *pdn) { return true; } #endif /* CONFIG_EEH */ static int pnv_pci_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { struct pci_dn *pdn; struct pnv_phb *phb; int ret; *val = 0xFFFFFFFF; pdn = pci_get_pdn_by_devfn(bus, devfn); if (!pdn) return PCIBIOS_DEVICE_NOT_FOUND; if (!pnv_pci_cfg_check(pdn)) return PCIBIOS_DEVICE_NOT_FOUND; ret = pnv_pci_cfg_read(pdn, where, size, val); phb = pdn->phb->private_data; if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) { if (*val == EEH_IO_ERROR_VALUE(size) && eeh_dev_check_failure(pdn->edev)) return PCIBIOS_DEVICE_NOT_FOUND; } else { pnv_pci_config_check_eeh(pdn); } return ret; } static int pnv_pci_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct pci_dn *pdn; struct pnv_phb *phb; int ret; pdn = pci_get_pdn_by_devfn(bus, devfn); if (!pdn) return PCIBIOS_DEVICE_NOT_FOUND; if (!pnv_pci_cfg_check(pdn)) return PCIBIOS_DEVICE_NOT_FOUND; ret = pnv_pci_cfg_write(pdn, where, size, val); phb = pdn->phb->private_data; if (!(phb->flags & PNV_PHB_FLAG_EEH)) pnv_pci_config_check_eeh(pdn); return ret; } struct pci_ops pnv_pci_ops = { .read = pnv_pci_read_config, .write = pnv_pci_write_config, }; struct iommu_table *pnv_pci_table_alloc(int nid) { struct iommu_table *tbl; tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid); if (!tbl) return NULL; INIT_LIST_HEAD_RCU(&tbl->it_group_list); kref_init(&tbl->it_kref); return tbl; } struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); return of_node_get(hose->dn); } EXPORT_SYMBOL(pnv_pci_get_phb_node); int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable) { struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus); u64 tunnel_bar; __be64 val; int rc; if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR)) return -ENXIO; if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR)) return -ENXIO; mutex_lock(&tunnel_mutex); rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val); if (rc != OPAL_SUCCESS) { rc = -EIO; goto out; } tunnel_bar = be64_to_cpu(val); if (enable) { /* * Only one device per PHB can use atomics. * Our policy is first-come, first-served. 
*/ if (tunnel_bar) { if (tunnel_bar != addr) rc = -EBUSY; else rc = 0; /* Setting same address twice is ok */ goto out; } } else { /* * The device that owns atomics and wants to release * them must pass the same address with enable == 0. */ if (tunnel_bar != addr) { rc = -EPERM; goto out; } addr = 0x0ULL; } rc = opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr); rc = opal_error_code(rc); out: mutex_unlock(&tunnel_mutex); return rc; } EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar); void pnv_pci_shutdown(void) { struct pci_controller *hose; list_for_each_entry(hose, &hose_list, list_node) if (hose->controller_ops.shutdown) hose->controller_ops.shutdown(hose); } /* Fixup wrong class code in p7ioc and p8 root complex */ static void pnv_p7ioc_rc_quirk(struct pci_dev *dev) { dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk); void __init pnv_pci_init(void) { struct device_node *np; pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN); /* If we don't have OPAL, eg. in sim, just skip PCI probe */ if (!firmware_has_feature(FW_FEATURE_OPAL)) return; #ifdef CONFIG_PCIEPORTBUS /* * On PowerNV PCIe devices are (currently) managed in cooperation * with firmware. This isn't *strictly* required, but there's enough * assumptions baked into both firmware and the platform code that * it's unwise to allow the portbus services to be used. * * We need to fix this eventually, but for now set this flag to disable * the portbus driver. The AER service isn't required since that AER * events are handled via EEH. The pciehp hotplug driver can't work * without kernel changes (and portbus binding breaks pnv_php). The * other services also require some thinking about how we're going * to integrate them. */ pcie_ports_disabled = true; #endif /* Look for ioda2 built-in PHB3's */ for_each_compatible_node(np, NULL, "ibm,ioda2-phb") pnv_pci_init_ioda2_phb(np); /* Look for ioda3 built-in PHB4's, we treat them as IODA2 */ for_each_compatible_node(np, NULL, "ibm,ioda3-phb") pnv_pci_init_ioda2_phb(np); /* Look for NPU2 OpenCAPI PHBs */ for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb") pnv_pci_init_npu2_opencapi_phb(np); /* Configure IOMMU DMA hooks */ set_pci_dma_ops(&dma_iommu_ops); }
linux-master
arch/powerpc/platforms/powernv/pci.c
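The tunnel-BAR ownership policy in pnv_pci_set_tunnel_bar() above is easy to lose in the OPAL plumbing, so here is a small, self-contained userspace sketch of just that policy (the helper name, the fake addresses and the standalone harness are illustrative, not kernel code): the first claimant wins, re-claiming the same address is accepted, and only the current owner may release by passing its own address.

/*
 * Illustrative userspace sketch (not kernel code) of the tunnel-BAR
 * ownership policy implemented by pnv_pci_set_tunnel_bar().
 */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

static uint64_t tunnel_bar;	/* 0 means "unowned" */

static int set_tunnel_bar(uint64_t addr, int enable)
{
	if (enable) {
		if (tunnel_bar)
			return (tunnel_bar == addr) ? 0 : -EBUSY;
		tunnel_bar = addr;	/* first come, first served */
		return 0;
	}
	if (tunnel_bar != addr)		/* only the owner may release */
		return -EPERM;
	tunnel_bar = 0;
	return 0;
}

int main(void)
{
	printf("%d\n", set_tunnel_bar(0x1000, 1));	/* 0: first claim */
	printf("%d\n", set_tunnel_bar(0x1000, 1));	/* 0: same address twice is ok */
	printf("%d\n", set_tunnel_bar(0x2000, 1));	/* -EBUSY: already owned */
	printf("%d\n", set_tunnel_bar(0x2000, 0));	/* -EPERM: not the owner */
	printf("%d\n", set_tunnel_bar(0x1000, 0));	/* 0: owner releases */
	return 0;
}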
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerNV nvram code. * * Copyright 2011 IBM Corp. */ #define DEBUG #include <linux/delay.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/of.h> #include <asm/opal.h> #include <asm/nvram.h> #include <asm/machdep.h> static unsigned int nvram_size; static ssize_t opal_nvram_size(void) { return nvram_size; } static ssize_t opal_nvram_read(char *buf, size_t count, loff_t *index) { s64 rc; int off; if (*index >= nvram_size) return 0; off = *index; if ((off + count) > nvram_size) count = nvram_size - off; rc = opal_read_nvram(__pa(buf), count, off); if (rc != OPAL_SUCCESS) return -EIO; *index += count; return count; } /* * This can be called in the panic path with interrupts off, so use * mdelay in that case. */ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index) { s64 rc = OPAL_BUSY; int off; if (*index >= nvram_size) return 0; off = *index; if ((off + count) > nvram_size) count = nvram_size - off; while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { rc = opal_write_nvram(__pa(buf), count, off); if (rc == OPAL_BUSY_EVENT) { if (in_interrupt() || irqs_disabled()) mdelay(OPAL_BUSY_DELAY_MS); else msleep(OPAL_BUSY_DELAY_MS); opal_poll_events(NULL); } else if (rc == OPAL_BUSY) { if (in_interrupt() || irqs_disabled()) mdelay(OPAL_BUSY_DELAY_MS); else msleep(OPAL_BUSY_DELAY_MS); } } if (rc) return -EIO; *index += count; return count; } static int __init opal_nvram_init_log_partitions(void) { /* Scan nvram for partitions */ nvram_scan_partitions(); nvram_init_oops_partition(0); return 0; } machine_arch_initcall(powernv, opal_nvram_init_log_partitions); void __init opal_nvram_init(void) { struct device_node *np; const __be32 *nbytes_p; np = of_find_compatible_node(NULL, NULL, "ibm,opal-nvram"); if (np == NULL) return; nbytes_p = of_get_property(np, "#bytes", NULL); if (!nbytes_p) { of_node_put(np); return; } nvram_size = be32_to_cpup(nbytes_p); pr_info("OPAL nvram setup, %u bytes\n", nvram_size); of_node_put(np); ppc_md.nvram_read = opal_nvram_read; ppc_md.nvram_write = opal_nvram_write; ppc_md.nvram_size = opal_nvram_size; }
linux-master
arch/powerpc/platforms/powernv/opal-nvram.c
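The write path in opal-nvram.c clamps the request to the device size and then retries while firmware reports it is busy. A minimal userspace sketch of that clamp-and-retry shape, with a fake firmware stub standing in for opal_write_nvram() and no real sleeping, might look like this:

/* Sketch only: the firmware stub and return codes are stand-ins. */
#include <stdio.h>
#include <stddef.h>
#include <sys/types.h>

#define FAKE_BUSY	(-1)
#define FAKE_SUCCESS	0
#define NVRAM_SIZE	16

static int busy_left = 3;	/* pretend firmware is busy three times */

static int fake_write(size_t count, size_t off)
{
	(void)count;
	(void)off;
	return busy_left-- > 0 ? FAKE_BUSY : FAKE_SUCCESS;
}

static ssize_t nvram_write(size_t count, size_t *index)
{
	size_t off = *index;
	int rc;

	if (off >= NVRAM_SIZE)
		return 0;
	if (off + count > NVRAM_SIZE)
		count = NVRAM_SIZE - off;	/* clamp to the device size */

	do {
		rc = fake_write(count, off);	/* the real code sleeps between tries */
	} while (rc == FAKE_BUSY);

	if (rc != FAKE_SUCCESS)
		return -1;
	*index += count;
	return (ssize_t)count;
}

int main(void)
{
	size_t pos = 10;
	ssize_t n = nvram_write(8, &pos);	/* 8 bytes requested, 6 fit */

	printf("wrote %zd bytes, new pos %zu\n", n, pos);
	return 0;
}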
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerNV LPC bus handling. * * Copyright 2013 IBM Corp. */ #include <linux/kernel.h> #include <linux/of.h> #include <linux/bug.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/debugfs.h> #include <asm/machdep.h> #include <asm/firmware.h> #include <asm/opal.h> #include <asm/prom.h> #include <linux/uaccess.h> #include <asm/isa-bridge.h> static int opal_lpc_chip_id = -1; static u8 opal_lpc_inb(unsigned long port) { int64_t rc; __be32 data; if (opal_lpc_chip_id < 0 || port > 0xffff) return 0xff; rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1); return rc ? 0xff : be32_to_cpu(data); } static __le16 __opal_lpc_inw(unsigned long port) { int64_t rc; __be32 data; if (opal_lpc_chip_id < 0 || port > 0xfffe) return 0xffff; if (port & 1) return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1); rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2); return rc ? 0xffff : be32_to_cpu(data); } static u16 opal_lpc_inw(unsigned long port) { return le16_to_cpu(__opal_lpc_inw(port)); } static __le32 __opal_lpc_inl(unsigned long port) { int64_t rc; __be32 data; if (opal_lpc_chip_id < 0 || port > 0xfffc) return 0xffffffff; if (port & 3) return (__le32)opal_lpc_inb(port ) << 24 | (__le32)opal_lpc_inb(port + 1) << 16 | (__le32)opal_lpc_inb(port + 2) << 8 | opal_lpc_inb(port + 3); rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4); return rc ? 0xffffffff : be32_to_cpu(data); } static u32 opal_lpc_inl(unsigned long port) { return le32_to_cpu(__opal_lpc_inl(port)); } static void opal_lpc_outb(u8 val, unsigned long port) { if (opal_lpc_chip_id < 0 || port > 0xffff) return; opal_lpc_write(opal_lpc_chip_id, OPAL_LPC_IO, port, val, 1); } static void __opal_lpc_outw(__le16 val, unsigned long port) { if (opal_lpc_chip_id < 0 || port > 0xfffe) return; if (port & 1) { opal_lpc_outb(val >> 8, port); opal_lpc_outb(val , port + 1); return; } opal_lpc_write(opal_lpc_chip_id, OPAL_LPC_IO, port, val, 2); } static void opal_lpc_outw(u16 val, unsigned long port) { __opal_lpc_outw(cpu_to_le16(val), port); } static void __opal_lpc_outl(__le32 val, unsigned long port) { if (opal_lpc_chip_id < 0 || port > 0xfffc) return; if (port & 3) { opal_lpc_outb(val >> 24, port); opal_lpc_outb(val >> 16, port + 1); opal_lpc_outb(val >> 8, port + 2); opal_lpc_outb(val , port + 3); return; } opal_lpc_write(opal_lpc_chip_id, OPAL_LPC_IO, port, val, 4); } static void opal_lpc_outl(u32 val, unsigned long port) { __opal_lpc_outl(cpu_to_le32(val), port); } static void opal_lpc_insb(unsigned long p, void *b, unsigned long c) { u8 *ptr = b; while(c--) *(ptr++) = opal_lpc_inb(p); } static void opal_lpc_insw(unsigned long p, void *b, unsigned long c) { __le16 *ptr = b; while(c--) *(ptr++) = __opal_lpc_inw(p); } static void opal_lpc_insl(unsigned long p, void *b, unsigned long c) { __le32 *ptr = b; while(c--) *(ptr++) = __opal_lpc_inl(p); } static void opal_lpc_outsb(unsigned long p, const void *b, unsigned long c) { const u8 *ptr = b; while(c--) opal_lpc_outb(*(ptr++), p); } static void opal_lpc_outsw(unsigned long p, const void *b, unsigned long c) { const __le16 *ptr = b; while(c--) __opal_lpc_outw(*(ptr++), p); } static void opal_lpc_outsl(unsigned long p, const void *b, unsigned long c) { const __le32 *ptr = b; while(c--) __opal_lpc_outl(*(ptr++), p); } static const struct ppc_pci_io opal_lpc_io = { .inb = opal_lpc_inb, .inw = opal_lpc_inw, .inl = opal_lpc_inl, .outb = opal_lpc_outb, .outw = opal_lpc_outw, .outl = opal_lpc_outl, .insb = 
opal_lpc_insb, .insw = opal_lpc_insw, .insl = opal_lpc_insl, .outsb = opal_lpc_outsb, .outsw = opal_lpc_outsw, .outsl = opal_lpc_outsl, }; #ifdef CONFIG_DEBUG_FS struct lpc_debugfs_entry { enum OpalLPCAddressType lpc_type; }; static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { struct lpc_debugfs_entry *lpc = filp->private_data; u32 data, pos, len, todo; int rc; if (!access_ok(ubuf, count)) return -EFAULT; todo = count; while (todo) { pos = *ppos; /* * Select access size based on count and alignment and * access type. IO and MEM only support byte accesses, * FW supports all 3. */ len = 1; if (lpc->lpc_type == OPAL_LPC_FW) { if (todo > 3 && (pos & 3) == 0) len = 4; else if (todo > 1 && (pos & 1) == 0) len = 2; } rc = opal_lpc_read(opal_lpc_chip_id, lpc->lpc_type, pos, &data, len); if (rc) return -ENXIO; /* * Now there is some trickery with the data returned by OPAL * as it's the desired data right justified in a 32-bit BE * word. * * This is a very bad interface and I'm to blame for it :-( * * So we can't just apply a 32-bit swap to what comes from OPAL, * because user space expects the *bytes* to be in their proper * respective positions (ie, LPC position). * * So what we really want to do here is to shift data right * appropriately on a LE kernel. * * IE. If the LPC transaction has bytes B0, B1, B2 and B3 in that * order, we have in memory written to by OPAL at the "data" * pointer: * * Bytes: OPAL "data" LE "data" * 32-bit: B0 B1 B2 B3 B0B1B2B3 B3B2B1B0 * 16-bit: B0 B1 0000B0B1 B1B00000 * 8-bit: B0 000000B0 B0000000 * * So a BE kernel will have the leftmost of the above in the MSB * and rightmost in the LSB and can just then "cast" the u32 "data" * down to the appropriate quantity and write it. * * However, an LE kernel can't. It doesn't need to swap because a * load from data followed by a store to user are going to preserve * the byte ordering which is the wire byte order which is what the * user wants, but in order to "crop" to the right size, we need to * shift right first. */ switch(len) { case 4: rc = __put_user((u32)data, (u32 __user *)ubuf); break; case 2: #ifdef __LITTLE_ENDIAN__ data >>= 16; #endif rc = __put_user((u16)data, (u16 __user *)ubuf); break; default: #ifdef __LITTLE_ENDIAN__ data >>= 24; #endif rc = __put_user((u8)data, (u8 __user *)ubuf); break; } if (rc) return -EFAULT; *ppos += len; ubuf += len; todo -= len; } return count; } static ssize_t lpc_debug_write(struct file *filp, const char __user *ubuf, size_t count, loff_t *ppos) { struct lpc_debugfs_entry *lpc = filp->private_data; u32 data, pos, len, todo; int rc; if (!access_ok(ubuf, count)) return -EFAULT; todo = count; while (todo) { pos = *ppos; /* * Select access size based on count and alignment and * access type. IO and MEM only support byte acceses, * FW supports all 3. */ len = 1; if (lpc->lpc_type == OPAL_LPC_FW) { if (todo > 3 && (pos & 3) == 0) len = 4; else if (todo > 1 && (pos & 1) == 0) len = 2; } /* * Similarly to the read case, we have some trickery here but * it's different to handle. We need to pass the value to OPAL in * a register whose layout depends on the access size. We want * to reproduce the memory layout of the user, however we aren't * doing a load from user and a store to another memory location * which would achieve that. Here we pass the value to OPAL via * a register which is expected to contain the "BE" interpretation * of the byte sequence. IE: for a 32-bit access, byte 0 should be * in the MSB. So here we *do* need to byteswap on LE. 
* * User bytes: LE "data" OPAL "data" * 32-bit: B0 B1 B2 B3 B3B2B1B0 B0B1B2B3 * 16-bit: B0 B1 0000B1B0 0000B0B1 * 8-bit: B0 000000B0 000000B0 */ switch(len) { case 4: rc = __get_user(data, (u32 __user *)ubuf); data = cpu_to_be32(data); break; case 2: rc = __get_user(data, (u16 __user *)ubuf); data = cpu_to_be16(data); break; default: rc = __get_user(data, (u8 __user *)ubuf); break; } if (rc) return -EFAULT; rc = opal_lpc_write(opal_lpc_chip_id, lpc->lpc_type, pos, data, len); if (rc) return -ENXIO; *ppos += len; ubuf += len; todo -= len; } return count; } static const struct file_operations lpc_fops = { .read = lpc_debug_read, .write = lpc_debug_write, .open = simple_open, .llseek = default_llseek, }; static int opal_lpc_debugfs_create_type(struct dentry *folder, const char *fname, enum OpalLPCAddressType type) { struct lpc_debugfs_entry *entry; entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; entry->lpc_type = type; debugfs_create_file(fname, 0600, folder, entry, &lpc_fops); return 0; } static int opal_lpc_init_debugfs(void) { struct dentry *root; int rc = 0; if (opal_lpc_chip_id < 0) return -ENODEV; root = debugfs_create_dir("lpc", arch_debugfs_dir); rc |= opal_lpc_debugfs_create_type(root, "io", OPAL_LPC_IO); rc |= opal_lpc_debugfs_create_type(root, "mem", OPAL_LPC_MEM); rc |= opal_lpc_debugfs_create_type(root, "fw", OPAL_LPC_FW); return rc; } machine_device_initcall(powernv, opal_lpc_init_debugfs); #endif /* CONFIG_DEBUG_FS */ void __init opal_lpc_init(void) { struct device_node *np; /* * Look for a Power8 LPC bus tagged as "primary", * we currently support only one though the OPAL APIs * support any number. */ for_each_compatible_node(np, NULL, "ibm,power8-lpc") { if (!of_device_is_available(np)) continue; if (!of_get_property(np, "primary", NULL)) continue; opal_lpc_chip_id = of_get_ibm_chip_id(np); of_node_put(np); break; } if (opal_lpc_chip_id < 0) return; /* Does it support direct mapping ? */ if (of_property_present(np, "ranges")) { pr_info("OPAL: Found memory mapped LPC bus on chip %d\n", opal_lpc_chip_id); isa_bridge_init_non_pci(np); } else { pr_info("OPAL: Found non-mapped LPC bus on chip %d\n", opal_lpc_chip_id); /* Setup special IO ops */ ppc_pci_io = opal_lpc_io; isa_io_special = true; } }
linux-master
arch/powerpc/platforms/powernv/opal-lpc.c
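The long comment in lpc_debug_read() about OPAL returning the data right-justified in a big-endian 32-bit word is the trickiest part of opal-lpc.c. The following standalone C program (the buffer contents are invented) shows why a little-endian host must shift right before cropping, and that the bytes delivered to the user then come out in LPC wire order on either endianness:

/* Sketch of the LE shift-before-crop described in lpc_debug_read(). */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* OPAL wrote 0x00 0x00 0xB0 0xB1 to memory: BE, right-justified */
	uint8_t opal_buf[4] = { 0x00, 0x00, 0xB0, 0xB1 };
	uint32_t data;
	uint16_t out;

	memcpy(&data, opal_buf, sizeof(data));	/* native load, as the kernel does */

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	data >>= 16;	/* move B0B1 into the low half before cropping */
#endif
	out = (uint16_t)data;
	printf("bytes delivered to user: %02x %02x\n",
	       ((uint8_t *)&out)[0], ((uint8_t *)&out)[1]);
	/* prints "b0 b1" on both little- and big-endian hosts */
	return 0;
}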
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerNV OPAL high level interfaces * * Copyright 2011 IBM Corp. */ #define pr_fmt(fmt) "opal: " fmt #include <linux/printk.h> #include <linux/types.h> #include <linux/of.h> #include <linux/of_fdt.h> #include <linux/of_platform.h> #include <linux/of_address.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/kobject.h> #include <linux/delay.h> #include <linux/memblock.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/kmsg_dump.h> #include <linux/console.h> #include <linux/sched/debug.h> #include <asm/machdep.h> #include <asm/opal.h> #include <asm/firmware.h> #include <asm/mce.h> #include <asm/imc-pmu.h> #include <asm/bug.h> #include "powernv.h" #define OPAL_MSG_QUEUE_MAX 16 struct opal_msg_node { struct list_head list; struct opal_msg msg; }; static DEFINE_SPINLOCK(msg_list_lock); static LIST_HEAD(msg_list); /* /sys/firmware/opal */ struct kobject *opal_kobj; struct opal { u64 base; u64 entry; u64 size; } opal; struct mcheck_recoverable_range { u64 start_addr; u64 end_addr; u64 recover_addr; }; static int msg_list_size; static struct mcheck_recoverable_range *mc_recoverable_range; static int mc_recoverable_range_len; struct device_node *opal_node; static DEFINE_SPINLOCK(opal_write_lock); static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX]; static uint32_t opal_heartbeat; static struct task_struct *kopald_tsk; static struct opal_msg *opal_msg; static u32 opal_msg_size __ro_after_init; void __init opal_configure_cores(void) { u64 reinit_flags = 0; /* Do the actual re-init, This will clobber all FPRs, VRs, etc... * * It will preserve non volatile GPRs and HSPRG0/1. It will * also restore HIDs and other SPRs to their original value * but it might clobber a bunch. */ #ifdef __BIG_ENDIAN__ reinit_flags |= OPAL_REINIT_CPUS_HILE_BE; #else reinit_flags |= OPAL_REINIT_CPUS_HILE_LE; #endif /* * POWER9 always support running hash: * ie. 
Host hash supports hash guests * Host radix supports hash/radix guests */ if (early_cpu_has_feature(CPU_FTR_ARCH_300)) { reinit_flags |= OPAL_REINIT_CPUS_MMU_HASH; if (early_radix_enabled()) reinit_flags |= OPAL_REINIT_CPUS_MMU_RADIX; } opal_reinit_cpus(reinit_flags); /* Restore some bits */ if (cur_cpu_spec->cpu_restore) cur_cpu_spec->cpu_restore(); } int __init early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data) { const void *basep, *entryp, *sizep; int basesz, entrysz, runtimesz; if (depth != 1 || strcmp(uname, "ibm,opal") != 0) return 0; basep = of_get_flat_dt_prop(node, "opal-base-address", &basesz); entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz); sizep = of_get_flat_dt_prop(node, "opal-runtime-size", &runtimesz); if (!basep || !entryp || !sizep) return 1; opal.base = of_read_number(basep, basesz/4); opal.entry = of_read_number(entryp, entrysz/4); opal.size = of_read_number(sizep, runtimesz/4); pr_debug("OPAL Base = 0x%llx (basep=%p basesz=%d)\n", opal.base, basep, basesz); pr_debug("OPAL Entry = 0x%llx (entryp=%p basesz=%d)\n", opal.entry, entryp, entrysz); pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%d)\n", opal.size, sizep, runtimesz); if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) { powerpc_firmware_features |= FW_FEATURE_OPAL; pr_debug("OPAL detected !\n"); } else { panic("OPAL != V3 detected, no longer supported.\n"); } return 1; } int __init early_init_dt_scan_recoverable_ranges(unsigned long node, const char *uname, int depth, void *data) { int i, psize, size; const __be32 *prop; if (depth != 1 || strcmp(uname, "ibm,opal") != 0) return 0; prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize); if (!prop) return 1; pr_debug("Found machine check recoverable ranges.\n"); /* * Calculate number of available entries. * * Each recoverable address range entry is (start address, len, * recovery address), 2 cells each for start and recovery address, * 1 cell for len, totalling 5 cells per entry. */ mc_recoverable_range_len = psize / (sizeof(*prop) * 5); /* Sanity check */ if (!mc_recoverable_range_len) return 1; /* Size required to hold all the entries. */ size = mc_recoverable_range_len * sizeof(struct mcheck_recoverable_range); /* * Allocate a buffer to hold the MC recoverable ranges. */ mc_recoverable_range = memblock_alloc(size, __alignof__(u64)); if (!mc_recoverable_range) panic("%s: Failed to allocate %u bytes align=0x%lx\n", __func__, size, __alignof__(u64)); for (i = 0; i < mc_recoverable_range_len; i++) { mc_recoverable_range[i].start_addr = of_read_number(prop + (i * 5) + 0, 2); mc_recoverable_range[i].end_addr = mc_recoverable_range[i].start_addr + of_read_number(prop + (i * 5) + 2, 1); mc_recoverable_range[i].recover_addr = of_read_number(prop + (i * 5) + 3, 2); pr_debug("Machine check recoverable range: %llx..%llx: %llx\n", mc_recoverable_range[i].start_addr, mc_recoverable_range[i].end_addr, mc_recoverable_range[i].recover_addr); } return 1; } static int __init opal_register_exception_handlers(void) { #ifdef __BIG_ENDIAN__ u64 glue; if (!(powerpc_firmware_features & FW_FEATURE_OPAL)) return -ENODEV; /* Hookup some exception handlers except machine check. We use the * fwnmi area at 0x7000 to provide the glue space to OPAL */ glue = 0x7000; /* * Only ancient OPAL firmware requires this. * Specifically, firmware from FW810.00 (released June 2014) * through FW810.20 (Released October 2014). * * Check if we are running on newer (post Oct 2014) firmware that * exports the OPAL_HANDLE_HMI token. 
If yes, then don't ask OPAL to * patch the HMI interrupt and we catch it directly in Linux. * * For older firmware (i.e < FW810.20), we fallback to old behavior and * let OPAL patch the HMI vector and handle it inside OPAL firmware. * * For newer firmware we catch/handle the HMI directly in Linux. */ if (!opal_check_token(OPAL_HANDLE_HMI)) { pr_info("Old firmware detected, OPAL handles HMIs.\n"); opal_register_exception_handler( OPAL_HYPERVISOR_MAINTENANCE_HANDLER, 0, glue); glue += 128; } /* * Only applicable to ancient firmware, all modern * (post March 2015/skiboot 5.0) firmware will just return * OPAL_UNSUPPORTED. */ opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue); #endif return 0; } machine_early_initcall(powernv, opal_register_exception_handlers); static void queue_replay_msg(void *msg) { struct opal_msg_node *msg_node; if (msg_list_size < OPAL_MSG_QUEUE_MAX) { msg_node = kzalloc(sizeof(*msg_node), GFP_ATOMIC); if (msg_node) { INIT_LIST_HEAD(&msg_node->list); memcpy(&msg_node->msg, msg, sizeof(struct opal_msg)); list_add_tail(&msg_node->list, &msg_list); msg_list_size++; } else pr_warn_once("message queue no memory\n"); if (msg_list_size >= OPAL_MSG_QUEUE_MAX) pr_warn_once("message queue full\n"); } } static void dequeue_replay_msg(enum opal_msg_type msg_type) { struct opal_msg_node *msg_node, *tmp; list_for_each_entry_safe(msg_node, tmp, &msg_list, list) { if (be32_to_cpu(msg_node->msg.msg_type) != msg_type) continue; atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type], msg_type, &msg_node->msg); list_del(&msg_node->list); kfree(msg_node); msg_list_size--; } } /* * Opal message notifier based on message type. Allow subscribers to get * notified for specific messgae type. */ int opal_message_notifier_register(enum opal_msg_type msg_type, struct notifier_block *nb) { int ret; unsigned long flags; if (!nb || msg_type >= OPAL_MSG_TYPE_MAX) { pr_warn("%s: Invalid arguments, msg_type:%d\n", __func__, msg_type); return -EINVAL; } spin_lock_irqsave(&msg_list_lock, flags); ret = atomic_notifier_chain_register( &opal_msg_notifier_head[msg_type], nb); /* * If the registration succeeded, replay any queued messages that came * in prior to the notifier chain registration. msg_list_lock held here * to ensure they're delivered prior to any subsequent messages. */ if (ret == 0) dequeue_replay_msg(msg_type); spin_unlock_irqrestore(&msg_list_lock, flags); return ret; } EXPORT_SYMBOL_GPL(opal_message_notifier_register); int opal_message_notifier_unregister(enum opal_msg_type msg_type, struct notifier_block *nb) { return atomic_notifier_chain_unregister( &opal_msg_notifier_head[msg_type], nb); } EXPORT_SYMBOL_GPL(opal_message_notifier_unregister); static void opal_message_do_notify(uint32_t msg_type, void *msg) { unsigned long flags; bool queued = false; spin_lock_irqsave(&msg_list_lock, flags); if (opal_msg_notifier_head[msg_type].head == NULL) { /* * Queue up the msg since no notifiers have registered * yet for this msg_type. */ queue_replay_msg(msg); queued = true; } spin_unlock_irqrestore(&msg_list_lock, flags); if (queued) return; /* notify subscribers */ atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type], msg_type, msg); } static void opal_handle_message(void) { s64 ret; u32 type; ret = opal_get_msg(__pa(opal_msg), opal_msg_size); /* No opal message pending. */ if (ret == OPAL_RESOURCE) return; /* check for errors. 
*/ if (ret) { pr_warn("%s: Failed to retrieve opal message, err=%lld\n", __func__, ret); return; } type = be32_to_cpu(opal_msg->msg_type); /* Sanity check */ if (type >= OPAL_MSG_TYPE_MAX) { pr_warn_once("%s: Unknown message type: %u\n", __func__, type); return; } opal_message_do_notify(type, (void *)opal_msg); } static irqreturn_t opal_message_notify(int irq, void *data) { opal_handle_message(); return IRQ_HANDLED; } static int __init opal_message_init(struct device_node *opal_node) { int ret, i, irq; ret = of_property_read_u32(opal_node, "opal-msg-size", &opal_msg_size); if (ret) { pr_notice("Failed to read opal-msg-size property\n"); opal_msg_size = sizeof(struct opal_msg); } opal_msg = kmalloc(opal_msg_size, GFP_KERNEL); if (!opal_msg) { opal_msg_size = sizeof(struct opal_msg); /* Try to allocate fixed message size */ opal_msg = kmalloc(opal_msg_size, GFP_KERNEL); BUG_ON(opal_msg == NULL); } for (i = 0; i < OPAL_MSG_TYPE_MAX; i++) ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]); irq = opal_event_request(ilog2(OPAL_EVENT_MSG_PENDING)); if (!irq) { pr_err("%s: Can't register OPAL event irq (%d)\n", __func__, irq); return irq; } ret = request_irq(irq, opal_message_notify, IRQ_TYPE_LEVEL_HIGH, "opal-msg", NULL); if (ret) { pr_err("%s: Can't request OPAL event irq (%d)\n", __func__, ret); return ret; } return 0; } int opal_get_chars(uint32_t vtermno, char *buf, int count) { s64 rc; __be64 evt, len; if (!opal.entry) return -ENODEV; opal_poll_events(&evt); if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0) return 0; len = cpu_to_be64(count); rc = opal_console_read(vtermno, &len, buf); if (rc == OPAL_SUCCESS) return be64_to_cpu(len); return 0; } static int __opal_put_chars(uint32_t vtermno, const char *data, int total_len, bool atomic) { unsigned long flags = 0 /* shut up gcc */; int written; __be64 olen; s64 rc; if (!opal.entry) return -ENODEV; if (atomic) spin_lock_irqsave(&opal_write_lock, flags); rc = opal_console_write_buffer_space(vtermno, &olen); if (rc || be64_to_cpu(olen) < total_len) { /* Closed -> drop characters */ if (rc) written = total_len; else written = -EAGAIN; goto out; } /* Should not get a partial write here because space is available. */ olen = cpu_to_be64(total_len); rc = opal_console_write(vtermno, &olen, data); if (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { if (rc == OPAL_BUSY_EVENT) opal_poll_events(NULL); written = -EAGAIN; goto out; } /* Closed or other error drop */ if (rc != OPAL_SUCCESS) { written = opal_error_code(rc); goto out; } written = be64_to_cpu(olen); if (written < total_len) { if (atomic) { /* Should not happen */ pr_warn("atomic console write returned partial " "len=%d written=%d\n", total_len, written); } if (!written) written = -EAGAIN; } out: if (atomic) spin_unlock_irqrestore(&opal_write_lock, flags); return written; } int opal_put_chars(uint32_t vtermno, const char *data, int total_len) { return __opal_put_chars(vtermno, data, total_len, false); } /* * opal_put_chars_atomic will not perform partial-writes. Data will be * atomically written to the terminal or not at all. This is not strictly * true at the moment because console space can race with OPAL's console * writes. 
*/ int opal_put_chars_atomic(uint32_t vtermno, const char *data, int total_len) { return __opal_put_chars(vtermno, data, total_len, true); } static s64 __opal_flush_console(uint32_t vtermno) { s64 rc; if (!opal_check_token(OPAL_CONSOLE_FLUSH)) { __be64 evt; /* * If OPAL_CONSOLE_FLUSH is not implemented in the firmware, * the console can still be flushed by calling the polling * function while it has OPAL_EVENT_CONSOLE_OUTPUT events. */ WARN_ONCE(1, "opal: OPAL_CONSOLE_FLUSH missing.\n"); opal_poll_events(&evt); if (!(be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT)) return OPAL_SUCCESS; return OPAL_BUSY; } else { rc = opal_console_flush(vtermno); if (rc == OPAL_BUSY_EVENT) { opal_poll_events(NULL); rc = OPAL_BUSY; } return rc; } } /* * opal_flush_console spins until the console is flushed */ int opal_flush_console(uint32_t vtermno) { for (;;) { s64 rc = __opal_flush_console(vtermno); if (rc == OPAL_BUSY || rc == OPAL_PARTIAL) { mdelay(1); continue; } return opal_error_code(rc); } } /* * opal_flush_chars is an hvc interface that sleeps until the console is * flushed if wait, otherwise it will return -EBUSY if the console has data, * -EAGAIN if it has data and some of it was flushed. */ int opal_flush_chars(uint32_t vtermno, bool wait) { for (;;) { s64 rc = __opal_flush_console(vtermno); if (rc == OPAL_BUSY || rc == OPAL_PARTIAL) { if (wait) { msleep(OPAL_BUSY_DELAY_MS); continue; } if (rc == OPAL_PARTIAL) return -EAGAIN; } return opal_error_code(rc); } } static int opal_recover_mce(struct pt_regs *regs, struct machine_check_event *evt) { int recovered = 0; if (regs_is_unrecoverable(regs)) { /* If MSR_RI isn't set, we cannot recover */ pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n"); recovered = 0; } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) { /* Platform corrected itself */ recovered = 1; } else if (evt->severity == MCE_SEV_FATAL) { /* Fatal machine check */ pr_err("Machine check interrupt is fatal\n"); recovered = 0; } if (!recovered && evt->sync_error) { /* * Try to kill processes if we get a synchronous machine check * (e.g., one caused by execution of this instruction). This * will devolve into a panic if we try to kill init or are in * an interrupt etc. * * TODO: Queue up this address for hwpoisioning later. * TODO: This is not quite right for d-side machine * checks ->nip is not necessarily the important * address. */ if ((user_mode(regs))) { _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip); recovered = 1; } else if (die_will_crash()) { /* * die() would kill the kernel, so better to go via * the platform reboot code that will log the * machine check. */ recovered = 0; } else { die_mce("Machine check", regs, SIGBUS); recovered = 1; } } return recovered; } void __noreturn pnv_platform_error_reboot(struct pt_regs *regs, const char *msg) { panic_flush_kmsg_start(); pr_emerg("Hardware platform error: %s\n", msg); if (regs) show_regs(regs); smp_send_stop(); panic_flush_kmsg_end(); /* * Don't bother to shut things down because this will * xstop the system. */ if (opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR, msg) == OPAL_UNSUPPORTED) { pr_emerg("Reboot type %d not supported for %s\n", OPAL_REBOOT_PLATFORM_ERROR, msg); } /* * We reached here. There can be three possibilities: * 1. We are running on a firmware level that do not support * opal_cec_reboot2() * 2. We are running on a firmware level that do not support * OPAL_REBOOT_PLATFORM_ERROR reboot type. * 3. 
We are running on FSP based system that does not need * opal to trigger checkstop explicitly for error analysis. * The FSP PRD component would have already got notified * about this error through other channels. * 4. We are running on a newer skiboot that by default does * not cause a checkstop, drops us back to the kernel to * extract context and state at the time of the error. */ panic(msg); } int opal_machine_check(struct pt_regs *regs) { struct machine_check_event evt; if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) return 0; /* Print things out */ if (evt.version != MCE_V1) { pr_err("Machine Check Exception, Unknown event version %d !\n", evt.version); return 0; } machine_check_print_event_info(&evt, user_mode(regs), false); if (opal_recover_mce(regs, &evt)) return 1; pnv_platform_error_reboot(regs, "Unrecoverable Machine Check exception"); } /* Early hmi handler called in real mode. */ int opal_hmi_exception_early(struct pt_regs *regs) { s64 rc; /* * call opal hmi handler. Pass paca address as token. * The return value OPAL_SUCCESS is an indication that there is * an HMI event generated waiting to pull by Linux. */ rc = opal_handle_hmi(); if (rc == OPAL_SUCCESS) { local_paca->hmi_event_available = 1; return 1; } return 0; } int opal_hmi_exception_early2(struct pt_regs *regs) { s64 rc; __be64 out_flags; /* * call opal hmi handler. * Check 64-bit flag mask to find out if an event was generated, * and whether TB is still valid or not etc. */ rc = opal_handle_hmi2(&out_flags); if (rc != OPAL_SUCCESS) return 0; if (be64_to_cpu(out_flags) & OPAL_HMI_FLAGS_NEW_EVENT) local_paca->hmi_event_available = 1; if (be64_to_cpu(out_flags) & OPAL_HMI_FLAGS_TOD_TB_FAIL) tb_invalid = true; return 1; } /* HMI exception handler called in virtual mode when irqs are next enabled. */ int opal_handle_hmi_exception(struct pt_regs *regs) { /* * Check if HMI event is available. * if Yes, then wake kopald to process them. */ if (!local_paca->hmi_event_available) return 0; local_paca->hmi_event_available = 0; opal_wake_poller(); return 1; } static uint64_t find_recovery_address(uint64_t nip) { int i; for (i = 0; i < mc_recoverable_range_len; i++) if ((nip >= mc_recoverable_range[i].start_addr) && (nip < mc_recoverable_range[i].end_addr)) return mc_recoverable_range[i].recover_addr; return 0; } bool opal_mce_check_early_recovery(struct pt_regs *regs) { uint64_t recover_addr = 0; if (!opal.base || !opal.size) goto out; if ((regs->nip >= opal.base) && (regs->nip < (opal.base + opal.size))) recover_addr = find_recovery_address(regs->nip); /* * Setup regs->nip to rfi into fixup address. 
*/ if (recover_addr) regs_set_return_ip(regs, recover_addr); out: return !!recover_addr; } static int __init opal_sysfs_init(void) { opal_kobj = kobject_create_and_add("opal", firmware_kobj); if (!opal_kobj) { pr_warn("kobject_create_and_add opal failed\n"); return -ENOMEM; } return 0; } static ssize_t export_attr_read(struct file *fp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { return memory_read_from_buffer(buf, count, &off, bin_attr->private, bin_attr->size); } static int opal_add_one_export(struct kobject *parent, const char *export_name, struct device_node *np, const char *prop_name) { struct bin_attribute *attr = NULL; const char *name = NULL; u64 vals[2]; int rc; rc = of_property_read_u64_array(np, prop_name, &vals[0], 2); if (rc) goto out; attr = kzalloc(sizeof(*attr), GFP_KERNEL); if (!attr) { rc = -ENOMEM; goto out; } name = kstrdup(export_name, GFP_KERNEL); if (!name) { rc = -ENOMEM; goto out; } sysfs_bin_attr_init(attr); attr->attr.name = name; attr->attr.mode = 0400; attr->read = export_attr_read; attr->private = __va(vals[0]); attr->size = vals[1]; rc = sysfs_create_bin_file(parent, attr); out: if (rc) { kfree(name); kfree(attr); } return rc; } static void opal_add_exported_attrs(struct device_node *np, struct kobject *kobj) { struct device_node *child; struct property *prop; for_each_property_of_node(np, prop) { int rc; if (!strcmp(prop->name, "name") || !strcmp(prop->name, "phandle")) continue; rc = opal_add_one_export(kobj, prop->name, np, prop->name); if (rc) { pr_warn("Unable to add export %pOF/%s, rc = %d!\n", np, prop->name, rc); } } for_each_child_of_node(np, child) { struct kobject *child_kobj; child_kobj = kobject_create_and_add(child->name, kobj); if (!child_kobj) { pr_err("Unable to create export dir for %pOF\n", child); continue; } opal_add_exported_attrs(child, child_kobj); } } /* * opal_export_attrs: creates a sysfs node for each property listed in * the device-tree under /ibm,opal/firmware/exports/ * All new sysfs nodes are created under /opal/exports/. * This allows for reserved memory regions (e.g. HDAT) to be read. * The new sysfs nodes are only readable by root. */ static void opal_export_attrs(void) { struct device_node *np; struct kobject *kobj; int rc; np = of_find_node_by_path("/ibm,opal/firmware/exports"); if (!np) return; /* Create new 'exports' directory - /sys/firmware/opal/exports */ kobj = kobject_create_and_add("exports", opal_kobj); if (!kobj) { pr_warn("kobject_create_and_add() of exports failed\n"); of_node_put(np); return; } opal_add_exported_attrs(np, kobj); /* * NB: symbol_map existed before the generic export interface so it * lives under the top level opal_kobj. */ rc = opal_add_one_export(opal_kobj, "symbol_map", np->parent, "symbol-map"); if (rc) pr_warn("Error %d creating OPAL symbols file\n", rc); of_node_put(np); } static void __init opal_dump_region_init(void) { void *addr; uint64_t size; int rc; if (!opal_check_token(OPAL_REGISTER_DUMP_REGION)) return; /* Register kernel log buffer */ addr = log_buf_addr_get(); if (addr == NULL) return; size = log_buf_len_get(); if (size == 0) return; rc = opal_register_dump_region(OPAL_DUMP_REGION_LOG_BUF, __pa(addr), size); /* Don't warn if this is just an older OPAL that doesn't * know about that call */ if (rc && rc != OPAL_UNSUPPORTED) pr_warn("DUMP: Failed to register kernel log buffer. 
" "rc = %d\n", rc); } static void __init opal_pdev_init(const char *compatible) { struct device_node *np; for_each_compatible_node(np, NULL, compatible) of_platform_device_create(np, NULL, NULL); } static void __init opal_imc_init_dev(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, IMC_DTB_COMPAT); if (np) of_platform_device_create(np, NULL, NULL); of_node_put(np); } static int kopald(void *unused) { unsigned long timeout = msecs_to_jiffies(opal_heartbeat) + 1; set_freezable(); do { try_to_freeze(); opal_handle_events(); set_current_state(TASK_INTERRUPTIBLE); if (opal_have_pending_events()) __set_current_state(TASK_RUNNING); else schedule_timeout(timeout); } while (!kthread_should_stop()); return 0; } void opal_wake_poller(void) { if (kopald_tsk) wake_up_process(kopald_tsk); } static void __init opal_init_heartbeat(void) { /* Old firwmware, we assume the HVC heartbeat is sufficient */ if (of_property_read_u32(opal_node, "ibm,heartbeat-ms", &opal_heartbeat) != 0) opal_heartbeat = 0; if (opal_heartbeat) kopald_tsk = kthread_run(kopald, NULL, "kopald"); } static int __init opal_init(void) { struct device_node *np, *consoles, *leds; int rc; opal_node = of_find_node_by_path("/ibm,opal"); if (!opal_node) { pr_warn("Device node not found\n"); return -ENODEV; } /* Register OPAL consoles if any ports */ consoles = of_find_node_by_path("/ibm,opal/consoles"); if (consoles) { for_each_child_of_node(consoles, np) { if (!of_node_name_eq(np, "serial")) continue; of_platform_device_create(np, NULL, NULL); } of_node_put(consoles); } /* Initialise OPAL messaging system */ opal_message_init(opal_node); /* Initialise OPAL asynchronous completion interface */ opal_async_comp_init(); /* Initialise OPAL sensor interface */ opal_sensor_init(); /* Initialise OPAL hypervisor maintainence interrupt handling */ opal_hmi_handler_init(); /* Create i2c platform devices */ opal_pdev_init("ibm,opal-i2c"); /* Handle non-volatile memory devices */ opal_pdev_init("pmem-region"); /* Setup a heatbeat thread if requested by OPAL */ opal_init_heartbeat(); /* Detect In-Memory Collection counters and create devices*/ opal_imc_init_dev(); /* Create leds platform devices */ leds = of_find_node_by_path("/ibm,opal/leds"); if (leds) { of_platform_device_create(leds, "opal_leds", NULL); of_node_put(leds); } /* Initialise OPAL message log interface */ opal_msglog_init(); /* Create "opal" kobject under /sys/firmware */ rc = opal_sysfs_init(); if (rc == 0) { /* Setup dump region interface */ opal_dump_region_init(); /* Setup error log interface */ rc = opal_elog_init(); /* Setup code update interface */ opal_flash_update_init(); /* Setup platform dump extract interface */ opal_platform_dump_init(); /* Setup system parameters interface */ opal_sys_param_init(); /* Setup message log sysfs interface. 
*/ opal_msglog_sysfs_init(); /* Add all export properties*/ opal_export_attrs(); } /* Initialize platform devices: IPMI backend, PRD & flash interface */ opal_pdev_init("ibm,opal-ipmi"); opal_pdev_init("ibm,opal-flash"); opal_pdev_init("ibm,opal-prd"); /* Initialise platform device: oppanel interface */ opal_pdev_init("ibm,opal-oppanel"); /* Initialise OPAL kmsg dumper for flushing console on panic */ opal_kmsg_init(); /* Initialise OPAL powercap interface */ opal_powercap_init(); /* Initialise OPAL Power-Shifting-Ratio interface */ opal_psr_init(); /* Initialise OPAL sensor groups */ opal_sensor_groups_init(); /* Initialise OPAL Power control interface */ opal_power_control_init(); /* Initialize OPAL secure variables */ opal_pdev_init("ibm,secvar-backend"); return 0; } machine_subsys_initcall(powernv, opal_init); void opal_shutdown(void) { long rc = OPAL_BUSY; opal_event_shutdown(); /* * Then sync with OPAL which ensure anything that can * potentially write to our memory has completed such * as an ongoing dump retrieval */ while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { rc = opal_sync_host_reboot(); if (rc == OPAL_BUSY) opal_poll_events(NULL); else mdelay(10); } /* Unregister memory dump region */ if (opal_check_token(OPAL_UNREGISTER_DUMP_REGION)) opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF); } /* Export this so that test modules can use it */ EXPORT_SYMBOL_GPL(opal_invalid_call); EXPORT_SYMBOL_GPL(opal_xscom_read); EXPORT_SYMBOL_GPL(opal_xscom_write); EXPORT_SYMBOL_GPL(opal_ipmi_send); EXPORT_SYMBOL_GPL(opal_ipmi_recv); EXPORT_SYMBOL_GPL(opal_flash_read); EXPORT_SYMBOL_GPL(opal_flash_write); EXPORT_SYMBOL_GPL(opal_flash_erase); EXPORT_SYMBOL_GPL(opal_prd_msg); EXPORT_SYMBOL_GPL(opal_check_token); /* Convert a region of vmalloc memory to an opal sg list */ struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr, unsigned long vmalloc_size) { struct opal_sg_list *sg, *first = NULL; unsigned long i = 0; sg = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!sg) goto nomem; first = sg; while (vmalloc_size > 0) { uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT; uint64_t length = min(vmalloc_size, PAGE_SIZE); sg->entry[i].data = cpu_to_be64(data); sg->entry[i].length = cpu_to_be64(length); i++; if (i >= SG_ENTRIES_PER_NODE) { struct opal_sg_list *next; next = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!next) goto nomem; sg->length = cpu_to_be64( i * sizeof(struct opal_sg_entry) + 16); i = 0; sg->next = cpu_to_be64(__pa(next)); sg = next; } vmalloc_addr += length; vmalloc_size -= length; } sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16); return first; nomem: pr_err("%s : Failed to allocate memory\n", __func__); opal_free_sg_list(first); return NULL; } void opal_free_sg_list(struct opal_sg_list *sg) { while (sg) { uint64_t next = be64_to_cpu(sg->next); kfree(sg); if (next) sg = __va(next); else sg = NULL; } } int opal_error_code(int rc) { switch (rc) { case OPAL_SUCCESS: return 0; case OPAL_PARAMETER: return -EINVAL; case OPAL_ASYNC_COMPLETION: return -EINPROGRESS; case OPAL_BUSY: case OPAL_BUSY_EVENT: return -EBUSY; case OPAL_NO_MEM: return -ENOMEM; case OPAL_PERMISSION: return -EPERM; case OPAL_UNSUPPORTED: return -EIO; case OPAL_HARDWARE: return -EIO; case OPAL_INTERNAL_ERROR: return -EIO; case OPAL_TIMEOUT: return -ETIMEDOUT; default: pr_err("%s: unexpected OPAL error %d\n", __func__, rc); return -EIO; } } void powernv_set_nmmu_ptcr(unsigned long ptcr) { int rc; if (firmware_has_feature(FW_FEATURE_OPAL)) { rc = opal_nmmu_set_ptcr(-1UL, ptcr); if (rc != OPAL_SUCCESS 
&& rc != OPAL_UNSUPPORTED) pr_warn("%s: Unable to set nest mmu ptcr\n", __func__); } } EXPORT_SYMBOL_GPL(opal_poll_events); EXPORT_SYMBOL_GPL(opal_rtc_read); EXPORT_SYMBOL_GPL(opal_rtc_write); EXPORT_SYMBOL_GPL(opal_tpo_read); EXPORT_SYMBOL_GPL(opal_tpo_write); EXPORT_SYMBOL_GPL(opal_i2c_request); /* Export these symbols for PowerNV LED class driver */ EXPORT_SYMBOL_GPL(opal_leds_get_ind); EXPORT_SYMBOL_GPL(opal_leds_set_ind); /* Export this symbol for PowerNV Operator Panel class driver */ EXPORT_SYMBOL_GPL(opal_write_oppanel_async); /* Export this for KVM */ EXPORT_SYMBOL_GPL(opal_int_set_mfrr); EXPORT_SYMBOL_GPL(opal_int_eoi); EXPORT_SYMBOL_GPL(opal_error_code); /* Export the below symbol for NX compression */ EXPORT_SYMBOL(opal_nx_coproc_init);
linux-master
arch/powerpc/platforms/powernv/opal.c
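opal_mce_check_early_recovery() only patches the return address when the interrupted NIP falls inside one of the firmware-provided recoverable ranges. A compact sketch of that lookup, using made-up range values rather than anything parsed from the device tree, is below:

/* Sketch of the [start, end) range scan; table contents are invented. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct mc_range {
	uint64_t start_addr;
	uint64_t end_addr;
	uint64_t recover_addr;
};

static const struct mc_range ranges[] = {
	{ 0x30000100, 0x30000200, 0x30005000 },
	{ 0x30001000, 0x30001080, 0x30005100 },
};

static uint64_t find_recovery_address(uint64_t nip)
{
	for (size_t i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++)
		if (nip >= ranges[i].start_addr && nip < ranges[i].end_addr)
			return ranges[i].recover_addr;
	return 0;	/* not a recoverable location */
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)find_recovery_address(0x30000180));
	printf("0x%llx\n", (unsigned long long)find_recovery_address(0x30002000));
	return 0;
}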
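The message code in opal.c also queues events that arrive before any notifier has registered for their type, then replays them in arrival order (up to a fixed cap) once a subscriber appears. A simplified single-threaded sketch of that pattern, with an int payload instead of struct opal_msg and no locking, could read:

/* Sketch of queue-until-registered with bounded FIFO replay. */
#include <stdio.h>
#include <stdlib.h>

#define QUEUE_MAX 16

struct msg_node {
	int payload;
	struct msg_node *next;
};

static struct msg_node *head, *tail;
static int queued;
static void (*subscriber)(int payload);

static void deliver_or_queue(int payload)
{
	struct msg_node *n;

	if (subscriber) {
		subscriber(payload);
		return;
	}
	if (queued >= QUEUE_MAX)		/* bounded queue: drop when full */
		return;
	n = malloc(sizeof(*n));
	if (!n)
		return;
	n->payload = payload;
	n->next = NULL;
	if (tail)
		tail->next = n;
	else
		head = n;
	tail = n;
	queued++;
}

static void register_subscriber(void (*fn)(int))
{
	subscriber = fn;
	while (head) {				/* replay in arrival order */
		struct msg_node *n = head;

		head = n->next;
		if (!head)
			tail = NULL;
		queued--;
		fn(n->payload);
		free(n);
	}
}

static void print_msg(int payload)
{
	printf("got message %d\n", payload);
}

int main(void)
{
	deliver_or_queue(1);			/* no subscriber yet: queued */
	deliver_or_queue(2);
	register_subscriber(print_msg);		/* replays 1 then 2 */
	deliver_or_queue(3);			/* delivered immediately */
	return 0;
}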
// SPDX-License-Identifier: GPL-2.0+ // Copyright 2017 IBM Corp. #include <asm/pnv-ocxl.h> #include <asm/opal.h> #include <misc/ocxl-config.h> #include "pci.h" #define PNV_OCXL_TL_P9_RECV_CAP 0x000000000000000Full #define PNV_OCXL_ACTAG_MAX 64 /* PASIDs are 20-bit, but on P9, NPU can only handle 15 bits */ #define PNV_OCXL_PASID_BITS 15 #define PNV_OCXL_PASID_MAX ((1 << PNV_OCXL_PASID_BITS) - 1) #define AFU_PRESENT (1 << 31) #define AFU_INDEX_MASK 0x3F000000 #define AFU_INDEX_SHIFT 24 #define ACTAG_MASK 0xFFF struct actag_range { u16 start; u16 count; }; struct npu_link { struct list_head list; int domain; int bus; int dev; u16 fn_desired_actags[8]; struct actag_range fn_actags[8]; bool assignment_done; }; static struct list_head links_list = LIST_HEAD_INIT(links_list); static DEFINE_MUTEX(links_list_lock); /* * opencapi actags handling: * * When sending commands, the opencapi device references the memory * context it's targeting with an 'actag', which is really an alias * for a (BDF, pasid) combination. When it receives a command, the NPU * must do a lookup of the actag to identify the memory context. The * hardware supports a finite number of actags per link (64 for * POWER9). * * The device can carry multiple functions, and each function can have * multiple AFUs. Each AFU advertises in its config space the number * of desired actags. The host must configure in the config space of * the AFU how many actags the AFU is really allowed to use (which can * be less than what the AFU desires). * * When a PCI function is probed by the driver, it has no visibility * about the other PCI functions and how many actags they'd like, * which makes it impossible to distribute actags fairly among AFUs. * * Unfortunately, the only way to know how many actags a function * desires is by looking at the data for each AFU in the config space * and add them up. Similarly, the only way to know how many actags * all the functions of the physical device desire is by adding the * previously computed function counts. Then we can match that against * what the hardware supports. * * To get a comprehensive view, we use a 'pci fixup': at the end of * PCI enumeration, each function counts how many actags its AFUs * desire and we save it in a 'npu_link' structure, shared between all * the PCI functions of a same device. Therefore, when the first * function is probed by the driver, we can get an idea of the total * count of desired actags for the device, and assign the actags to * the AFUs, by pro-rating if needed. 
*/ static int find_dvsec_from_pos(struct pci_dev *dev, int dvsec_id, int pos) { int vsec = pos; u16 vendor, id; while ((vsec = pci_find_next_ext_capability(dev, vsec, OCXL_EXT_CAP_ID_DVSEC))) { pci_read_config_word(dev, vsec + OCXL_DVSEC_VENDOR_OFFSET, &vendor); pci_read_config_word(dev, vsec + OCXL_DVSEC_ID_OFFSET, &id); if (vendor == PCI_VENDOR_ID_IBM && id == dvsec_id) return vsec; } return 0; } static int find_dvsec_afu_ctrl(struct pci_dev *dev, u8 afu_idx) { int vsec = 0; u8 idx; while ((vsec = find_dvsec_from_pos(dev, OCXL_DVSEC_AFU_CTRL_ID, vsec))) { pci_read_config_byte(dev, vsec + OCXL_DVSEC_AFU_CTRL_AFU_IDX, &idx); if (idx == afu_idx) return vsec; } return 0; } static int get_max_afu_index(struct pci_dev *dev, int *afu_idx) { int pos; u32 val; pos = pci_find_dvsec_capability(dev, PCI_VENDOR_ID_IBM, OCXL_DVSEC_FUNC_ID); if (!pos) return -ESRCH; pci_read_config_dword(dev, pos + OCXL_DVSEC_FUNC_OFF_INDEX, &val); if (val & AFU_PRESENT) *afu_idx = (val & AFU_INDEX_MASK) >> AFU_INDEX_SHIFT; else *afu_idx = -1; return 0; } static int get_actag_count(struct pci_dev *dev, int afu_idx, int *actag) { int pos; u16 actag_sup; pos = find_dvsec_afu_ctrl(dev, afu_idx); if (!pos) return -ESRCH; pci_read_config_word(dev, pos + OCXL_DVSEC_AFU_CTRL_ACTAG_SUP, &actag_sup); *actag = actag_sup & ACTAG_MASK; return 0; } static struct npu_link *find_link(struct pci_dev *dev) { struct npu_link *link; list_for_each_entry(link, &links_list, list) { /* The functions of a device all share the same link */ if (link->domain == pci_domain_nr(dev->bus) && link->bus == dev->bus->number && link->dev == PCI_SLOT(dev->devfn)) { return link; } } /* link doesn't exist yet. Allocate one */ link = kzalloc(sizeof(struct npu_link), GFP_KERNEL); if (!link) return NULL; link->domain = pci_domain_nr(dev->bus); link->bus = dev->bus->number; link->dev = PCI_SLOT(dev->devfn); list_add(&link->list, &links_list); return link; } static void pnv_ocxl_fixup_actag(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct pnv_phb *phb = hose->private_data; struct npu_link *link; int rc, afu_idx = -1, i, actag; if (!machine_is(powernv)) return; if (phb->type != PNV_PHB_NPU_OCAPI) return; mutex_lock(&links_list_lock); link = find_link(dev); if (!link) { dev_warn(&dev->dev, "couldn't update actag information\n"); mutex_unlock(&links_list_lock); return; } /* * Check how many actags are desired for the AFUs under that * function and add it to the count for the link */ rc = get_max_afu_index(dev, &afu_idx); if (rc) { /* Most likely an invalid config space */ dev_dbg(&dev->dev, "couldn't find AFU information\n"); afu_idx = -1; } link->fn_desired_actags[PCI_FUNC(dev->devfn)] = 0; for (i = 0; i <= afu_idx; i++) { /* * AFU index 'holes' are allowed. 
So don't fail if we * can't read the actag info for an index */ rc = get_actag_count(dev, i, &actag); if (rc) continue; link->fn_desired_actags[PCI_FUNC(dev->devfn)] += actag; } dev_dbg(&dev->dev, "total actags for function: %d\n", link->fn_desired_actags[PCI_FUNC(dev->devfn)]); mutex_unlock(&links_list_lock); } DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pnv_ocxl_fixup_actag); static u16 assign_fn_actags(u16 desired, u16 total) { u16 count; if (total <= PNV_OCXL_ACTAG_MAX) count = desired; else count = PNV_OCXL_ACTAG_MAX * desired / total; return count; } static void assign_actags(struct npu_link *link) { u16 actag_count, range_start = 0, total_desired = 0; int i; for (i = 0; i < 8; i++) total_desired += link->fn_desired_actags[i]; for (i = 0; i < 8; i++) { if (link->fn_desired_actags[i]) { actag_count = assign_fn_actags( link->fn_desired_actags[i], total_desired); link->fn_actags[i].start = range_start; link->fn_actags[i].count = actag_count; range_start += actag_count; WARN_ON(range_start >= PNV_OCXL_ACTAG_MAX); } pr_debug("link %x:%x:%x fct %d actags: start=%d count=%d (desired=%d)\n", link->domain, link->bus, link->dev, i, link->fn_actags[i].start, link->fn_actags[i].count, link->fn_desired_actags[i]); } link->assignment_done = true; } int pnv_ocxl_get_actag(struct pci_dev *dev, u16 *base, u16 *enabled, u16 *supported) { struct npu_link *link; mutex_lock(&links_list_lock); link = find_link(dev); if (!link) { dev_err(&dev->dev, "actag information not found\n"); mutex_unlock(&links_list_lock); return -ENODEV; } /* * On p9, we only have 64 actags per link, so they must be * shared by all the functions of the same adapter. We counted * the desired actag counts during PCI enumeration, so that we * can allocate a pro-rated number of actags to each function. */ if (!link->assignment_done) assign_actags(link); *base = link->fn_actags[PCI_FUNC(dev->devfn)].start; *enabled = link->fn_actags[PCI_FUNC(dev->devfn)].count; *supported = link->fn_desired_actags[PCI_FUNC(dev->devfn)]; mutex_unlock(&links_list_lock); return 0; } EXPORT_SYMBOL_GPL(pnv_ocxl_get_actag); int pnv_ocxl_get_pasid_count(struct pci_dev *dev, int *count) { struct npu_link *link; int i, rc = -EINVAL; /* * The number of PASIDs (process address space ID) which can * be used by a function depends on how many functions exist * on the device. The NPU needs to be configured to know how * many bits are available to PASIDs and how many are to be * used by the function BDF identifier. * * We only support one AFU-carrying function for now. */ mutex_lock(&links_list_lock); link = find_link(dev); if (!link) { dev_err(&dev->dev, "actag information not found\n"); mutex_unlock(&links_list_lock); return -ENODEV; } for (i = 0; i < 8; i++) if (link->fn_desired_actags[i] && (i == PCI_FUNC(dev->devfn))) { *count = PNV_OCXL_PASID_MAX; rc = 0; break; } mutex_unlock(&links_list_lock); dev_dbg(&dev->dev, "%d PASIDs available for function\n", rc ? 0 : *count); return rc; } EXPORT_SYMBOL_GPL(pnv_ocxl_get_pasid_count); static void set_templ_rate(unsigned int templ, unsigned int rate, char *buf) { int shift, idx; WARN_ON(templ > PNV_OCXL_TL_MAX_TEMPLATE); idx = (PNV_OCXL_TL_MAX_TEMPLATE - templ) / 2; shift = 4 * (1 - ((PNV_OCXL_TL_MAX_TEMPLATE - templ) % 2)); buf[idx] |= rate << shift; } int pnv_ocxl_get_tl_cap(struct pci_dev *dev, long *cap, char *rate_buf, int rate_buf_size) { if (rate_buf_size != PNV_OCXL_TL_RATE_BUF_SIZE) return -EINVAL; /* * The TL capabilities are a characteristic of the NPU, so * we go with hard-coded values. 
* * The receiving rate of each template is encoded on 4 bits. * * On P9: * - templates 0 -> 3 are supported * - templates 0, 1 and 3 have a 0 receiving rate * - template 2 has receiving rate of 1 (extra cycle) */ memset(rate_buf, 0, rate_buf_size); set_templ_rate(2, 1, rate_buf); *cap = PNV_OCXL_TL_P9_RECV_CAP; return 0; } EXPORT_SYMBOL_GPL(pnv_ocxl_get_tl_cap); int pnv_ocxl_set_tl_conf(struct pci_dev *dev, long cap, uint64_t rate_buf_phys, int rate_buf_size) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct pnv_phb *phb = hose->private_data; int rc; if (rate_buf_size != PNV_OCXL_TL_RATE_BUF_SIZE) return -EINVAL; rc = opal_npu_tl_set(phb->opal_id, dev->devfn, cap, rate_buf_phys, rate_buf_size); if (rc) { dev_err(&dev->dev, "Can't configure host TL: %d\n", rc); return -EINVAL; } return 0; } EXPORT_SYMBOL_GPL(pnv_ocxl_set_tl_conf); int pnv_ocxl_get_xsl_irq(struct pci_dev *dev, int *hwirq) { int rc; rc = of_property_read_u32(dev->dev.of_node, "ibm,opal-xsl-irq", hwirq); if (rc) { dev_err(&dev->dev, "Can't get translation interrupt for device\n"); return rc; } return 0; } EXPORT_SYMBOL_GPL(pnv_ocxl_get_xsl_irq); void pnv_ocxl_unmap_xsl_regs(void __iomem *dsisr, void __iomem *dar, void __iomem *tfc, void __iomem *pe_handle) { iounmap(dsisr); iounmap(dar); iounmap(tfc); iounmap(pe_handle); } EXPORT_SYMBOL_GPL(pnv_ocxl_unmap_xsl_regs); int pnv_ocxl_map_xsl_regs(struct pci_dev *dev, void __iomem **dsisr, void __iomem **dar, void __iomem **tfc, void __iomem **pe_handle) { u64 reg; int i, j, rc = 0; void __iomem *regs[4]; /* * opal stores the mmio addresses of the DSISR, DAR, TFC and * PE_HANDLE registers in a device tree property, in that * order */ for (i = 0; i < 4; i++) { rc = of_property_read_u64_index(dev->dev.of_node, "ibm,opal-xsl-mmio", i, &reg); if (rc) break; regs[i] = ioremap(reg, 8); if (!regs[i]) { rc = -EINVAL; break; } } if (rc) { dev_err(&dev->dev, "Can't map translation mmio registers\n"); for (j = i - 1; j >= 0; j--) iounmap(regs[j]); } else { *dsisr = regs[0]; *dar = regs[1]; *tfc = regs[2]; *pe_handle = regs[3]; } return rc; } EXPORT_SYMBOL_GPL(pnv_ocxl_map_xsl_regs); struct spa_data { u64 phb_opal_id; u32 bdfn; }; int pnv_ocxl_spa_setup(struct pci_dev *dev, void *spa_mem, int PE_mask, void **platform_data) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct pnv_phb *phb = hose->private_data; struct spa_data *data; u32 bdfn; int rc; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; bdfn = pci_dev_id(dev); rc = opal_npu_spa_setup(phb->opal_id, bdfn, virt_to_phys(spa_mem), PE_mask); if (rc) { dev_err(&dev->dev, "Can't setup Shared Process Area: %d\n", rc); kfree(data); return rc; } data->phb_opal_id = phb->opal_id; data->bdfn = bdfn; *platform_data = (void *) data; return 0; } EXPORT_SYMBOL_GPL(pnv_ocxl_spa_setup); void pnv_ocxl_spa_release(void *platform_data) { struct spa_data *data = (struct spa_data *) platform_data; int rc; rc = opal_npu_spa_setup(data->phb_opal_id, data->bdfn, 0, 0); WARN_ON(rc); kfree(data); } EXPORT_SYMBOL_GPL(pnv_ocxl_spa_release); int pnv_ocxl_spa_remove_pe_from_cache(void *platform_data, int pe_handle) { struct spa_data *data = (struct spa_data *) platform_data; return opal_npu_spa_clear_cache(data->phb_opal_id, data->bdfn, pe_handle); } EXPORT_SYMBOL_GPL(pnv_ocxl_spa_remove_pe_from_cache); int pnv_ocxl_map_lpar(struct pci_dev *dev, uint64_t lparid, uint64_t lpcr, void __iomem **arva) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct pnv_phb *phb = hose->private_data; u64 mmio_atsd; int 
rc; /* ATSD physical address. * ATSD LAUNCH register: write access initiates a shoot down to * initiate the TLB Invalidate command. */ rc = of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", 0, &mmio_atsd); if (rc) { dev_info(&dev->dev, "No available ATSD found\n"); return rc; } /* Assign a register set to a Logical Partition and MMIO ATSD * LPARID register to the required value. */ rc = opal_npu_map_lpar(phb->opal_id, pci_dev_id(dev), lparid, lpcr); if (rc) { dev_err(&dev->dev, "Error mapping device to LPAR: %d\n", rc); return rc; } *arva = ioremap(mmio_atsd, 24); if (!(*arva)) { dev_warn(&dev->dev, "ioremap failed - mmio_atsd: %#llx\n", mmio_atsd); rc = -ENOMEM; } return rc; } EXPORT_SYMBOL_GPL(pnv_ocxl_map_lpar); void pnv_ocxl_unmap_lpar(void __iomem *arva) { iounmap(arva); } EXPORT_SYMBOL_GPL(pnv_ocxl_unmap_lpar); void pnv_ocxl_tlb_invalidate(void __iomem *arva, unsigned long pid, unsigned long addr, unsigned long page_size) { unsigned long timeout = jiffies + (HZ * PNV_OCXL_ATSD_TIMEOUT); u64 val = 0ull; int pend; u8 size; if (!(arva)) return; if (addr) { /* load Abbreviated Virtual Address register with * the necessary value */ val |= FIELD_PREP(PNV_OCXL_ATSD_AVA_AVA, addr >> (63-51)); out_be64(arva + PNV_OCXL_ATSD_AVA, val); } /* Write access initiates a shoot down to initiate the * TLB Invalidate command */ val = PNV_OCXL_ATSD_LNCH_R; val |= FIELD_PREP(PNV_OCXL_ATSD_LNCH_RIC, 0b10); if (addr) val |= FIELD_PREP(PNV_OCXL_ATSD_LNCH_IS, 0b00); else { val |= FIELD_PREP(PNV_OCXL_ATSD_LNCH_IS, 0b01); val |= PNV_OCXL_ATSD_LNCH_OCAPI_SINGLETON; } val |= PNV_OCXL_ATSD_LNCH_PRS; /* Actual Page Size to be invalidated * 000 4KB * 101 64KB * 001 2MB * 010 1GB */ size = 0b101; if (page_size == 0x1000) size = 0b000; if (page_size == 0x200000) size = 0b001; if (page_size == 0x40000000) size = 0b010; val |= FIELD_PREP(PNV_OCXL_ATSD_LNCH_AP, size); val |= FIELD_PREP(PNV_OCXL_ATSD_LNCH_PID, pid); out_be64(arva + PNV_OCXL_ATSD_LNCH, val); /* Poll the ATSD status register to determine when the * TLB Invalidate has been completed. */ val = in_be64(arva + PNV_OCXL_ATSD_STAT); pend = val >> 63; while (pend) { if (time_after_eq(jiffies, timeout)) { pr_err("%s - Timeout while reading XTS MMIO ATSD status register (val=%#llx, pidr=0x%lx)\n", __func__, val, pid); return; } cpu_relax(); val = in_be64(arva + PNV_OCXL_ATSD_STAT); pend = val >> 63; } } EXPORT_SYMBOL_GPL(pnv_ocxl_tlb_invalidate);
linux-master
arch/powerpc/platforms/powernv/ocxl.c
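assign_actags() in ocxl.c hands out one contiguous actag range per PCI function, scaling each function's request down pro rata when the total exceeds the 64 actags a POWER9 link supports. This small program (the request values are invented for illustration) reproduces that arithmetic:

/* Sketch of the pro-rata actag split across the 8 functions of a link. */
#include <stdio.h>
#include <stdint.h>

#define ACTAG_MAX 64

int main(void)
{
	uint16_t desired[8] = { 40, 0, 24, 0, 16, 0, 0, 0 };	/* total 80 > 64 */
	uint16_t total = 0, start = 0;

	for (int i = 0; i < 8; i++)
		total += desired[i];

	for (int i = 0; i < 8; i++) {
		uint16_t count = desired[i];

		if (!desired[i])
			continue;
		if (total > ACTAG_MAX)	/* scale down by desired/total */
			count = (uint16_t)((uint32_t)ACTAG_MAX * desired[i] / total);
		printf("fn %d: start=%u count=%u (desired=%u)\n",
		       i, start, count, desired[i]);
		start += count;		/* ranges are carved out back to back */
	}
	return 0;
}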
// SPDX-License-Identifier: GPL-2.0 #include <linux/percpu.h> #include <linux/jump_label.h> #include <asm/interrupt.h> #include <asm/opal-api.h> #include <asm/trace.h> #include <asm/asm-prototypes.h> #ifdef CONFIG_TRACEPOINTS /* * Since the tracing code might execute OPAL calls we need to guard against * recursion. */ static DEFINE_PER_CPU(unsigned int, opal_trace_depth); static void __trace_opal_entry(s64 a0, s64 a1, s64 a2, s64 a3, s64 a4, s64 a5, s64 a6, s64 a7, unsigned long opcode) { unsigned int *depth; unsigned long args[8]; depth = this_cpu_ptr(&opal_trace_depth); if (*depth) return; args[0] = a0; args[1] = a1; args[2] = a2; args[3] = a3; args[4] = a4; args[5] = a5; args[6] = a6; args[7] = a7; (*depth)++; trace_opal_entry(opcode, &args[0]); (*depth)--; } static void __trace_opal_exit(unsigned long opcode, unsigned long retval) { unsigned int *depth; depth = this_cpu_ptr(&opal_trace_depth); if (*depth) return; (*depth)++; trace_opal_exit(opcode, retval); (*depth)--; } static DEFINE_STATIC_KEY_FALSE(opal_tracepoint_key); int opal_tracepoint_regfunc(void) { static_branch_inc(&opal_tracepoint_key); return 0; } void opal_tracepoint_unregfunc(void) { static_branch_dec(&opal_tracepoint_key); } static s64 __opal_call_trace(s64 a0, s64 a1, s64 a2, s64 a3, s64 a4, s64 a5, s64 a6, s64 a7, unsigned long opcode, unsigned long msr) { s64 ret; __trace_opal_entry(a0, a1, a2, a3, a4, a5, a6, a7, opcode); ret = __opal_call(a0, a1, a2, a3, a4, a5, a6, a7, opcode, msr); __trace_opal_exit(opcode, ret); return ret; } #define DO_TRACE (static_branch_unlikely(&opal_tracepoint_key)) #else /* CONFIG_TRACEPOINTS */ static s64 __opal_call_trace(s64 a0, s64 a1, s64 a2, s64 a3, s64 a4, s64 a5, s64 a6, s64 a7, unsigned long opcode, unsigned long msr) { return 0; } #define DO_TRACE false #endif /* CONFIG_TRACEPOINTS */ static int64_t opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3, int64_t a4, int64_t a5, int64_t a6, int64_t a7, int64_t opcode) { unsigned long flags; unsigned long msr = mfmsr(); bool mmu = (msr & (MSR_IR|MSR_DR)); int64_t ret; /* OPAL call / firmware may use SRR and/or HSRR */ srr_regs_clobbered(); msr &= ~MSR_EE; if (unlikely(!mmu)) return __opal_call(a0, a1, a2, a3, a4, a5, a6, a7, opcode, msr); local_save_flags(flags); hard_irq_disable(); if (DO_TRACE) { ret = __opal_call_trace(a0, a1, a2, a3, a4, a5, a6, a7, opcode, msr); } else { ret = __opal_call(a0, a1, a2, a3, a4, a5, a6, a7, opcode, msr); } local_irq_restore(flags); return ret; } #define OPAL_CALL(name, opcode) \ int64_t name(int64_t a0, int64_t a1, int64_t a2, int64_t a3, \ int64_t a4, int64_t a5, int64_t a6, int64_t a7); \ int64_t name(int64_t a0, int64_t a1, int64_t a2, int64_t a3, \ int64_t a4, int64_t a5, int64_t a6, int64_t a7) \ { \ return opal_call(a0, a1, a2, a3, a4, a5, a6, a7, opcode); \ } OPAL_CALL(opal_invalid_call, OPAL_INVALID_CALL); OPAL_CALL(opal_console_write, OPAL_CONSOLE_WRITE); OPAL_CALL(opal_console_read, OPAL_CONSOLE_READ); OPAL_CALL(opal_console_write_buffer_space, OPAL_CONSOLE_WRITE_BUFFER_SPACE); OPAL_CALL(opal_rtc_read, OPAL_RTC_READ); OPAL_CALL(opal_rtc_write, OPAL_RTC_WRITE); OPAL_CALL(opal_cec_power_down, OPAL_CEC_POWER_DOWN); OPAL_CALL(opal_cec_reboot, OPAL_CEC_REBOOT); OPAL_CALL(opal_cec_reboot2, OPAL_CEC_REBOOT2); OPAL_CALL(opal_read_nvram, OPAL_READ_NVRAM); OPAL_CALL(opal_write_nvram, OPAL_WRITE_NVRAM); OPAL_CALL(opal_handle_interrupt, OPAL_HANDLE_INTERRUPT); OPAL_CALL(opal_poll_events, OPAL_POLL_EVENTS); OPAL_CALL(opal_pci_set_hub_tce_memory, OPAL_PCI_SET_HUB_TCE_MEMORY); 
OPAL_CALL(opal_pci_set_phb_tce_memory, OPAL_PCI_SET_PHB_TCE_MEMORY); OPAL_CALL(opal_pci_config_read_byte, OPAL_PCI_CONFIG_READ_BYTE); OPAL_CALL(opal_pci_config_read_half_word, OPAL_PCI_CONFIG_READ_HALF_WORD); OPAL_CALL(opal_pci_config_read_word, OPAL_PCI_CONFIG_READ_WORD); OPAL_CALL(opal_pci_config_write_byte, OPAL_PCI_CONFIG_WRITE_BYTE); OPAL_CALL(opal_pci_config_write_half_word, OPAL_PCI_CONFIG_WRITE_HALF_WORD); OPAL_CALL(opal_pci_config_write_word, OPAL_PCI_CONFIG_WRITE_WORD); OPAL_CALL(opal_set_xive, OPAL_SET_XIVE); OPAL_CALL(opal_get_xive, OPAL_GET_XIVE); OPAL_CALL(opal_register_exception_handler, OPAL_REGISTER_OPAL_EXCEPTION_HANDLER); OPAL_CALL(opal_pci_eeh_freeze_status, OPAL_PCI_EEH_FREEZE_STATUS); OPAL_CALL(opal_pci_eeh_freeze_clear, OPAL_PCI_EEH_FREEZE_CLEAR); OPAL_CALL(opal_pci_eeh_freeze_set, OPAL_PCI_EEH_FREEZE_SET); OPAL_CALL(opal_pci_err_inject, OPAL_PCI_ERR_INJECT); OPAL_CALL(opal_pci_shpc, OPAL_PCI_SHPC); OPAL_CALL(opal_pci_phb_mmio_enable, OPAL_PCI_PHB_MMIO_ENABLE); OPAL_CALL(opal_pci_set_phb_mem_window, OPAL_PCI_SET_PHB_MEM_WINDOW); OPAL_CALL(opal_pci_map_pe_mmio_window, OPAL_PCI_MAP_PE_MMIO_WINDOW); OPAL_CALL(opal_pci_set_phb_table_memory, OPAL_PCI_SET_PHB_TABLE_MEMORY); OPAL_CALL(opal_pci_set_pe, OPAL_PCI_SET_PE); OPAL_CALL(opal_pci_set_peltv, OPAL_PCI_SET_PELTV); OPAL_CALL(opal_pci_get_xive_reissue, OPAL_PCI_GET_XIVE_REISSUE); OPAL_CALL(opal_pci_set_xive_reissue, OPAL_PCI_SET_XIVE_REISSUE); OPAL_CALL(opal_pci_set_xive_pe, OPAL_PCI_SET_XIVE_PE); OPAL_CALL(opal_get_xive_source, OPAL_GET_XIVE_SOURCE); OPAL_CALL(opal_get_msi_32, OPAL_GET_MSI_32); OPAL_CALL(opal_get_msi_64, OPAL_GET_MSI_64); OPAL_CALL(opal_start_cpu, OPAL_START_CPU); OPAL_CALL(opal_query_cpu_status, OPAL_QUERY_CPU_STATUS); OPAL_CALL(opal_write_oppanel, OPAL_WRITE_OPPANEL); OPAL_CALL(opal_pci_map_pe_dma_window, OPAL_PCI_MAP_PE_DMA_WINDOW); OPAL_CALL(opal_pci_map_pe_dma_window_real, OPAL_PCI_MAP_PE_DMA_WINDOW_REAL); OPAL_CALL(opal_pci_reset, OPAL_PCI_RESET); OPAL_CALL(opal_pci_get_hub_diag_data, OPAL_PCI_GET_HUB_DIAG_DATA); OPAL_CALL(opal_pci_get_phb_diag_data, OPAL_PCI_GET_PHB_DIAG_DATA); OPAL_CALL(opal_pci_fence_phb, OPAL_PCI_FENCE_PHB); OPAL_CALL(opal_pci_reinit, OPAL_PCI_REINIT); OPAL_CALL(opal_pci_mask_pe_error, OPAL_PCI_MASK_PE_ERROR); OPAL_CALL(opal_set_slot_led_status, OPAL_SET_SLOT_LED_STATUS); OPAL_CALL(opal_get_epow_status, OPAL_GET_EPOW_STATUS); OPAL_CALL(opal_get_dpo_status, OPAL_GET_DPO_STATUS); OPAL_CALL(opal_set_system_attention_led, OPAL_SET_SYSTEM_ATTENTION_LED); OPAL_CALL(opal_pci_next_error, OPAL_PCI_NEXT_ERROR); OPAL_CALL(opal_pci_poll, OPAL_PCI_POLL); OPAL_CALL(opal_pci_msi_eoi, OPAL_PCI_MSI_EOI); OPAL_CALL(opal_pci_get_phb_diag_data2, OPAL_PCI_GET_PHB_DIAG_DATA2); OPAL_CALL(opal_xscom_read, OPAL_XSCOM_READ); OPAL_CALL(opal_xscom_write, OPAL_XSCOM_WRITE); OPAL_CALL(opal_lpc_read, OPAL_LPC_READ); OPAL_CALL(opal_lpc_write, OPAL_LPC_WRITE); OPAL_CALL(opal_return_cpu, OPAL_RETURN_CPU); OPAL_CALL(opal_reinit_cpus, OPAL_REINIT_CPUS); OPAL_CALL(opal_read_elog, OPAL_ELOG_READ); OPAL_CALL(opal_send_ack_elog, OPAL_ELOG_ACK); OPAL_CALL(opal_get_elog_size, OPAL_ELOG_SIZE); OPAL_CALL(opal_resend_pending_logs, OPAL_ELOG_RESEND); OPAL_CALL(opal_write_elog, OPAL_ELOG_WRITE); OPAL_CALL(opal_validate_flash, OPAL_FLASH_VALIDATE); OPAL_CALL(opal_manage_flash, OPAL_FLASH_MANAGE); OPAL_CALL(opal_update_flash, OPAL_FLASH_UPDATE); OPAL_CALL(opal_resync_timebase, OPAL_RESYNC_TIMEBASE); OPAL_CALL(opal_check_token, OPAL_CHECK_TOKEN); OPAL_CALL(opal_dump_init, OPAL_DUMP_INIT); OPAL_CALL(opal_dump_info, 
OPAL_DUMP_INFO); OPAL_CALL(opal_dump_info2, OPAL_DUMP_INFO2); OPAL_CALL(opal_dump_read, OPAL_DUMP_READ); OPAL_CALL(opal_dump_ack, OPAL_DUMP_ACK); OPAL_CALL(opal_get_msg, OPAL_GET_MSG); OPAL_CALL(opal_write_oppanel_async, OPAL_WRITE_OPPANEL_ASYNC); OPAL_CALL(opal_check_completion, OPAL_CHECK_ASYNC_COMPLETION); OPAL_CALL(opal_dump_resend_notification, OPAL_DUMP_RESEND); OPAL_CALL(opal_sync_host_reboot, OPAL_SYNC_HOST_REBOOT); OPAL_CALL(opal_sensor_read, OPAL_SENSOR_READ); OPAL_CALL(opal_get_param, OPAL_GET_PARAM); OPAL_CALL(opal_set_param, OPAL_SET_PARAM); OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI); OPAL_CALL(opal_handle_hmi2, OPAL_HANDLE_HMI2); OPAL_CALL(opal_config_cpu_idle_state, OPAL_CONFIG_CPU_IDLE_STATE); OPAL_CALL(opal_slw_set_reg, OPAL_SLW_SET_REG); OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION); OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION); OPAL_CALL(opal_pci_set_phb_cxl_mode, OPAL_PCI_SET_PHB_CAPI_MODE); OPAL_CALL(opal_tpo_write, OPAL_WRITE_TPO); OPAL_CALL(opal_tpo_read, OPAL_READ_TPO); OPAL_CALL(opal_ipmi_send, OPAL_IPMI_SEND); OPAL_CALL(opal_ipmi_recv, OPAL_IPMI_RECV); OPAL_CALL(opal_i2c_request, OPAL_I2C_REQUEST); OPAL_CALL(opal_flash_read, OPAL_FLASH_READ); OPAL_CALL(opal_flash_write, OPAL_FLASH_WRITE); OPAL_CALL(opal_flash_erase, OPAL_FLASH_ERASE); OPAL_CALL(opal_prd_msg, OPAL_PRD_MSG); OPAL_CALL(opal_leds_get_ind, OPAL_LEDS_GET_INDICATOR); OPAL_CALL(opal_leds_set_ind, OPAL_LEDS_SET_INDICATOR); OPAL_CALL(opal_console_flush, OPAL_CONSOLE_FLUSH); OPAL_CALL(opal_get_device_tree, OPAL_GET_DEVICE_TREE); OPAL_CALL(opal_pci_get_presence_state, OPAL_PCI_GET_PRESENCE_STATE); OPAL_CALL(opal_pci_get_power_state, OPAL_PCI_GET_POWER_STATE); OPAL_CALL(opal_pci_set_power_state, OPAL_PCI_SET_POWER_STATE); OPAL_CALL(opal_int_get_xirr, OPAL_INT_GET_XIRR); OPAL_CALL(opal_int_set_cppr, OPAL_INT_SET_CPPR); OPAL_CALL(opal_int_eoi, OPAL_INT_EOI); OPAL_CALL(opal_int_set_mfrr, OPAL_INT_SET_MFRR); OPAL_CALL(opal_pci_tce_kill, OPAL_PCI_TCE_KILL); OPAL_CALL(opal_nmmu_set_ptcr, OPAL_NMMU_SET_PTCR); OPAL_CALL(opal_xive_reset, OPAL_XIVE_RESET); OPAL_CALL(opal_xive_get_irq_info, OPAL_XIVE_GET_IRQ_INFO); OPAL_CALL(opal_xive_get_irq_config, OPAL_XIVE_GET_IRQ_CONFIG); OPAL_CALL(opal_xive_set_irq_config, OPAL_XIVE_SET_IRQ_CONFIG); OPAL_CALL(opal_xive_get_queue_info, OPAL_XIVE_GET_QUEUE_INFO); OPAL_CALL(opal_xive_set_queue_info, OPAL_XIVE_SET_QUEUE_INFO); OPAL_CALL(opal_xive_donate_page, OPAL_XIVE_DONATE_PAGE); OPAL_CALL(opal_xive_alloc_vp_block, OPAL_XIVE_ALLOCATE_VP_BLOCK); OPAL_CALL(opal_xive_free_vp_block, OPAL_XIVE_FREE_VP_BLOCK); OPAL_CALL(opal_xive_allocate_irq_raw, OPAL_XIVE_ALLOCATE_IRQ); OPAL_CALL(opal_xive_free_irq, OPAL_XIVE_FREE_IRQ); OPAL_CALL(opal_xive_get_vp_info, OPAL_XIVE_GET_VP_INFO); OPAL_CALL(opal_xive_set_vp_info, OPAL_XIVE_SET_VP_INFO); OPAL_CALL(opal_xive_sync, OPAL_XIVE_SYNC); OPAL_CALL(opal_xive_dump, OPAL_XIVE_DUMP); OPAL_CALL(opal_xive_get_queue_state, OPAL_XIVE_GET_QUEUE_STATE); OPAL_CALL(opal_xive_set_queue_state, OPAL_XIVE_SET_QUEUE_STATE); OPAL_CALL(opal_xive_get_vp_state, OPAL_XIVE_GET_VP_STATE); OPAL_CALL(opal_signal_system_reset, OPAL_SIGNAL_SYSTEM_RESET); OPAL_CALL(opal_npu_map_lpar, OPAL_NPU_MAP_LPAR); OPAL_CALL(opal_imc_counters_init, OPAL_IMC_COUNTERS_INIT); OPAL_CALL(opal_imc_counters_start, OPAL_IMC_COUNTERS_START); OPAL_CALL(opal_imc_counters_stop, OPAL_IMC_COUNTERS_STOP); OPAL_CALL(opal_get_powercap, OPAL_GET_POWERCAP); OPAL_CALL(opal_set_powercap, OPAL_SET_POWERCAP); OPAL_CALL(opal_get_power_shift_ratio, 
OPAL_GET_POWER_SHIFT_RATIO); OPAL_CALL(opal_set_power_shift_ratio, OPAL_SET_POWER_SHIFT_RATIO); OPAL_CALL(opal_sensor_group_clear, OPAL_SENSOR_GROUP_CLEAR); OPAL_CALL(opal_quiesce, OPAL_QUIESCE); OPAL_CALL(opal_npu_spa_setup, OPAL_NPU_SPA_SETUP); OPAL_CALL(opal_npu_spa_clear_cache, OPAL_NPU_SPA_CLEAR_CACHE); OPAL_CALL(opal_npu_tl_set, OPAL_NPU_TL_SET); OPAL_CALL(opal_pci_get_pbcq_tunnel_bar, OPAL_PCI_GET_PBCQ_TUNNEL_BAR); OPAL_CALL(opal_pci_set_pbcq_tunnel_bar, OPAL_PCI_SET_PBCQ_TUNNEL_BAR); OPAL_CALL(opal_sensor_read_u64, OPAL_SENSOR_READ_U64); OPAL_CALL(opal_sensor_group_enable, OPAL_SENSOR_GROUP_ENABLE); OPAL_CALL(opal_nx_coproc_init, OPAL_NX_COPROC_INIT); OPAL_CALL(opal_mpipl_update, OPAL_MPIPL_UPDATE); OPAL_CALL(opal_mpipl_register_tag, OPAL_MPIPL_REGISTER_TAG); OPAL_CALL(opal_mpipl_query_tag, OPAL_MPIPL_QUERY_TAG); OPAL_CALL(opal_secvar_get, OPAL_SECVAR_GET); OPAL_CALL(opal_secvar_get_next, OPAL_SECVAR_GET_NEXT); OPAL_CALL(opal_secvar_enqueue_update, OPAL_SECVAR_ENQUEUE_UPDATE);
linux-master
arch/powerpc/platforms/powernv/opal-call.c
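
The opal-call.c entry above builds every exported opal_* symbol from a single macro that forwards its arguments plus a fixed opcode to a shared dispatcher. A minimal userspace sketch of that pattern follows; the dispatcher, wrapper names and opcodes here are illustrative stand-ins, not the real OPAL ABI.

#include <stdint.h>
#include <stdio.h>

/* Common dispatcher: in the kernel this is opal_call(), which sorts out
 * MSR/irq state and optional tracing before entering firmware. */
static int64_t fw_call(int64_t a0, int64_t a1, unsigned long opcode)
{
	printf("firmware call opcode=%lu args=%lld,%lld\n",
	       opcode, (long long)a0, (long long)a1);
	return 0;
}

/* One macro stamps out an identical thin wrapper per firmware service. */
#define FW_CALL(name, opcode)						\
	int64_t name(int64_t a0, int64_t a1);				\
	int64_t name(int64_t a0, int64_t a1)				\
	{								\
		return fw_call(a0, a1, opcode);				\
	}

FW_CALL(fw_console_write, 1);	/* hypothetical opcode */
FW_CALL(fw_rtc_read, 3);	/* hypothetical opcode */

int main(void)
{
	fw_console_write(0, 16);
	fw_rtc_read(0, 0);
	return 0;
}
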
// SPDX-License-Identifier: GPL-2.0-or-later /* * OPAL hypervisor Maintenance interrupt handling support in PowerNV. * * Copyright 2014 IBM Corporation * Author: Mahesh Salgaonkar <[email protected]> */ #undef DEBUG #include <linux/kernel.h> #include <linux/init.h> #include <linux/of.h> #include <linux/mm.h> #include <linux/slab.h> #include <asm/opal.h> #include <asm/cputable.h> #include <asm/machdep.h> #include "powernv.h" static int opal_hmi_handler_nb_init; struct OpalHmiEvtNode { struct list_head list; struct OpalHMIEvent hmi_evt; }; struct xstop_reason { uint32_t xstop_reason; const char *unit_failed; const char *description; }; static LIST_HEAD(opal_hmi_evt_list); static DEFINE_SPINLOCK(opal_hmi_evt_lock); static void print_core_checkstop_reason(const char *level, struct OpalHMIEvent *hmi_evt) { int i; static const struct xstop_reason xstop_reason[] = { { CORE_CHECKSTOP_IFU_REGFILE, "IFU", "RegFile core check stop" }, { CORE_CHECKSTOP_IFU_LOGIC, "IFU", "Logic core check stop" }, { CORE_CHECKSTOP_PC_DURING_RECOV, "PC", "Core checkstop during recovery" }, { CORE_CHECKSTOP_ISU_REGFILE, "ISU", "RegFile core check stop (mapper error)" }, { CORE_CHECKSTOP_ISU_LOGIC, "ISU", "Logic core check stop" }, { CORE_CHECKSTOP_FXU_LOGIC, "FXU", "Logic core check stop" }, { CORE_CHECKSTOP_VSU_LOGIC, "VSU", "Logic core check stop" }, { CORE_CHECKSTOP_PC_RECOV_IN_MAINT_MODE, "PC", "Recovery in maintenance mode" }, { CORE_CHECKSTOP_LSU_REGFILE, "LSU", "RegFile core check stop" }, { CORE_CHECKSTOP_PC_FWD_PROGRESS, "PC", "Forward Progress Error" }, { CORE_CHECKSTOP_LSU_LOGIC, "LSU", "Logic core check stop" }, { CORE_CHECKSTOP_PC_LOGIC, "PC", "Logic core check stop" }, { CORE_CHECKSTOP_PC_HYP_RESOURCE, "PC", "Hypervisor Resource error - core check stop" }, { CORE_CHECKSTOP_PC_HANG_RECOV_FAILED, "PC", "Hang Recovery Failed (core check stop)" }, { CORE_CHECKSTOP_PC_AMBI_HANG_DETECTED, "PC", "Ambiguous Hang Detected (unknown source)" }, { CORE_CHECKSTOP_PC_DEBUG_TRIG_ERR_INJ, "PC", "Debug Trigger Error inject" }, { CORE_CHECKSTOP_PC_SPRD_HYP_ERR_INJ, "PC", "Hypervisor check stop via SPRC/SPRD" }, }; /* Validity check */ if (!hmi_evt->u.xstop_error.xstop_reason) { printk("%s Unknown Core check stop.\n", level); return; } printk("%s CPU PIR: %08x\n", level, be32_to_cpu(hmi_evt->u.xstop_error.u.pir)); for (i = 0; i < ARRAY_SIZE(xstop_reason); i++) if (be32_to_cpu(hmi_evt->u.xstop_error.xstop_reason) & xstop_reason[i].xstop_reason) printk("%s [Unit: %-3s] %s\n", level, xstop_reason[i].unit_failed, xstop_reason[i].description); } static void print_nx_checkstop_reason(const char *level, struct OpalHMIEvent *hmi_evt) { int i; static const struct xstop_reason xstop_reason[] = { { NX_CHECKSTOP_SHM_INVAL_STATE_ERR, "DMA & Engine", "SHM invalid state error" }, { NX_CHECKSTOP_DMA_INVAL_STATE_ERR_1, "DMA & Engine", "DMA invalid state error bit 15" }, { NX_CHECKSTOP_DMA_INVAL_STATE_ERR_2, "DMA & Engine", "DMA invalid state error bit 16" }, { NX_CHECKSTOP_DMA_CH0_INVAL_STATE_ERR, "DMA & Engine", "Channel 0 invalid state error" }, { NX_CHECKSTOP_DMA_CH1_INVAL_STATE_ERR, "DMA & Engine", "Channel 1 invalid state error" }, { NX_CHECKSTOP_DMA_CH2_INVAL_STATE_ERR, "DMA & Engine", "Channel 2 invalid state error" }, { NX_CHECKSTOP_DMA_CH3_INVAL_STATE_ERR, "DMA & Engine", "Channel 3 invalid state error" }, { NX_CHECKSTOP_DMA_CH4_INVAL_STATE_ERR, "DMA & Engine", "Channel 4 invalid state error" }, { NX_CHECKSTOP_DMA_CH5_INVAL_STATE_ERR, "DMA & Engine", "Channel 5 invalid state error" }, { NX_CHECKSTOP_DMA_CH6_INVAL_STATE_ERR, "DMA & 
Engine", "Channel 6 invalid state error" }, { NX_CHECKSTOP_DMA_CH7_INVAL_STATE_ERR, "DMA & Engine", "Channel 7 invalid state error" }, { NX_CHECKSTOP_DMA_CRB_UE, "DMA & Engine", "UE error on CRB(CSB address, CCB)" }, { NX_CHECKSTOP_DMA_CRB_SUE, "DMA & Engine", "SUE error on CRB(CSB address, CCB)" }, { NX_CHECKSTOP_PBI_ISN_UE, "PowerBus Interface", "CRB Kill ISN received while holding ISN with UE error" }, }; /* Validity check */ if (!hmi_evt->u.xstop_error.xstop_reason) { printk("%s Unknown NX check stop.\n", level); return; } printk("%s NX checkstop on CHIP ID: %x\n", level, be32_to_cpu(hmi_evt->u.xstop_error.u.chip_id)); for (i = 0; i < ARRAY_SIZE(xstop_reason); i++) if (be32_to_cpu(hmi_evt->u.xstop_error.xstop_reason) & xstop_reason[i].xstop_reason) printk("%s [Unit: %-3s] %s\n", level, xstop_reason[i].unit_failed, xstop_reason[i].description); } static void print_npu_checkstop_reason(const char *level, struct OpalHMIEvent *hmi_evt) { uint8_t reason, reason_count, i; /* * We may not have a checkstop reason on some combination of * hardware and/or skiboot version */ if (!hmi_evt->u.xstop_error.xstop_reason) { printk("%s NPU checkstop on chip %x\n", level, be32_to_cpu(hmi_evt->u.xstop_error.u.chip_id)); return; } /* * NPU2 has 3 FIRs. Reason encoded on a byte as: * 2 bits for the FIR number * 6 bits for the bit number * It may be possible to find several reasons. * * We don't display a specific message per FIR bit as there * are too many and most are meaningless without the workbook * and/or hw team help anyway. */ reason_count = sizeof(hmi_evt->u.xstop_error.xstop_reason) / sizeof(reason); for (i = 0; i < reason_count; i++) { reason = (hmi_evt->u.xstop_error.xstop_reason >> (8 * i)) & 0xFF; if (reason) printk("%s NPU checkstop on chip %x: FIR%d bit %d is set\n", level, be32_to_cpu(hmi_evt->u.xstop_error.u.chip_id), reason >> 6, reason & 0x3F); } } static void print_checkstop_reason(const char *level, struct OpalHMIEvent *hmi_evt) { uint8_t type = hmi_evt->u.xstop_error.xstop_type; switch (type) { case CHECKSTOP_TYPE_CORE: print_core_checkstop_reason(level, hmi_evt); break; case CHECKSTOP_TYPE_NX: print_nx_checkstop_reason(level, hmi_evt); break; case CHECKSTOP_TYPE_NPU: print_npu_checkstop_reason(level, hmi_evt); break; default: printk("%s Unknown Malfunction Alert of type %d\n", level, type); break; } } static void print_hmi_event_info(struct OpalHMIEvent *hmi_evt) { const char *level, *sevstr, *error_info; static const char *hmi_error_types[] = { "Malfunction Alert", "Processor Recovery done", "Processor recovery occurred again", "Processor recovery occurred for masked error", "Timer facility experienced an error", "TFMR SPR is corrupted", "UPS (Uninterrupted Power System) Overflow indication", "An XSCOM operation failure", "An XSCOM operation completed", "SCOM has set a reserved FIR bit to cause recovery", "Debug trigger has set a reserved FIR bit to cause recovery", "A hypervisor resource error occurred", "CAPP recovery process is in progress", }; static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); /* Print things out */ if (hmi_evt->version < OpalHMIEvt_V1) { pr_err("HMI Interrupt, Unknown event version %d !\n", hmi_evt->version); return; } switch (hmi_evt->severity) { case OpalHMI_SEV_NO_ERROR: level = KERN_INFO; sevstr = "Harmless"; break; case OpalHMI_SEV_WARNING: level = KERN_WARNING; sevstr = ""; break; case OpalHMI_SEV_ERROR_SYNC: level = KERN_ERR; sevstr = "Severe"; break; case OpalHMI_SEV_FATAL: default: level = KERN_ERR; sevstr = "Fatal"; 
break; } if (hmi_evt->severity != OpalHMI_SEV_NO_ERROR || __ratelimit(&rs)) { printk("%s%s Hypervisor Maintenance interrupt [%s]\n", level, sevstr, hmi_evt->disposition == OpalHMI_DISPOSITION_RECOVERED ? "Recovered" : "Not recovered"); error_info = hmi_evt->type < ARRAY_SIZE(hmi_error_types) ? hmi_error_types[hmi_evt->type] : "Unknown"; printk("%s Error detail: %s\n", level, error_info); printk("%s HMER: %016llx\n", level, be64_to_cpu(hmi_evt->hmer)); if ((hmi_evt->type == OpalHMI_ERROR_TFAC) || (hmi_evt->type == OpalHMI_ERROR_TFMR_PARITY)) printk("%s TFMR: %016llx\n", level, be64_to_cpu(hmi_evt->tfmr)); } if (hmi_evt->version < OpalHMIEvt_V2) return; /* OpalHMIEvt_V2 and above provides reason for malfunction alert. */ if (hmi_evt->type == OpalHMI_ERROR_MALFUNC_ALERT) print_checkstop_reason(level, hmi_evt); } static void hmi_event_handler(struct work_struct *work) { unsigned long flags; struct OpalHMIEvent *hmi_evt; struct OpalHmiEvtNode *msg_node; uint8_t disposition; struct opal_msg msg; int unrecoverable = 0; spin_lock_irqsave(&opal_hmi_evt_lock, flags); while (!list_empty(&opal_hmi_evt_list)) { msg_node = list_entry(opal_hmi_evt_list.next, struct OpalHmiEvtNode, list); list_del(&msg_node->list); spin_unlock_irqrestore(&opal_hmi_evt_lock, flags); hmi_evt = (struct OpalHMIEvent *) &msg_node->hmi_evt; print_hmi_event_info(hmi_evt); disposition = hmi_evt->disposition; kfree(msg_node); /* * Check if HMI event has been recovered or not. If not * then kernel can't continue, we need to panic. * But before we do that, display all the HMI event * available on the list and set unrecoverable flag to 1. */ if (disposition != OpalHMI_DISPOSITION_RECOVERED) unrecoverable = 1; spin_lock_irqsave(&opal_hmi_evt_lock, flags); } spin_unlock_irqrestore(&opal_hmi_evt_lock, flags); if (unrecoverable) { /* Pull all HMI events from OPAL before we panic. */ while (opal_get_msg(__pa(&msg), sizeof(msg)) == OPAL_SUCCESS) { u32 type; type = be32_to_cpu(msg.msg_type); /* skip if not HMI event */ if (type != OPAL_MSG_HMI_EVT) continue; /* HMI event info starts from param[0] */ hmi_evt = (struct OpalHMIEvent *)&msg.params[0]; print_hmi_event_info(hmi_evt); } pnv_platform_error_reboot(NULL, "Unrecoverable HMI exception"); } } static DECLARE_WORK(hmi_event_work, hmi_event_handler); /* * opal_handle_hmi_event - notifier handler that queues up HMI events * to be preocessed later. */ static int opal_handle_hmi_event(struct notifier_block *nb, unsigned long msg_type, void *msg) { unsigned long flags; struct OpalHMIEvent *hmi_evt; struct opal_msg *hmi_msg = msg; struct OpalHmiEvtNode *msg_node; /* Sanity Checks */ if (msg_type != OPAL_MSG_HMI_EVT) return 0; /* HMI event info starts from param[0] */ hmi_evt = (struct OpalHMIEvent *)&hmi_msg->params[0]; /* Delay the logging of HMI events to workqueue. 
*/ msg_node = kzalloc(sizeof(*msg_node), GFP_ATOMIC); if (!msg_node) { pr_err("HMI: out of memory, Opal message event not handled\n"); return -ENOMEM; } memcpy(&msg_node->hmi_evt, hmi_evt, sizeof(*hmi_evt)); spin_lock_irqsave(&opal_hmi_evt_lock, flags); list_add(&msg_node->list, &opal_hmi_evt_list); spin_unlock_irqrestore(&opal_hmi_evt_lock, flags); schedule_work(&hmi_event_work); return 0; } static struct notifier_block opal_hmi_handler_nb = { .notifier_call = opal_handle_hmi_event, .next = NULL, .priority = 0, }; int __init opal_hmi_handler_init(void) { int ret; if (!opal_hmi_handler_nb_init) { ret = opal_message_notifier_register( OPAL_MSG_HMI_EVT, &opal_hmi_handler_nb); if (ret) { pr_err("%s: Can't register OPAL event notifier (%d)\n", __func__, ret); return ret; } opal_hmi_handler_nb_init = 1; } return 0; }
linux-master
arch/powerpc/platforms/powernv/opal-hmi.c
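
opal-hmi.c above decodes checkstop reasons by scanning a table of {bit mask, unit, description} entries and reporting every bit that is set. A small self-contained sketch of that table scan, with made-up reason bits in place of the CORE_CHECKSTOP_* constants:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct reason_entry {
	uint32_t mask;
	const char *unit;
	const char *desc;
};

/* Hypothetical reason bits, standing in for the real checkstop masks. */
static const struct reason_entry table[] = {
	{ 0x00000001, "IFU", "RegFile core check stop" },
	{ 0x00000002, "IFU", "Logic core check stop" },
	{ 0x00000004, "PC",  "Core checkstop during recovery" },
};

static void print_reasons(uint32_t reason)
{
	size_t i;

	if (!reason) {
		printf("Unknown check stop\n");
		return;
	}
	/* A reason word may have several bits set; report every match. */
	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (reason & table[i].mask)
			printf("[Unit: %-3s] %s\n", table[i].unit, table[i].desc);
}

int main(void)
{
	print_reasons(0x5);
	return 0;
}
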
// SPDX-License-Identifier: GPL-2.0-or-later /* * This file implements an irqchip for OPAL events. Whenever there is * an interrupt that is handled by OPAL we get passed a list of events * that Linux needs to do something about. These basically look like * interrupts to Linux so we implement an irqchip to handle them. * * Copyright Alistair Popple, IBM Corporation 2014. */ #include <linux/bitops.h> #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/irqdomain.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/kthread.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/of_irq.h> #include <asm/machdep.h> #include <asm/opal.h> #include "powernv.h" /* Maximum number of events supported by OPAL firmware */ #define MAX_NUM_EVENTS 64 struct opal_event_irqchip { struct irq_chip irqchip; struct irq_domain *domain; unsigned long mask; }; static struct opal_event_irqchip opal_event_irqchip; static u64 last_outstanding_events; static int opal_irq_count; static struct resource *opal_irqs; void opal_handle_events(void) { __be64 events = 0; u64 e; e = READ_ONCE(last_outstanding_events) & opal_event_irqchip.mask; again: while (e) { int hwirq; hwirq = fls64(e) - 1; e &= ~BIT_ULL(hwirq); local_irq_disable(); irq_enter(); generic_handle_domain_irq(opal_event_irqchip.domain, hwirq); irq_exit(); local_irq_enable(); cond_resched(); } WRITE_ONCE(last_outstanding_events, 0); if (opal_poll_events(&events) != OPAL_SUCCESS) return; e = be64_to_cpu(events) & opal_event_irqchip.mask; if (e) goto again; } bool opal_have_pending_events(void) { if (READ_ONCE(last_outstanding_events) & opal_event_irqchip.mask) return true; return false; } static void opal_event_mask(struct irq_data *d) { clear_bit(d->hwirq, &opal_event_irqchip.mask); } static void opal_event_unmask(struct irq_data *d) { set_bit(d->hwirq, &opal_event_irqchip.mask); if (opal_have_pending_events()) opal_wake_poller(); } static int opal_event_set_type(struct irq_data *d, unsigned int flow_type) { /* * For now we only support level triggered events. The irq * handler will be called continuously until the event has * been cleared in OPAL. 
*/ if (flow_type != IRQ_TYPE_LEVEL_HIGH) return -EINVAL; return 0; } static struct opal_event_irqchip opal_event_irqchip = { .irqchip = { .name = "OPAL EVT", .irq_mask = opal_event_mask, .irq_unmask = opal_event_unmask, .irq_set_type = opal_event_set_type, }, .mask = 0, }; static int opal_event_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { irq_set_chip_data(irq, &opal_event_irqchip); irq_set_chip_and_handler(irq, &opal_event_irqchip.irqchip, handle_level_irq); return 0; } static irqreturn_t opal_interrupt(int irq, void *data) { __be64 events; opal_handle_interrupt(virq_to_hw(irq), &events); WRITE_ONCE(last_outstanding_events, be64_to_cpu(events)); if (opal_have_pending_events()) opal_wake_poller(); return IRQ_HANDLED; } static int opal_event_match(struct irq_domain *h, struct device_node *node, enum irq_domain_bus_token bus_token) { return irq_domain_get_of_node(h) == node; } static int opal_event_xlate(struct irq_domain *h, struct device_node *np, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) { *out_hwirq = intspec[0]; *out_flags = IRQ_TYPE_LEVEL_HIGH; return 0; } static const struct irq_domain_ops opal_event_domain_ops = { .match = opal_event_match, .map = opal_event_map, .xlate = opal_event_xlate, }; void opal_event_shutdown(void) { unsigned int i; /* First free interrupts, which will also mask them */ for (i = 0; i < opal_irq_count; i++) { if (!opal_irqs || !opal_irqs[i].start) continue; if (in_interrupt() || irqs_disabled()) disable_irq_nosync(opal_irqs[i].start); else free_irq(opal_irqs[i].start, NULL); opal_irqs[i].start = 0; } } int __init opal_event_init(void) { struct device_node *dn, *opal_node; bool old_style = false; int i, rc = 0; opal_node = of_find_node_by_path("/ibm,opal"); if (!opal_node) { pr_warn("opal: Node not found\n"); return -ENODEV; } /* If dn is NULL it means the domain won't be linked to a DT * node so therefore irq_of_parse_and_map(...) wont work. But * that shouldn't be problem because if we're running a * version of skiboot that doesn't have the dn then the * devices won't have the correct properties and will have to * fall back to the legacy method (opal_event_request(...)) * anyway. */ dn = of_find_compatible_node(NULL, NULL, "ibm,opal-event"); opal_event_irqchip.domain = irq_domain_add_linear(dn, MAX_NUM_EVENTS, &opal_event_domain_ops, &opal_event_irqchip); of_node_put(dn); if (!opal_event_irqchip.domain) { pr_warn("opal: Unable to create irq domain\n"); rc = -ENOMEM; goto out; } /* Look for new-style (standard) "interrupts" property */ opal_irq_count = of_irq_count(opal_node); /* Absent ? Look for the old one */ if (opal_irq_count < 1) { /* Get opal-interrupts property and names if present */ rc = of_property_count_u32_elems(opal_node, "opal-interrupts"); if (rc > 0) opal_irq_count = rc; old_style = true; } /* No interrupts ? Bail out */ if (!opal_irq_count) goto out; pr_debug("OPAL: Found %d interrupts reserved for OPAL using %s scheme\n", opal_irq_count, old_style ? 
"old" : "new"); /* Allocate an IRQ resources array */ opal_irqs = kcalloc(opal_irq_count, sizeof(struct resource), GFP_KERNEL); if (WARN_ON(!opal_irqs)) { rc = -ENOMEM; goto out; } /* Build the resources array */ if (old_style) { /* Old style "opal-interrupts" property */ for (i = 0; i < opal_irq_count; i++) { struct resource *r = &opal_irqs[i]; const char *name = NULL; u32 hw_irq; int virq; rc = of_property_read_u32_index(opal_node, "opal-interrupts", i, &hw_irq); if (WARN_ON(rc < 0)) { opal_irq_count = i; break; } of_property_read_string_index(opal_node, "opal-interrupts-names", i, &name); virq = irq_create_mapping(NULL, hw_irq); if (!virq) { pr_warn("Failed to map OPAL irq 0x%x\n", hw_irq); continue; } r->start = r->end = virq; r->flags = IORESOURCE_IRQ | IRQ_TYPE_LEVEL_LOW; r->name = name; } } else { /* new style standard "interrupts" property */ rc = of_irq_to_resource_table(opal_node, opal_irqs, opal_irq_count); if (WARN_ON(rc < 0)) { opal_irq_count = 0; kfree(opal_irqs); goto out; } if (WARN_ON(rc < opal_irq_count)) opal_irq_count = rc; } /* Install interrupt handlers */ for (i = 0; i < opal_irq_count; i++) { struct resource *r = &opal_irqs[i]; const char *name; /* Prefix name */ if (r->name && strlen(r->name)) name = kasprintf(GFP_KERNEL, "opal-%s", r->name); else name = kasprintf(GFP_KERNEL, "opal"); /* Install interrupt handler */ rc = request_irq(r->start, opal_interrupt, r->flags & IRQD_TRIGGER_MASK, name, NULL); if (rc) { pr_warn("Error %d requesting OPAL irq %d\n", rc, (int)r->start); continue; } } rc = 0; out: of_node_put(opal_node); return rc; } machine_arch_initcall(powernv, opal_event_init); /** * opal_event_request(unsigned int opal_event_nr) - Request an event * @opal_event_nr: the opal event number to request * * This routine can be used to find the linux virq number which can * then be passed to request_irq to assign a handler for a particular * opal event. This should only be used by legacy devices which don't * have proper device tree bindings. Most devices should use * irq_of_parse_and_map() instead. */ int opal_event_request(unsigned int opal_event_nr) { if (WARN_ON_ONCE(!opal_event_irqchip.domain)) return 0; return irq_create_mapping(opal_event_irqchip.domain, opal_event_nr); } EXPORT_SYMBOL(opal_event_request);
linux-master
arch/powerpc/platforms/powernv/opal-irqchip.c
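
opal_handle_events() above walks a 64-bit pending-events word from the highest set bit down using fls64() and dispatches one hwirq per bit. A compiler-builtin-based userspace sketch of the same loop (the event numbers are arbitrary examples):

#include <stdint.h>
#include <stdio.h>

/* Portable fls64(): 1-based index of the most significant set bit, 0 if none. */
static int fls64(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

static void handle_event(int hwirq)
{
	printf("dispatching event %d\n", hwirq);
}

int main(void)
{
	uint64_t pending = (1ULL << 40) | (1ULL << 5) | 1ULL;

	/* Peel off the highest pending bit each iteration, like opal_handle_events(). */
	while (pending) {
		int hwirq = fls64(pending) - 1;

		pending &= ~(1ULL << hwirq);
		handle_event(hwirq);
	}
	return 0;
}
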
// SPDX-License-Identifier: GPL-2.0-or-later /* * OPAL IMC interface detection driver * Supported on POWERNV platform * * Copyright (C) 2017 Madhavan Srinivasan, IBM Corporation. * (C) 2017 Anju T Sudhakar, IBM Corporation. * (C) 2017 Hemant K Shaw, IBM Corporation. */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/crash_dump.h> #include <linux/debugfs.h> #include <asm/opal.h> #include <asm/io.h> #include <asm/imc-pmu.h> #include <asm/cputhreads.h> static struct dentry *imc_debugfs_parent; /* Helpers to export imc command and mode via debugfs */ static int imc_mem_get(void *data, u64 *val) { *val = cpu_to_be64(*(u64 *)data); return 0; } static int imc_mem_set(void *data, u64 val) { *(u64 *)data = cpu_to_be64(val); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(fops_imc_x64, imc_mem_get, imc_mem_set, "0x%016llx\n"); static void imc_debugfs_create_x64(const char *name, umode_t mode, struct dentry *parent, u64 *value) { debugfs_create_file_unsafe(name, mode, parent, value, &fops_imc_x64); } /* * export_imc_mode_and_cmd: Create a debugfs interface * for imc_cmd and imc_mode * for each node in the system. * imc_mode and imc_cmd can be changed by echo into * this interface. */ static void export_imc_mode_and_cmd(struct device_node *node, struct imc_pmu *pmu_ptr) { static u64 loc, *imc_mode_addr, *imc_cmd_addr; char mode[16], cmd[16]; u32 cb_offset; struct imc_mem_info *ptr = pmu_ptr->mem_info; imc_debugfs_parent = debugfs_create_dir("imc", arch_debugfs_dir); if (of_property_read_u32(node, "cb_offset", &cb_offset)) cb_offset = IMC_CNTL_BLK_OFFSET; while (ptr->vbase != NULL) { loc = (u64)(ptr->vbase) + cb_offset; imc_mode_addr = (u64 *)(loc + IMC_CNTL_BLK_MODE_OFFSET); sprintf(mode, "imc_mode_%d", (u32)(ptr->id)); imc_debugfs_create_x64(mode, 0600, imc_debugfs_parent, imc_mode_addr); imc_cmd_addr = (u64 *)(loc + IMC_CNTL_BLK_CMD_OFFSET); sprintf(cmd, "imc_cmd_%d", (u32)(ptr->id)); imc_debugfs_create_x64(cmd, 0600, imc_debugfs_parent, imc_cmd_addr); ptr++; } } /* * imc_get_mem_addr_nest: Function to get nest counter memory region * for each chip */ static int imc_get_mem_addr_nest(struct device_node *node, struct imc_pmu *pmu_ptr, u32 offset) { int nr_chips = 0, i; u64 *base_addr_arr, baddr; u32 *chipid_arr; nr_chips = of_property_count_u32_elems(node, "chip-id"); if (nr_chips <= 0) return -ENODEV; base_addr_arr = kcalloc(nr_chips, sizeof(*base_addr_arr), GFP_KERNEL); if (!base_addr_arr) return -ENOMEM; chipid_arr = kcalloc(nr_chips, sizeof(*chipid_arr), GFP_KERNEL); if (!chipid_arr) { kfree(base_addr_arr); return -ENOMEM; } if (of_property_read_u32_array(node, "chip-id", chipid_arr, nr_chips)) goto error; if (of_property_read_u64_array(node, "base-addr", base_addr_arr, nr_chips)) goto error; pmu_ptr->mem_info = kcalloc(nr_chips + 1, sizeof(*pmu_ptr->mem_info), GFP_KERNEL); if (!pmu_ptr->mem_info) goto error; for (i = 0; i < nr_chips; i++) { pmu_ptr->mem_info[i].id = chipid_arr[i]; baddr = base_addr_arr[i] + offset; pmu_ptr->mem_info[i].vbase = phys_to_virt(baddr); } pmu_ptr->imc_counter_mmaped = true; kfree(base_addr_arr); kfree(chipid_arr); return 0; error: kfree(base_addr_arr); kfree(chipid_arr); return -1; } /* * imc_pmu_create : Takes the parent device which is the pmu unit, pmu_index * and domain as the inputs. 
* Allocates memory for the struct imc_pmu, sets up its domain, size and offsets */ static struct imc_pmu *imc_pmu_create(struct device_node *parent, int pmu_index, int domain) { int ret = 0; struct imc_pmu *pmu_ptr; u32 offset; /* Return for unknown domain */ if (domain < 0) return NULL; /* memory for pmu */ pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL); if (!pmu_ptr) return NULL; /* Set the domain */ pmu_ptr->domain = domain; ret = of_property_read_u32(parent, "size", &pmu_ptr->counter_mem_size); if (ret) goto free_pmu; if (!of_property_read_u32(parent, "offset", &offset)) { if (imc_get_mem_addr_nest(parent, pmu_ptr, offset)) goto free_pmu; } /* Function to register IMC pmu */ ret = init_imc_pmu(parent, pmu_ptr, pmu_index); if (ret) { pr_err("IMC PMU %s Register failed\n", pmu_ptr->pmu.name); kfree(pmu_ptr->pmu.name); if (pmu_ptr->domain == IMC_DOMAIN_NEST) kfree(pmu_ptr->mem_info); kfree(pmu_ptr); return NULL; } return pmu_ptr; free_pmu: kfree(pmu_ptr); return NULL; } static void disable_nest_pmu_counters(void) { int nid, cpu; const struct cpumask *l_cpumask; cpus_read_lock(); for_each_node_with_cpus(nid) { l_cpumask = cpumask_of_node(nid); cpu = cpumask_first_and(l_cpumask, cpu_online_mask); if (cpu >= nr_cpu_ids) continue; opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST, get_hard_smp_processor_id(cpu)); } cpus_read_unlock(); } static void disable_core_pmu_counters(void) { int cpu, rc; cpus_read_lock(); /* Disable the IMC Core functions */ for_each_online_cpu(cpu) { if (cpu_first_thread_sibling(cpu) != cpu) continue; rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE, get_hard_smp_processor_id(cpu)); if (rc) pr_err("%s: Failed to stop Core (cpu = %d)\n", __func__, cpu); } cpus_read_unlock(); } int get_max_nest_dev(void) { struct device_node *node; u32 pmu_units = 0, type; for_each_compatible_node(node, NULL, IMC_DTB_UNIT_COMPAT) { if (of_property_read_u32(node, "type", &type)) continue; if (type == IMC_TYPE_CHIP) pmu_units++; } return pmu_units; } static int opal_imc_counters_probe(struct platform_device *pdev) { struct device_node *imc_dev = pdev->dev.of_node; struct imc_pmu *pmu; int pmu_count = 0, domain; bool core_imc_reg = false, thread_imc_reg = false; u32 type; /* * Check whether this is kdump kernel. If yes, force the engines to * stop and return. */ if (is_kdump_kernel()) { disable_nest_pmu_counters(); disable_core_pmu_counters(); return -ENODEV; } for_each_compatible_node(imc_dev, NULL, IMC_DTB_UNIT_COMPAT) { pmu = NULL; if (of_property_read_u32(imc_dev, "type", &type)) { pr_warn("IMC Device without type property\n"); continue; } switch (type) { case IMC_TYPE_CHIP: domain = IMC_DOMAIN_NEST; break; case IMC_TYPE_CORE: domain =IMC_DOMAIN_CORE; break; case IMC_TYPE_THREAD: domain = IMC_DOMAIN_THREAD; break; case IMC_TYPE_TRACE: domain = IMC_DOMAIN_TRACE; break; default: pr_warn("IMC Unknown Device type \n"); domain = -1; break; } pmu = imc_pmu_create(imc_dev, pmu_count, domain); if (pmu != NULL) { if (domain == IMC_DOMAIN_NEST) { if (!imc_debugfs_parent) export_imc_mode_and_cmd(imc_dev, pmu); pmu_count++; } if (domain == IMC_DOMAIN_CORE) core_imc_reg = true; if (domain == IMC_DOMAIN_THREAD) thread_imc_reg = true; } } /* If core imc is not registered, unregister thread-imc */ if (!core_imc_reg && thread_imc_reg) unregister_thread_imc(); return 0; } static void opal_imc_counters_shutdown(struct platform_device *pdev) { /* * Function only stops the engines which is bare minimum. * TODO: Need to handle proper memory cleanup and pmu * unregister. 
*/ disable_nest_pmu_counters(); disable_core_pmu_counters(); } static const struct of_device_id opal_imc_match[] = { { .compatible = IMC_DTB_COMPAT }, {}, }; static struct platform_driver opal_imc_driver = { .driver = { .name = "opal-imc-counters", .of_match_table = opal_imc_match, }, .probe = opal_imc_counters_probe, .shutdown = opal_imc_counters_shutdown, }; builtin_platform_driver(opal_imc_driver);
linux-master
arch/powerpc/platforms/powernv/opal-imc.c
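
The imc_cmd/imc_mode debugfs helpers above byte-swap between the big-endian IMC control block and CPU-order values. A tiny userspace analogue using the glibc/BSD <endian.h> helpers (the variable names are illustrative, not kernel symbols):

#include <endian.h>	/* htobe64/be64toh: glibc/BSD extension */
#include <stdint.h>
#include <stdio.h>

/* Backing store kept in big-endian byte order, like the IMC control block. */
static uint64_t imc_mode_be;

static uint64_t mode_get(void)
{
	/* Present a CPU-order value to the caller. */
	return be64toh(imc_mode_be);
}

static void mode_set(uint64_t val)
{
	/* Store back in big-endian, whatever the host endianness. */
	imc_mode_be = htobe64(val);
}

int main(void)
{
	mode_set(0x1ULL);
	printf("mode = 0x%016llx\n", (unsigned long long)mode_get());
	return 0;
}
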
// SPDX-License-Identifier: GPL-2.0-or-later /* * OPAL asynchronus Memory error handling support in PowerNV. * * Copyright 2013 IBM Corporation * Author: Mahesh Salgaonkar <[email protected]> */ #undef DEBUG #include <linux/kernel.h> #include <linux/init.h> #include <linux/of.h> #include <linux/mm.h> #include <linux/slab.h> #include <asm/machdep.h> #include <asm/opal.h> #include <asm/cputable.h> static int opal_mem_err_nb_init; static LIST_HEAD(opal_memory_err_list); static DEFINE_SPINLOCK(opal_mem_err_lock); struct OpalMsgNode { struct list_head list; struct opal_msg msg; }; static void handle_memory_error_event(struct OpalMemoryErrorData *merr_evt) { uint64_t paddr_start, paddr_end; pr_debug("%s: Retrieved memory error event, type: 0x%x\n", __func__, merr_evt->type); switch (merr_evt->type) { case OPAL_MEM_ERR_TYPE_RESILIENCE: paddr_start = be64_to_cpu(merr_evt->u.resilience.physical_address_start); paddr_end = be64_to_cpu(merr_evt->u.resilience.physical_address_end); break; case OPAL_MEM_ERR_TYPE_DYN_DALLOC: paddr_start = be64_to_cpu(merr_evt->u.dyn_dealloc.physical_address_start); paddr_end = be64_to_cpu(merr_evt->u.dyn_dealloc.physical_address_end); break; default: return; } for (; paddr_start < paddr_end; paddr_start += PAGE_SIZE) { memory_failure(paddr_start >> PAGE_SHIFT, 0); } } static void handle_memory_error(void) { unsigned long flags; struct OpalMemoryErrorData *merr_evt; struct OpalMsgNode *msg_node; spin_lock_irqsave(&opal_mem_err_lock, flags); while (!list_empty(&opal_memory_err_list)) { msg_node = list_entry(opal_memory_err_list.next, struct OpalMsgNode, list); list_del(&msg_node->list); spin_unlock_irqrestore(&opal_mem_err_lock, flags); merr_evt = (struct OpalMemoryErrorData *) &msg_node->msg.params[0]; handle_memory_error_event(merr_evt); kfree(msg_node); spin_lock_irqsave(&opal_mem_err_lock, flags); } spin_unlock_irqrestore(&opal_mem_err_lock, flags); } static void mem_error_handler(struct work_struct *work) { handle_memory_error(); } static DECLARE_WORK(mem_error_work, mem_error_handler); /* * opal_memory_err_event - notifier handler that queues up the opal message * to be processed later. */ static int opal_memory_err_event(struct notifier_block *nb, unsigned long msg_type, void *msg) { unsigned long flags; struct OpalMsgNode *msg_node; if (msg_type != OPAL_MSG_MEM_ERR) return 0; msg_node = kzalloc(sizeof(*msg_node), GFP_ATOMIC); if (!msg_node) { pr_err("MEMORY_ERROR: out of memory, Opal message event not" "handled\n"); return -ENOMEM; } memcpy(&msg_node->msg, msg, sizeof(msg_node->msg)); spin_lock_irqsave(&opal_mem_err_lock, flags); list_add(&msg_node->list, &opal_memory_err_list); spin_unlock_irqrestore(&opal_mem_err_lock, flags); schedule_work(&mem_error_work); return 0; } static struct notifier_block opal_mem_err_nb = { .notifier_call = opal_memory_err_event, .next = NULL, .priority = 0, }; static int __init opal_mem_err_init(void) { int ret; if (!opal_mem_err_nb_init) { ret = opal_message_notifier_register( OPAL_MSG_MEM_ERR, &opal_mem_err_nb); if (ret) { pr_err("%s: Can't register OPAL event notifier (%d)\n", __func__, ret); return ret; } opal_mem_err_nb_init = 1; } return 0; } machine_device_initcall(powernv, opal_mem_err_init);
linux-master
arch/powerpc/platforms/powernv/opal-memory-errors.c
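
handle_memory_error_event() above converts a reported physical address range into per-page calls to memory_failure(). A stand-alone sketch of that page-by-page walk, assuming a 4K page size and with poison_page() standing in for memory_failure():

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static void poison_page(uint64_t pfn)
{
	/* Stand-in for memory_failure(pfn, 0). */
	printf("offlining pfn 0x%llx\n", (unsigned long long)pfn);
}

/* Walk [start, end) one page at a time and act on every page frame touched. */
static void handle_range(uint64_t start, uint64_t end)
{
	uint64_t addr;

	for (addr = start; addr < end; addr += PAGE_SIZE)
		poison_page(addr >> PAGE_SHIFT);
}

int main(void)
{
	handle_range(0x100000, 0x104000);	/* four 4K pages */
	return 0;
}
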
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerNV OPAL power control for graceful shutdown handling * * Copyright 2015 IBM Corp. */ #define pr_fmt(fmt) "opal-power: " fmt #include <linux/kernel.h> #include <linux/reboot.h> #include <linux/notifier.h> #include <linux/of.h> #include <asm/opal.h> #include <asm/machdep.h> #define SOFT_OFF 0x00 #define SOFT_REBOOT 0x01 /* Detect EPOW event */ static bool detect_epow(void) { u16 epow; int i, rc; __be16 epow_classes; __be16 opal_epow_status[OPAL_SYSEPOW_MAX] = {0}; /* * Check for EPOW event. Kernel sends supported EPOW classes info * to OPAL. OPAL returns EPOW info along with classes present. */ epow_classes = cpu_to_be16(OPAL_SYSEPOW_MAX); rc = opal_get_epow_status(opal_epow_status, &epow_classes); if (rc != OPAL_SUCCESS) { pr_err("Failed to get EPOW event information\n"); return false; } /* Look for EPOW events present */ for (i = 0; i < be16_to_cpu(epow_classes); i++) { epow = be16_to_cpu(opal_epow_status[i]); /* Filter events which do not need shutdown. */ if (i == OPAL_SYSEPOW_POWER) epow &= ~(OPAL_SYSPOWER_CHNG | OPAL_SYSPOWER_FAIL | OPAL_SYSPOWER_INCL); if (epow) return true; } return false; } /* Check for existing EPOW, DPO events */ static bool __init poweroff_pending(void) { int rc; __be64 opal_dpo_timeout; /* Check for DPO event */ rc = opal_get_dpo_status(&opal_dpo_timeout); if (rc == OPAL_SUCCESS) { pr_info("Existing DPO event detected.\n"); return true; } /* Check for EPOW event */ if (detect_epow()) { pr_info("Existing EPOW event detected.\n"); return true; } return false; } /* OPAL power-control events notifier */ static int opal_power_control_event(struct notifier_block *nb, unsigned long msg_type, void *msg) { uint64_t type; switch (msg_type) { case OPAL_MSG_EPOW: if (detect_epow()) { pr_info("EPOW msg received. Powering off system\n"); orderly_poweroff(true); } break; case OPAL_MSG_DPO: pr_info("DPO msg received. 
Powering off system\n"); orderly_poweroff(true); break; case OPAL_MSG_SHUTDOWN: type = be64_to_cpu(((struct opal_msg *)msg)->params[0]); switch (type) { case SOFT_REBOOT: pr_info("Reboot requested\n"); orderly_reboot(); break; case SOFT_OFF: pr_info("Poweroff requested\n"); orderly_poweroff(true); break; default: pr_err("Unknown power-control type %llu\n", type); } break; default: pr_err("Unknown OPAL message type %lu\n", msg_type); } return 0; } /* OPAL EPOW event notifier block */ static struct notifier_block opal_epow_nb = { .notifier_call = opal_power_control_event, .next = NULL, .priority = 0, }; /* OPAL DPO event notifier block */ static struct notifier_block opal_dpo_nb = { .notifier_call = opal_power_control_event, .next = NULL, .priority = 0, }; /* OPAL power-control event notifier block */ static struct notifier_block opal_power_control_nb = { .notifier_call = opal_power_control_event, .next = NULL, .priority = 0, }; int __init opal_power_control_init(void) { int ret, supported = 0; struct device_node *np; /* Register OPAL power-control events notifier */ ret = opal_message_notifier_register(OPAL_MSG_SHUTDOWN, &opal_power_control_nb); if (ret) pr_err("Failed to register SHUTDOWN notifier, ret = %d\n", ret); /* Determine OPAL EPOW, DPO support */ np = of_find_node_by_path("/ibm,opal/epow"); if (np) { supported = of_device_is_compatible(np, "ibm,opal-v3-epow"); of_node_put(np); } if (!supported) return 0; pr_info("OPAL EPOW, DPO support detected.\n"); /* Register EPOW event notifier */ ret = opal_message_notifier_register(OPAL_MSG_EPOW, &opal_epow_nb); if (ret) pr_err("Failed to register EPOW notifier, ret = %d\n", ret); /* Register DPO event notifier */ ret = opal_message_notifier_register(OPAL_MSG_DPO, &opal_dpo_nb); if (ret) pr_err("Failed to register DPO notifier, ret = %d\n", ret); /* Check for any pending EPOW or DPO events. */ if (poweroff_pending()) orderly_poweroff(true); return 0; }
linux-master
arch/powerpc/platforms/powernv/opal-power.c
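
detect_epow() above masks off the power-status bits that do not require a shutdown and only acts if anything remains. A small sketch of that filter; the bit values are hypothetical, not the real OPAL_SYSPOWER_* definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PWR_CHNG	0x8000	/* informational: supply change */
#define PWR_FAIL	0x4000	/* informational: one supply failed */
#define PWR_INCL	0x2000	/* informational: supply included */
#define PWR_UPS		0x1000	/* running on UPS: does require shutdown */

static bool needs_shutdown(uint16_t power_status)
{
	/* Drop the informational bits first, then see if anything remains. */
	power_status &= ~(PWR_CHNG | PWR_FAIL | PWR_INCL);
	return power_status != 0;
}

int main(void)
{
	printf("%d\n", needs_shutdown(PWR_CHNG));		/* 0: informational only */
	printf("%d\n", needs_shutdown(PWR_CHNG | PWR_UPS));	/* 1: UPS bit set */
	return 0;
}
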
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerNV setup code. * * Copyright 2011 IBM Corp. */ #undef DEBUG #include <linux/cpu.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/tty.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/seq_buf.h> #include <linux/seq_file.h> #include <linux/of.h> #include <linux/of_fdt.h> #include <linux/interrupt.h> #include <linux/bug.h> #include <linux/pci.h> #include <linux/cpufreq.h> #include <linux/memblock.h> #include <asm/machdep.h> #include <asm/firmware.h> #include <asm/xics.h> #include <asm/xive.h> #include <asm/opal.h> #include <asm/kexec.h> #include <asm/smp.h> #include <asm/tm.h> #include <asm/setup.h> #include <asm/security_features.h> #include "powernv.h" static bool __init fw_feature_is(const char *state, const char *name, struct device_node *fw_features) { struct device_node *np; bool rc = false; np = of_get_child_by_name(fw_features, name); if (np) { rc = of_property_read_bool(np, state); of_node_put(np); } return rc; } static void __init init_fw_feat_flags(struct device_node *np) { if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np)) security_ftr_set(SEC_FTR_SPEC_BAR_ORI31); if (fw_feature_is("enabled", "fw-bcctrl-serialized", np)) security_ftr_set(SEC_FTR_BCCTRL_SERIALISED); if (fw_feature_is("enabled", "inst-l1d-flush-ori30,30,0", np)) security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30); if (fw_feature_is("enabled", "inst-l1d-flush-trig2", np)) security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2); if (fw_feature_is("enabled", "fw-l1d-thread-split", np)) security_ftr_set(SEC_FTR_L1D_THREAD_PRIV); if (fw_feature_is("enabled", "fw-count-cache-disabled", np)) security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED); if (fw_feature_is("enabled", "fw-count-cache-flush-bcctr2,0,0", np)) security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST); if (fw_feature_is("enabled", "needs-count-cache-flush-on-context-switch", np)) security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE); /* * The features below are enabled by default, so we instead look to see * if firmware has *disabled* them, and clear them if so. 
*/ if (fw_feature_is("disabled", "speculation-policy-favor-security", np)) security_ftr_clear(SEC_FTR_FAVOUR_SECURITY); if (fw_feature_is("disabled", "needs-l1d-flush-msr-pr-0-to-1", np)) security_ftr_clear(SEC_FTR_L1D_FLUSH_PR); if (fw_feature_is("disabled", "needs-l1d-flush-msr-hv-1-to-0", np)) security_ftr_clear(SEC_FTR_L1D_FLUSH_HV); if (fw_feature_is("disabled", "needs-spec-barrier-for-bound-checks", np)) security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR); if (fw_feature_is("enabled", "no-need-l1d-flush-msr-pr-1-to-0", np)) security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY); if (fw_feature_is("enabled", "no-need-l1d-flush-kernel-on-user-access", np)) security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS); if (fw_feature_is("enabled", "no-need-store-drain-on-priv-state-switch", np)) security_ftr_clear(SEC_FTR_STF_BARRIER); } static void __init pnv_setup_security_mitigations(void) { struct device_node *np, *fw_features; enum l1d_flush_type type; bool enable; /* Default to fallback in case fw-features are not available */ type = L1D_FLUSH_FALLBACK; np = of_find_node_by_name(NULL, "ibm,opal"); fw_features = of_get_child_by_name(np, "fw-features"); of_node_put(np); if (fw_features) { init_fw_feat_flags(fw_features); of_node_put(fw_features); if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2)) type = L1D_FLUSH_MTTRIG; if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30)) type = L1D_FLUSH_ORI; } /* * The issues addressed by the entry and uaccess flush don't affect P7 * or P8, so on bare metal disable them explicitly in case firmware does * not include the features to disable them. POWER9 and newer processors * should have the appropriate firmware flags. */ if (pvr_version_is(PVR_POWER7) || pvr_version_is(PVR_POWER7p) || pvr_version_is(PVR_POWER8E) || pvr_version_is(PVR_POWER8NVL) || pvr_version_is(PVR_POWER8)) { security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY); security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS); } enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \ security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV)); setup_rfi_flush(type, enable); setup_count_cache_flush(); enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY); setup_entry_flush(enable); enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS); setup_uaccess_flush(enable); setup_stf_barrier(); } static void __init pnv_check_guarded_cores(void) { struct device_node *dn; int bad_count = 0; for_each_node_by_type(dn, "cpu") { if (of_property_match_string(dn, "status", "bad") >= 0) bad_count++; } if (bad_count) { printk(" _ _______________\n"); pr_cont(" | | / \\\n"); pr_cont(" | | | WARNING! 
|\n"); pr_cont(" | | | |\n"); pr_cont(" | | | It looks like |\n"); pr_cont(" |_| | you have %*d |\n", 3, bad_count); pr_cont(" _ | guarded cores |\n"); pr_cont(" (_) \\_______________/\n"); } } static void __init pnv_setup_arch(void) { set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); pnv_setup_security_mitigations(); /* Initialize SMP */ pnv_smp_init(); /* Setup RTC and NVRAM callbacks */ if (firmware_has_feature(FW_FEATURE_OPAL)) opal_nvram_init(); /* Enable NAP mode */ powersave_nap = 1; pnv_check_guarded_cores(); /* XXX PMCS */ pnv_rng_init(); } static void __init pnv_add_hw_description(void) { struct device_node *dn; const char *s; dn = of_find_node_by_path("/ibm,opal/firmware"); if (!dn) return; if (of_property_read_string(dn, "version", &s) == 0 || of_property_read_string(dn, "git-id", &s) == 0) seq_buf_printf(&ppc_hw_desc, "opal:%s ", s); if (of_property_read_string(dn, "mi-version", &s) == 0) seq_buf_printf(&ppc_hw_desc, "mi:%s ", s); of_node_put(dn); } static void __init pnv_init(void) { pnv_add_hw_description(); /* * Initialize the LPC bus now so that legacy serial * ports can be found on it */ opal_lpc_init(); #ifdef CONFIG_HVC_OPAL if (firmware_has_feature(FW_FEATURE_OPAL)) hvc_opal_init_early(); else #endif add_preferred_console("hvc", 0, NULL); #ifdef CONFIG_PPC_64S_HASH_MMU if (!radix_enabled()) { size_t size = sizeof(struct slb_entry) * mmu_slb_size; int i; /* Allocate per cpu area to save old slb contents during MCE */ for_each_possible_cpu(i) { paca_ptrs[i]->mce_faulty_slbs = memblock_alloc_node(size, __alignof__(struct slb_entry), cpu_to_node(i)); } } #endif } static void __init pnv_init_IRQ(void) { /* Try using a XIVE if available, otherwise use a XICS */ if (!xive_native_init()) xics_init(); WARN_ON(!ppc_md.get_irq); } static void pnv_show_cpuinfo(struct seq_file *m) { struct device_node *root; const char *model = ""; root = of_find_node_by_path("/"); if (root) model = of_get_property(root, "model", NULL); seq_printf(m, "machine\t\t: PowerNV %s\n", model); if (firmware_has_feature(FW_FEATURE_OPAL)) seq_printf(m, "firmware\t: OPAL\n"); else seq_printf(m, "firmware\t: BML\n"); of_node_put(root); if (radix_enabled()) seq_printf(m, "MMU\t\t: Radix\n"); else seq_printf(m, "MMU\t\t: Hash\n"); } static void pnv_prepare_going_down(void) { /* * Disable all notifiers from OPAL, we can't * service interrupts anymore anyway */ opal_event_shutdown(); /* Print flash update message if one is scheduled. */ opal_flash_update_print_message(); smp_send_stop(); hard_irq_disable(); } static void __noreturn pnv_restart(char *cmd) { long rc; pnv_prepare_going_down(); do { if (!cmd || !strlen(cmd)) rc = opal_cec_reboot(); else if (strcmp(cmd, "full") == 0) rc = opal_cec_reboot2(OPAL_REBOOT_FULL_IPL, NULL); else if (strcmp(cmd, "mpipl") == 0) rc = opal_cec_reboot2(OPAL_REBOOT_MPIPL, NULL); else if (strcmp(cmd, "error") == 0) rc = opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR, NULL); else if (strcmp(cmd, "fast") == 0) rc = opal_cec_reboot2(OPAL_REBOOT_FAST, NULL); else rc = OPAL_UNSUPPORTED; if (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { /* Opal is busy wait for some time and retry */ opal_poll_events(NULL); mdelay(10); } else if (cmd && rc) { /* Unknown error while issuing reboot */ if (rc == OPAL_UNSUPPORTED) pr_err("Unsupported '%s' reboot.\n", cmd); else pr_err("Unable to issue '%s' reboot. Err=%ld\n", cmd, rc); pr_info("Forcing a cec-reboot\n"); cmd = NULL; rc = OPAL_BUSY; } else if (rc != OPAL_SUCCESS) { /* Unknown error while issuing cec-reboot */ pr_err("Unable to reboot. 
Err=%ld\n", rc); } } while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT); for (;;) opal_poll_events(NULL); } static void __noreturn pnv_power_off(void) { long rc = OPAL_BUSY; pnv_prepare_going_down(); while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) { rc = opal_cec_power_down(0); if (rc == OPAL_BUSY_EVENT) opal_poll_events(NULL); else mdelay(10); } for (;;) opal_poll_events(NULL); } static void __noreturn pnv_halt(void) { pnv_power_off(); } static void pnv_progress(char *s, unsigned short hex) { } static void pnv_shutdown(void) { /* Let the PCI code clear up IODA tables */ pnv_pci_shutdown(); /* * Stop OPAL activity: Unregister all OPAL interrupts so they * don't fire up while we kexec and make sure all potentially * DMA'ing ops are complete (such as dump retrieval). */ opal_shutdown(); } #ifdef CONFIG_KEXEC_CORE static void pnv_kexec_wait_secondaries_down(void) { int my_cpu, i, notified = -1; my_cpu = get_cpu(); for_each_online_cpu(i) { uint8_t status; int64_t rc, timeout = 1000; if (i == my_cpu) continue; for (;;) { rc = opal_query_cpu_status(get_hard_smp_processor_id(i), &status); if (rc != OPAL_SUCCESS || status != OPAL_THREAD_STARTED) break; barrier(); if (i != notified) { printk(KERN_INFO "kexec: waiting for cpu %d " "(physical %d) to enter OPAL\n", i, paca_ptrs[i]->hw_cpu_id); notified = i; } /* * On crash secondaries might be unreachable or hung, * so timeout if we've waited too long * */ mdelay(1); if (timeout-- == 0) { printk(KERN_ERR "kexec: timed out waiting for " "cpu %d (physical %d) to enter OPAL\n", i, paca_ptrs[i]->hw_cpu_id); break; } } } } static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) { u64 reinit_flags; if (xive_enabled()) xive_teardown_cpu(); else xics_kexec_teardown_cpu(secondary); /* On OPAL, we return all CPUs to firmware */ if (!firmware_has_feature(FW_FEATURE_OPAL)) return; if (secondary) { /* Return secondary CPUs to firmware on OPAL v3 */ mb(); get_paca()->kexec_state = KEXEC_STATE_REAL_MODE; mb(); /* Return the CPU to OPAL */ opal_return_cpu(); } else { /* Primary waits for the secondaries to have reached OPAL */ pnv_kexec_wait_secondaries_down(); /* Switch XIVE back to emulation mode */ if (xive_enabled()) xive_shutdown(); /* * We might be running as little-endian - now that interrupts * are disabled, reset the HILE bit to big-endian so we don't * take interrupts in the wrong endian later * * We reinit to enable both radix and hash on P9 to ensure * the mode used by the next kernel is always supported. 
*/ reinit_flags = OPAL_REINIT_CPUS_HILE_BE; if (cpu_has_feature(CPU_FTR_ARCH_300)) reinit_flags |= OPAL_REINIT_CPUS_MMU_RADIX | OPAL_REINIT_CPUS_MMU_HASH; opal_reinit_cpus(reinit_flags); } } #endif /* CONFIG_KEXEC_CORE */ #ifdef CONFIG_MEMORY_HOTPLUG static unsigned long pnv_memory_block_size(void) { return memory_block_size; } #endif static void __init pnv_setup_machdep_opal(void) { ppc_md.get_boot_time = opal_get_boot_time; ppc_md.restart = pnv_restart; pm_power_off = pnv_power_off; ppc_md.halt = pnv_halt; /* ppc_md.system_reset_exception gets filled in by pnv_smp_init() */ ppc_md.machine_check_exception = opal_machine_check; ppc_md.mce_check_early_recovery = opal_mce_check_early_recovery; if (opal_check_token(OPAL_HANDLE_HMI2)) ppc_md.hmi_exception_early = opal_hmi_exception_early2; else ppc_md.hmi_exception_early = opal_hmi_exception_early; ppc_md.handle_hmi_exception = opal_handle_hmi_exception; } static int __init pnv_probe(void) { if (firmware_has_feature(FW_FEATURE_OPAL)) pnv_setup_machdep_opal(); pr_debug("PowerNV detected !\n"); pnv_init(); return 1; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM void __init pnv_tm_init(void) { if (!firmware_has_feature(FW_FEATURE_OPAL) || !pvr_version_is(PVR_POWER9) || early_cpu_has_feature(CPU_FTR_TM)) return; if (opal_reinit_cpus(OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED) != OPAL_SUCCESS) return; pr_info("Enabling TM (Transactional Memory) with Suspend Disabled\n"); cur_cpu_spec->cpu_features |= CPU_FTR_TM; /* Make sure "normal" HTM is off (it should be) */ cur_cpu_spec->cpu_user_features2 &= ~PPC_FEATURE2_HTM; /* Turn on no suspend mode, and HTM no SC */ cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_HTM_NO_SUSPEND | \ PPC_FEATURE2_HTM_NOSC; tm_suspend_disabled = true; } #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ /* * Returns the cpu frequency for 'cpu' in Hz. This is used by * /proc/cpuinfo */ static unsigned long pnv_get_proc_freq(unsigned int cpu) { unsigned long ret_freq; ret_freq = cpufreq_get(cpu) * 1000ul; /* * If the backend cpufreq driver does not exist, * then fallback to old way of reporting the clockrate. */ if (!ret_freq) ret_freq = ppc_proc_freq; return ret_freq; } static long pnv_machine_check_early(struct pt_regs *regs) { long handled = 0; if (cur_cpu_spec && cur_cpu_spec->machine_check_early) handled = cur_cpu_spec->machine_check_early(regs); return handled; } define_machine(powernv) { .name = "PowerNV", .compatible = "ibm,powernv", .probe = pnv_probe, .setup_arch = pnv_setup_arch, .init_IRQ = pnv_init_IRQ, .show_cpuinfo = pnv_show_cpuinfo, .get_proc_freq = pnv_get_proc_freq, .discover_phbs = pnv_pci_init, .progress = pnv_progress, .machine_shutdown = pnv_shutdown, .power_save = NULL, .machine_check_early = pnv_machine_check_early, #ifdef CONFIG_KEXEC_CORE .kexec_cpu_down = pnv_kexec_cpu_down, #endif #ifdef CONFIG_MEMORY_HOTPLUG .memory_block_size = pnv_memory_block_size, #endif };
linux-master
arch/powerpc/platforms/powernv/setup.c
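
pnv_restart() above retries while firmware reports busy and falls back to a plain reboot when a named reboot mode is unsupported. A simplified userspace sketch of that retry/fallback loop, with invented return codes and a stub fw_reboot() in place of the OPAL calls:

#include <stdio.h>
#include <string.h>
#include <unistd.h>

enum { FW_SUCCESS = 0, FW_BUSY = -1, FW_UNSUPPORTED = -2 };	/* hypothetical codes */

static int fw_reboot(const char *mode)
{
	/* Pretend only the default (NULL) mode is implemented. */
	return mode ? FW_UNSUPPORTED : FW_SUCCESS;
}

/* Retry while busy; drop an unsupported named mode back to the default reboot. */
static void restart(const char *cmd)
{
	int rc;

	do {
		rc = fw_reboot(cmd && strlen(cmd) ? cmd : NULL);
		if (rc == FW_BUSY) {
			usleep(10 * 1000);	/* poll and retry, like mdelay(10) */
		} else if (cmd && rc != FW_SUCCESS) {
			fprintf(stderr, "'%s' reboot failed (%d), forcing default reboot\n",
				cmd, rc);
			cmd = NULL;
			rc = FW_BUSY;	/* loop again with the default mode */
		}
	} while (rc == FW_BUSY);
}

int main(void)
{
	restart("mpipl");
	return 0;
}
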
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2016-17 IBM Corp. */ #define pr_fmt(fmt) "vas: " fmt #include <linux/types.h> #include <linux/slab.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <asm/vas.h> #include "vas.h" static struct dentry *vas_debugfs; static char *cop_to_str(int cop) { switch (cop) { case VAS_COP_TYPE_FAULT: return "Fault"; case VAS_COP_TYPE_842: return "NX-842 Normal Priority"; case VAS_COP_TYPE_842_HIPRI: return "NX-842 High Priority"; case VAS_COP_TYPE_GZIP: return "NX-GZIP Normal Priority"; case VAS_COP_TYPE_GZIP_HIPRI: return "NX-GZIP High Priority"; case VAS_COP_TYPE_FTW: return "Fast Thread-wakeup"; default: return "Unknown"; } } static int info_show(struct seq_file *s, void *private) { struct pnv_vas_window *window = s->private; mutex_lock(&vas_mutex); /* ensure window is not unmapped */ if (!window->hvwc_map) goto unlock; seq_printf(s, "Type: %s, %s\n", cop_to_str(window->vas_win.cop), window->tx_win ? "Send" : "Receive"); seq_printf(s, "Pid : %d\n", vas_window_pid(&window->vas_win)); unlock: mutex_unlock(&vas_mutex); return 0; } DEFINE_SHOW_ATTRIBUTE(info); static inline void print_reg(struct seq_file *s, struct pnv_vas_window *win, char *name, u32 reg) { seq_printf(s, "0x%016llx %s\n", read_hvwc_reg(win, name, reg), name); } static int hvwc_show(struct seq_file *s, void *private) { struct pnv_vas_window *window = s->private; mutex_lock(&vas_mutex); /* ensure window is not unmapped */ if (!window->hvwc_map) goto unlock; print_reg(s, window, VREG(LPID)); print_reg(s, window, VREG(PID)); print_reg(s, window, VREG(XLATE_MSR)); print_reg(s, window, VREG(XLATE_LPCR)); print_reg(s, window, VREG(XLATE_CTL)); print_reg(s, window, VREG(AMR)); print_reg(s, window, VREG(SEIDR)); print_reg(s, window, VREG(FAULT_TX_WIN)); print_reg(s, window, VREG(OSU_INTR_SRC_RA)); print_reg(s, window, VREG(HV_INTR_SRC_RA)); print_reg(s, window, VREG(PSWID)); print_reg(s, window, VREG(LFIFO_BAR)); print_reg(s, window, VREG(LDATA_STAMP_CTL)); print_reg(s, window, VREG(LDMA_CACHE_CTL)); print_reg(s, window, VREG(LRFIFO_PUSH)); print_reg(s, window, VREG(CURR_MSG_COUNT)); print_reg(s, window, VREG(LNOTIFY_AFTER_COUNT)); print_reg(s, window, VREG(LRX_WCRED)); print_reg(s, window, VREG(LRX_WCRED_ADDER)); print_reg(s, window, VREG(TX_WCRED)); print_reg(s, window, VREG(TX_WCRED_ADDER)); print_reg(s, window, VREG(LFIFO_SIZE)); print_reg(s, window, VREG(WINCTL)); print_reg(s, window, VREG(WIN_STATUS)); print_reg(s, window, VREG(WIN_CTX_CACHING_CTL)); print_reg(s, window, VREG(TX_RSVD_BUF_COUNT)); print_reg(s, window, VREG(LRFIFO_WIN_PTR)); print_reg(s, window, VREG(LNOTIFY_CTL)); print_reg(s, window, VREG(LNOTIFY_PID)); print_reg(s, window, VREG(LNOTIFY_LPID)); print_reg(s, window, VREG(LNOTIFY_TID)); print_reg(s, window, VREG(LNOTIFY_SCOPE)); print_reg(s, window, VREG(NX_UTIL_ADDER)); unlock: mutex_unlock(&vas_mutex); return 0; } DEFINE_SHOW_ATTRIBUTE(hvwc); void vas_window_free_dbgdir(struct pnv_vas_window *pnv_win) { struct vas_window *window = &pnv_win->vas_win; if (window->dbgdir) { debugfs_remove_recursive(window->dbgdir); kfree(window->dbgname); window->dbgdir = NULL; window->dbgname = NULL; } } void vas_window_init_dbgdir(struct pnv_vas_window *window) { struct dentry *d; if (!window->vinst->dbgdir) return; window->vas_win.dbgname = kzalloc(16, GFP_KERNEL); if (!window->vas_win.dbgname) return; snprintf(window->vas_win.dbgname, 16, "w%d", window->vas_win.winid); d = debugfs_create_dir(window->vas_win.dbgname, window->vinst->dbgdir); window->vas_win.dbgdir 
= d; debugfs_create_file("info", 0444, d, window, &info_fops); debugfs_create_file("hvwc", 0444, d, window, &hvwc_fops); } void vas_instance_init_dbgdir(struct vas_instance *vinst) { struct dentry *d; vas_init_dbgdir(); vinst->dbgname = kzalloc(16, GFP_KERNEL); if (!vinst->dbgname) return; snprintf(vinst->dbgname, 16, "v%d", vinst->vas_id); d = debugfs_create_dir(vinst->dbgname, vas_debugfs); vinst->dbgdir = d; } /* * Set up the "root" VAS debugfs dir. Return if we already set it up * (or failed to) in an earlier instance of VAS. */ void vas_init_dbgdir(void) { static bool first_time = true; if (!first_time) return; first_time = false; vas_debugfs = debugfs_create_dir("vas", NULL); }
linux-master
arch/powerpc/platforms/powernv/vas-debug.c
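The debugfs plumbing above follows a common kernel pattern: a seq_file "show" callback wrapped by DEFINE_SHOW_ATTRIBUTE() and exposed as a read-only file under a per-object directory, torn down later with debugfs_remove_recursive(). The following is a minimal, self-contained sketch of that pattern only; it is not part of the VAS code, and the names example_dbg, example_counter and counter_show are invented for illustration.

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *example_dir;
static int example_counter = 42;

static int counter_show(struct seq_file *s, void *unused)
{
	/* seq_printf() formats into the seq_file buffer, as info_show() does */
	seq_printf(s, "counter: %d\n", example_counter);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(counter);		/* generates counter_fops */

static int __init example_dbg_init(void)
{
	/* directory plus one 0444 file, mirroring the per-window layout */
	example_dir = debugfs_create_dir("example_dbg", NULL);
	debugfs_create_file("counter", 0444, example_dir, NULL, &counter_fops);
	return 0;
}

static void __exit example_dbg_exit(void)
{
	debugfs_remove_recursive(example_dir);
}

module_init(example_dbg_init);
module_exit(example_dbg_exit);
MODULE_LICENSE("GPL");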
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerNV Platform dependent EEH operations * * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013. */ #include <linux/atomic.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/list.h> #include <linux/msi.h> #include <linux/of.h> #include <linux/pci.h> #include <linux/proc_fs.h> #include <linux/rbtree.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <asm/eeh.h> #include <asm/eeh_event.h> #include <asm/firmware.h> #include <asm/io.h> #include <asm/iommu.h> #include <asm/machdep.h> #include <asm/msi_bitmap.h> #include <asm/opal.h> #include <asm/ppc-pci.h> #include <asm/pnv-pci.h> #include "powernv.h" #include "pci.h" #include "../../../../drivers/pci/pci.h" static int eeh_event_irq = -EINVAL; static void pnv_pcibios_bus_add_device(struct pci_dev *pdev) { dev_dbg(&pdev->dev, "EEH: Setting up device\n"); eeh_probe_device(pdev); } static irqreturn_t pnv_eeh_event(int irq, void *data) { /* * We simply send a special EEH event if EEH has been * enabled. We don't care about EEH events until we've * finished processing the outstanding ones. Event processing * gets unmasked in next_error() if EEH is enabled. */ disable_irq_nosync(irq); if (eeh_enabled()) eeh_send_failure_event(NULL); return IRQ_HANDLED; } #ifdef CONFIG_DEBUG_FS static ssize_t pnv_eeh_ei_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *ppos) { struct pci_controller *hose = filp->private_data; struct eeh_pe *pe; int pe_no, type, func; unsigned long addr, mask; char buf[50]; int ret; if (!eeh_ops || !eeh_ops->err_inject) return -ENXIO; /* Copy over argument buffer */ ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count); if (!ret) return -EFAULT; /* Retrieve parameters */ ret = sscanf(buf, "%x:%x:%x:%lx:%lx", &pe_no, &type, &func, &addr, &mask); if (ret != 5) return -EINVAL; /* Retrieve PE */ pe = eeh_pe_get(hose, pe_no); if (!pe) return -ENODEV; /* Do error injection */ ret = eeh_ops->err_inject(pe, type, func, addr, mask); return ret < 0 ? ret : count; } static const struct file_operations pnv_eeh_ei_fops = { .open = simple_open, .llseek = no_llseek, .write = pnv_eeh_ei_write, }; static int pnv_eeh_dbgfs_set(void *data, int offset, u64 val) { struct pci_controller *hose = data; struct pnv_phb *phb = hose->private_data; out_be64(phb->regs + offset, val); return 0; } static int pnv_eeh_dbgfs_get(void *data, int offset, u64 *val) { struct pci_controller *hose = data; struct pnv_phb *phb = hose->private_data; *val = in_be64(phb->regs + offset); return 0; } #define PNV_EEH_DBGFS_ENTRY(name, reg) \ static int pnv_eeh_dbgfs_set_##name(void *data, u64 val) \ { \ return pnv_eeh_dbgfs_set(data, reg, val); \ } \ \ static int pnv_eeh_dbgfs_get_##name(void *data, u64 *val) \ { \ return pnv_eeh_dbgfs_get(data, reg, val); \ } \ \ DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_dbgfs_ops_##name, \ pnv_eeh_dbgfs_get_##name, \ pnv_eeh_dbgfs_set_##name, \ "0x%llx\n") PNV_EEH_DBGFS_ENTRY(outb, 0xD10); PNV_EEH_DBGFS_ENTRY(inbA, 0xD90); PNV_EEH_DBGFS_ENTRY(inbB, 0xE10); #endif /* CONFIG_DEBUG_FS */ static void pnv_eeh_enable_phbs(void) { struct pci_controller *hose; struct pnv_phb *phb; list_for_each_entry(hose, &hose_list, list_node) { phb = hose->private_data; /* * If EEH is enabled, we're going to rely on that. 
* Otherwise, we restore to conventional mechanism * to clear frozen PE during PCI config access. */ if (eeh_enabled()) phb->flags |= PNV_PHB_FLAG_EEH; else phb->flags &= ~PNV_PHB_FLAG_EEH; } } /** * pnv_eeh_post_init - EEH platform dependent post initialization * * EEH platform dependent post initialization on powernv. When * the function is called, the EEH PEs and devices should have * been built. If the I/O cache staff has been built, EEH is * ready to supply service. */ int pnv_eeh_post_init(void) { struct pci_controller *hose; struct pnv_phb *phb; int ret = 0; eeh_show_enabled(); /* Register OPAL event notifier */ eeh_event_irq = opal_event_request(ilog2(OPAL_EVENT_PCI_ERROR)); if (eeh_event_irq < 0) { pr_err("%s: Can't register OPAL event interrupt (%d)\n", __func__, eeh_event_irq); return eeh_event_irq; } ret = request_irq(eeh_event_irq, pnv_eeh_event, IRQ_TYPE_LEVEL_HIGH, "opal-eeh", NULL); if (ret < 0) { irq_dispose_mapping(eeh_event_irq); pr_err("%s: Can't request OPAL event interrupt (%d)\n", __func__, eeh_event_irq); return ret; } if (!eeh_enabled()) disable_irq(eeh_event_irq); pnv_eeh_enable_phbs(); list_for_each_entry(hose, &hose_list, list_node) { phb = hose->private_data; /* Create debugfs entries */ #ifdef CONFIG_DEBUG_FS if (phb->has_dbgfs || !phb->dbgfs) continue; phb->has_dbgfs = 1; debugfs_create_file("err_injct", 0200, phb->dbgfs, hose, &pnv_eeh_ei_fops); debugfs_create_file("err_injct_outbound", 0600, phb->dbgfs, hose, &pnv_eeh_dbgfs_ops_outb); debugfs_create_file("err_injct_inboundA", 0600, phb->dbgfs, hose, &pnv_eeh_dbgfs_ops_inbA); debugfs_create_file("err_injct_inboundB", 0600, phb->dbgfs, hose, &pnv_eeh_dbgfs_ops_inbB); #endif /* CONFIG_DEBUG_FS */ } return ret; } static int pnv_eeh_find_cap(struct pci_dn *pdn, int cap) { int pos = PCI_CAPABILITY_LIST; int cnt = 48; /* Maximal number of capabilities */ u32 status, id; if (!pdn) return 0; /* Check if the device supports capabilities */ pnv_pci_cfg_read(pdn, PCI_STATUS, 2, &status); if (!(status & PCI_STATUS_CAP_LIST)) return 0; while (cnt--) { pnv_pci_cfg_read(pdn, pos, 1, &pos); if (pos < 0x40) break; pos &= ~3; pnv_pci_cfg_read(pdn, pos + PCI_CAP_LIST_ID, 1, &id); if (id == 0xff) break; /* Found */ if (id == cap) return pos; /* Next one */ pos += PCI_CAP_LIST_NEXT; } return 0; } static int pnv_eeh_find_ecap(struct pci_dn *pdn, int cap) { struct eeh_dev *edev = pdn_to_eeh_dev(pdn); u32 header; int pos = 256, ttl = (4096 - 256) / 8; if (!edev || !edev->pcie_cap) return 0; if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL) return 0; else if (!header) return 0; while (ttl-- > 0) { if (PCI_EXT_CAP_ID(header) == cap && pos) return pos; pos = PCI_EXT_CAP_NEXT(header); if (pos < 256) break; if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL) break; } return 0; } static struct eeh_pe *pnv_eeh_get_upstream_pe(struct pci_dev *pdev) { struct pci_controller *hose = pdev->bus->sysdata; struct pnv_phb *phb = hose->private_data; struct pci_dev *parent = pdev->bus->self; #ifdef CONFIG_PCI_IOV /* for VFs we use the PF's PE as the upstream PE */ if (pdev->is_virtfn) parent = pdev->physfn; #endif /* otherwise use the PE of our parent bridge */ if (parent) { struct pnv_ioda_pe *ioda_pe = pnv_ioda_get_pe(parent); return eeh_pe_get(phb->hose, ioda_pe->pe_number); } return NULL; } /** * pnv_eeh_probe - Do probe on PCI device * @pdev: pci_dev to probe * * Create, or find the existing, eeh_dev for this pci_dev. 
*/ static struct eeh_dev *pnv_eeh_probe(struct pci_dev *pdev) { struct pci_dn *pdn = pci_get_pdn(pdev); struct pci_controller *hose = pdn->phb; struct pnv_phb *phb = hose->private_data; struct eeh_dev *edev = pdn_to_eeh_dev(pdn); struct eeh_pe *upstream_pe; uint32_t pcie_flags; int ret; int config_addr = (pdn->busno << 8) | (pdn->devfn); /* * When probing the root bridge, which doesn't have any * subordinate PCI devices. We don't have OF node for * the root bridge. So it's not reasonable to continue * the probing. */ if (!edev || edev->pe) return NULL; /* already configured? */ if (edev->pdev) { pr_debug("%s: found existing edev for %04x:%02x:%02x.%01x\n", __func__, hose->global_number, config_addr >> 8, PCI_SLOT(config_addr), PCI_FUNC(config_addr)); return edev; } /* Skip for PCI-ISA bridge */ if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) return NULL; eeh_edev_dbg(edev, "Probing device\n"); /* Initialize eeh device */ edev->mode &= 0xFFFFFF00; edev->pcix_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_PCIX); edev->pcie_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_EXP); edev->af_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_AF); edev->aer_cap = pnv_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR); if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { edev->mode |= EEH_DEV_BRIDGE; if (edev->pcie_cap) { pnv_pci_cfg_read(pdn, edev->pcie_cap + PCI_EXP_FLAGS, 2, &pcie_flags); pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4; if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT) edev->mode |= EEH_DEV_ROOT_PORT; else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM) edev->mode |= EEH_DEV_DS_PORT; } } edev->pe_config_addr = phb->ioda.pe_rmap[config_addr]; upstream_pe = pnv_eeh_get_upstream_pe(pdev); /* Create PE */ ret = eeh_pe_tree_insert(edev, upstream_pe); if (ret) { eeh_edev_warn(edev, "Failed to add device to PE (code %d)\n", ret); return NULL; } /* * If the PE contains any one of following adapters, the * PCI config space can't be accessed when dumping EEH log. * Otherwise, we will run into fenced PHB caused by shortage * of outbound credits in the adapter. The PCI config access * should be blocked until PE reset. MMIO access is dropped * by hardware certainly. In order to drop PCI config requests, * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which * will be checked in the backend for PE state retrieval. If * the PE becomes frozen for the first time and the flag has * been set for the PE, we will set EEH_PE_CFG_BLOCKED for * that PE to block its config space. * * Broadcom BCM5718 2-ports NICs (14e4:1656) * Broadcom Austin 4-ports NICs (14e4:1657) * Broadcom Shiner 4-ports 1G NICs (14e4:168a) * Broadcom Shiner 2-ports 10G NICs (14e4:168e) */ if ((pdn->vendor_id == PCI_VENDOR_ID_BROADCOM && pdn->device_id == 0x1656) || (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM && pdn->device_id == 0x1657) || (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM && pdn->device_id == 0x168a) || (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM && pdn->device_id == 0x168e)) edev->pe->state |= EEH_PE_CFG_RESTRICTED; /* * Cache the PE primary bus, which can't be fetched when * full hotplug is in progress. In that case, all child * PCI devices of the PE are expected to be removed prior * to PE reset. 
*/ if (!(edev->pe->state & EEH_PE_PRI_BUS)) { edev->pe->bus = pci_find_bus(hose->global_number, pdn->busno); if (edev->pe->bus) edev->pe->state |= EEH_PE_PRI_BUS; } /* * Enable EEH explicitly so that we will do EEH check * while accessing I/O stuff */ if (!eeh_has_flag(EEH_ENABLED)) { enable_irq(eeh_event_irq); pnv_eeh_enable_phbs(); eeh_add_flag(EEH_ENABLED); } /* Save memory bars */ eeh_save_bars(edev); eeh_edev_dbg(edev, "EEH enabled on device\n"); return edev; } /** * pnv_eeh_set_option - Initialize EEH or MMIO/DMA reenable * @pe: EEH PE * @option: operation to be issued * * The function is used to control the EEH functionality globally. * Currently, following options are support according to PAPR: * Enable EEH, Disable EEH, Enable MMIO and Enable DMA */ static int pnv_eeh_set_option(struct eeh_pe *pe, int option) { struct pci_controller *hose = pe->phb; struct pnv_phb *phb = hose->private_data; bool freeze_pe = false; int opt; s64 rc; switch (option) { case EEH_OPT_DISABLE: return -EPERM; case EEH_OPT_ENABLE: return 0; case EEH_OPT_THAW_MMIO: opt = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO; break; case EEH_OPT_THAW_DMA: opt = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA; break; case EEH_OPT_FREEZE_PE: freeze_pe = true; opt = OPAL_EEH_ACTION_SET_FREEZE_ALL; break; default: pr_warn("%s: Invalid option %d\n", __func__, option); return -EINVAL; } /* Freeze master and slave PEs if PHB supports compound PEs */ if (freeze_pe) { if (phb->freeze_pe) { phb->freeze_pe(phb, pe->addr); return 0; } rc = opal_pci_eeh_freeze_set(phb->opal_id, pe->addr, opt); if (rc != OPAL_SUCCESS) { pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n", __func__, rc, phb->hose->global_number, pe->addr); return -EIO; } return 0; } /* Unfreeze master and slave PEs if PHB supports */ if (phb->unfreeze_pe) return phb->unfreeze_pe(phb, pe->addr, opt); rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe->addr, opt); if (rc != OPAL_SUCCESS) { pr_warn("%s: Failure %lld enable %d for PHB#%x-PE#%x\n", __func__, rc, option, phb->hose->global_number, pe->addr); return -EIO; } return 0; } static void pnv_eeh_get_phb_diag(struct eeh_pe *pe) { struct pnv_phb *phb = pe->phb->private_data; s64 rc; rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data, phb->diag_data_size); if (rc != OPAL_SUCCESS) pr_warn("%s: Failure %lld getting PHB#%x diag-data\n", __func__, rc, pe->phb->global_number); } static int pnv_eeh_get_phb_state(struct eeh_pe *pe) { struct pnv_phb *phb = pe->phb->private_data; u8 fstate = 0; __be16 pcierr = 0; s64 rc; int result = 0; rc = opal_pci_eeh_freeze_status(phb->opal_id, pe->addr, &fstate, &pcierr, NULL); if (rc != OPAL_SUCCESS) { pr_warn("%s: Failure %lld getting PHB#%x state\n", __func__, rc, phb->hose->global_number); return EEH_STATE_NOT_SUPPORT; } /* * Check PHB state. If the PHB is frozen for the * first time, to dump the PHB diag-data. */ if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) { result = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE | EEH_STATE_MMIO_ENABLED | EEH_STATE_DMA_ENABLED); } else if (!(pe->state & EEH_PE_ISOLATED)) { eeh_pe_mark_isolated(pe); pnv_eeh_get_phb_diag(pe); if (eeh_has_flag(EEH_EARLY_DUMP_LOG)) pnv_pci_dump_phb_diag_data(pe->phb, pe->data); } return result; } static int pnv_eeh_get_pe_state(struct eeh_pe *pe) { struct pnv_phb *phb = pe->phb->private_data; u8 fstate = 0; __be16 pcierr = 0; s64 rc; int result; /* * We don't clobber hardware frozen state until PE * reset is completed. In order to keep EEH core * moving forward, we have to return operational * state during PE reset. 
*/ if (pe->state & EEH_PE_RESET) { result = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE | EEH_STATE_MMIO_ENABLED | EEH_STATE_DMA_ENABLED); return result; } /* * Fetch PE state from hardware. If the PHB * supports compound PE, let it handle that. */ if (phb->get_pe_state) { fstate = phb->get_pe_state(phb, pe->addr); } else { rc = opal_pci_eeh_freeze_status(phb->opal_id, pe->addr, &fstate, &pcierr, NULL); if (rc != OPAL_SUCCESS) { pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n", __func__, rc, phb->hose->global_number, pe->addr); return EEH_STATE_NOT_SUPPORT; } } /* Figure out state */ switch (fstate) { case OPAL_EEH_STOPPED_NOT_FROZEN: result = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE | EEH_STATE_MMIO_ENABLED | EEH_STATE_DMA_ENABLED); break; case OPAL_EEH_STOPPED_MMIO_FREEZE: result = (EEH_STATE_DMA_ACTIVE | EEH_STATE_DMA_ENABLED); break; case OPAL_EEH_STOPPED_DMA_FREEZE: result = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_MMIO_ENABLED); break; case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE: result = 0; break; case OPAL_EEH_STOPPED_RESET: result = EEH_STATE_RESET_ACTIVE; break; case OPAL_EEH_STOPPED_TEMP_UNAVAIL: result = EEH_STATE_UNAVAILABLE; break; case OPAL_EEH_STOPPED_PERM_UNAVAIL: result = EEH_STATE_NOT_SUPPORT; break; default: result = EEH_STATE_NOT_SUPPORT; pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n", __func__, phb->hose->global_number, pe->addr, fstate); } /* * If PHB supports compound PE, to freeze all * slave PEs for consistency. * * If the PE is switching to frozen state for the * first time, to dump the PHB diag-data. */ if (!(result & EEH_STATE_NOT_SUPPORT) && !(result & EEH_STATE_UNAVAILABLE) && !(result & EEH_STATE_MMIO_ACTIVE) && !(result & EEH_STATE_DMA_ACTIVE) && !(pe->state & EEH_PE_ISOLATED)) { if (phb->freeze_pe) phb->freeze_pe(phb, pe->addr); eeh_pe_mark_isolated(pe); pnv_eeh_get_phb_diag(pe); if (eeh_has_flag(EEH_EARLY_DUMP_LOG)) pnv_pci_dump_phb_diag_data(pe->phb, pe->data); } return result; } /** * pnv_eeh_get_state - Retrieve PE state * @pe: EEH PE * @delay: delay while PE state is temporarily unavailable * * Retrieve the state of the specified PE. For IODA-compitable * platform, it should be retrieved from IODA table. Therefore, * we prefer passing down to hardware implementation to handle * it. */ static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay) { int ret; if (pe->type & EEH_PE_PHB) ret = pnv_eeh_get_phb_state(pe); else ret = pnv_eeh_get_pe_state(pe); if (!delay) return ret; /* * If the PE state is temporarily unavailable, * to inform the EEH core delay for default * period (1 second) */ *delay = 0; if (ret & EEH_STATE_UNAVAILABLE) *delay = 1000; return ret; } static s64 pnv_eeh_poll(unsigned long id) { s64 rc = OPAL_HARDWARE; while (1) { rc = opal_pci_poll(id); if (rc <= 0) break; if (system_state < SYSTEM_RUNNING) udelay(1000 * rc); else msleep(rc); } return rc; } int pnv_eeh_phb_reset(struct pci_controller *hose, int option) { struct pnv_phb *phb = hose->private_data; s64 rc = OPAL_HARDWARE; pr_debug("%s: Reset PHB#%x, option=%d\n", __func__, hose->global_number, option); /* Issue PHB complete reset request */ if (option == EEH_RESET_FUNDAMENTAL || option == EEH_RESET_HOT) rc = opal_pci_reset(phb->opal_id, OPAL_RESET_PHB_COMPLETE, OPAL_ASSERT_RESET); else if (option == EEH_RESET_DEACTIVATE) rc = opal_pci_reset(phb->opal_id, OPAL_RESET_PHB_COMPLETE, OPAL_DEASSERT_RESET); if (rc < 0) goto out; /* * Poll state of the PHB until the request is done * successfully. The PHB reset is usually PHB complete * reset followed by hot reset on root bus. 
So we also * need the PCI bus settlement delay. */ if (rc > 0) rc = pnv_eeh_poll(phb->opal_id); if (option == EEH_RESET_DEACTIVATE) { if (system_state < SYSTEM_RUNNING) udelay(1000 * EEH_PE_RST_SETTLE_TIME); else msleep(EEH_PE_RST_SETTLE_TIME); } out: if (rc != OPAL_SUCCESS) return -EIO; return 0; } static int pnv_eeh_root_reset(struct pci_controller *hose, int option) { struct pnv_phb *phb = hose->private_data; s64 rc = OPAL_HARDWARE; pr_debug("%s: Reset PHB#%x, option=%d\n", __func__, hose->global_number, option); /* * During the reset deassert time, we needn't care * the reset scope because the firmware does nothing * for fundamental or hot reset during deassert phase. */ if (option == EEH_RESET_FUNDAMENTAL) rc = opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_FUNDAMENTAL, OPAL_ASSERT_RESET); else if (option == EEH_RESET_HOT) rc = opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_HOT, OPAL_ASSERT_RESET); else if (option == EEH_RESET_DEACTIVATE) rc = opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_HOT, OPAL_DEASSERT_RESET); if (rc < 0) goto out; /* Poll state of the PHB until the request is done */ if (rc > 0) rc = pnv_eeh_poll(phb->opal_id); if (option == EEH_RESET_DEACTIVATE) msleep(EEH_PE_RST_SETTLE_TIME); out: if (rc != OPAL_SUCCESS) return -EIO; return 0; } static int __pnv_eeh_bridge_reset(struct pci_dev *dev, int option) { struct pci_dn *pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn); struct eeh_dev *edev = pdn_to_eeh_dev(pdn); int aer = edev ? edev->aer_cap : 0; u32 ctrl; pr_debug("%s: Secondary Reset PCI bus %04x:%02x with option %d\n", __func__, pci_domain_nr(dev->bus), dev->bus->number, option); switch (option) { case EEH_RESET_FUNDAMENTAL: case EEH_RESET_HOT: /* Don't report linkDown event */ if (aer) { eeh_ops->read_config(edev, aer + PCI_ERR_UNCOR_MASK, 4, &ctrl); ctrl |= PCI_ERR_UNC_SURPDN; eeh_ops->write_config(edev, aer + PCI_ERR_UNCOR_MASK, 4, ctrl); } eeh_ops->read_config(edev, PCI_BRIDGE_CONTROL, 2, &ctrl); ctrl |= PCI_BRIDGE_CTL_BUS_RESET; eeh_ops->write_config(edev, PCI_BRIDGE_CONTROL, 2, ctrl); msleep(EEH_PE_RST_HOLD_TIME); break; case EEH_RESET_DEACTIVATE: eeh_ops->read_config(edev, PCI_BRIDGE_CONTROL, 2, &ctrl); ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; eeh_ops->write_config(edev, PCI_BRIDGE_CONTROL, 2, ctrl); msleep(EEH_PE_RST_SETTLE_TIME); /* Continue reporting linkDown event */ if (aer) { eeh_ops->read_config(edev, aer + PCI_ERR_UNCOR_MASK, 4, &ctrl); ctrl &= ~PCI_ERR_UNC_SURPDN; eeh_ops->write_config(edev, aer + PCI_ERR_UNCOR_MASK, 4, ctrl); } break; } return 0; } static int pnv_eeh_bridge_reset(struct pci_dev *pdev, int option) { struct pci_controller *hose = pci_bus_to_host(pdev->bus); struct pnv_phb *phb = hose->private_data; struct device_node *dn = pci_device_to_OF_node(pdev); uint64_t id = PCI_SLOT_ID(phb->opal_id, pci_dev_id(pdev)); uint8_t scope; int64_t rc; /* Hot reset to the bus if firmware cannot handle */ if (!dn || !of_get_property(dn, "ibm,reset-by-firmware", NULL)) return __pnv_eeh_bridge_reset(pdev, option); pr_debug("%s: FW reset PCI bus %04x:%02x with option %d\n", __func__, pci_domain_nr(pdev->bus), pdev->bus->number, option); switch (option) { case EEH_RESET_FUNDAMENTAL: scope = OPAL_RESET_PCI_FUNDAMENTAL; break; case EEH_RESET_HOT: scope = OPAL_RESET_PCI_HOT; break; case EEH_RESET_DEACTIVATE: return 0; default: dev_dbg(&pdev->dev, "%s: Unsupported reset %d\n", __func__, option); return -EINVAL; } rc = opal_pci_reset(id, scope, OPAL_ASSERT_RESET); if (rc <= OPAL_SUCCESS) goto out; rc = pnv_eeh_poll(id); out: return (rc == OPAL_SUCCESS) ? 
0 : -EIO; } void pnv_pci_reset_secondary_bus(struct pci_dev *dev) { struct pci_controller *hose; if (pci_is_root_bus(dev->bus)) { hose = pci_bus_to_host(dev->bus); pnv_eeh_root_reset(hose, EEH_RESET_HOT); pnv_eeh_root_reset(hose, EEH_RESET_DEACTIVATE); } else { pnv_eeh_bridge_reset(dev, EEH_RESET_HOT); pnv_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE); } } static void pnv_eeh_wait_for_pending(struct pci_dn *pdn, const char *type, int pos, u16 mask) { struct eeh_dev *edev = pdn->edev; int i, status = 0; /* Wait for Transaction Pending bit to be cleared */ for (i = 0; i < 4; i++) { eeh_ops->read_config(edev, pos, 2, &status); if (!(status & mask)) return; msleep((1 << i) * 100); } pr_warn("%s: Pending transaction while issuing %sFLR to %04x:%02x:%02x.%01x\n", __func__, type, pdn->phb->global_number, pdn->busno, PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn)); } static int pnv_eeh_do_flr(struct pci_dn *pdn, int option) { struct eeh_dev *edev = pdn_to_eeh_dev(pdn); u32 reg = 0; if (WARN_ON(!edev->pcie_cap)) return -ENOTTY; eeh_ops->read_config(edev, edev->pcie_cap + PCI_EXP_DEVCAP, 4, &reg); if (!(reg & PCI_EXP_DEVCAP_FLR)) return -ENOTTY; switch (option) { case EEH_RESET_HOT: case EEH_RESET_FUNDAMENTAL: pnv_eeh_wait_for_pending(pdn, "", edev->pcie_cap + PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND); eeh_ops->read_config(edev, edev->pcie_cap + PCI_EXP_DEVCTL, 4, &reg); reg |= PCI_EXP_DEVCTL_BCR_FLR; eeh_ops->write_config(edev, edev->pcie_cap + PCI_EXP_DEVCTL, 4, reg); msleep(EEH_PE_RST_HOLD_TIME); break; case EEH_RESET_DEACTIVATE: eeh_ops->read_config(edev, edev->pcie_cap + PCI_EXP_DEVCTL, 4, &reg); reg &= ~PCI_EXP_DEVCTL_BCR_FLR; eeh_ops->write_config(edev, edev->pcie_cap + PCI_EXP_DEVCTL, 4, reg); msleep(EEH_PE_RST_SETTLE_TIME); break; } return 0; } static int pnv_eeh_do_af_flr(struct pci_dn *pdn, int option) { struct eeh_dev *edev = pdn_to_eeh_dev(pdn); u32 cap = 0; if (WARN_ON(!edev->af_cap)) return -ENOTTY; eeh_ops->read_config(edev, edev->af_cap + PCI_AF_CAP, 1, &cap); if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR)) return -ENOTTY; switch (option) { case EEH_RESET_HOT: case EEH_RESET_FUNDAMENTAL: /* * Wait for Transaction Pending bit to clear. A word-aligned * test is used, so we use the control offset rather than status * and shift the test bit to match. */ pnv_eeh_wait_for_pending(pdn, "AF", edev->af_cap + PCI_AF_CTRL, PCI_AF_STATUS_TP << 8); eeh_ops->write_config(edev, edev->af_cap + PCI_AF_CTRL, 1, PCI_AF_CTRL_FLR); msleep(EEH_PE_RST_HOLD_TIME); break; case EEH_RESET_DEACTIVATE: eeh_ops->write_config(edev, edev->af_cap + PCI_AF_CTRL, 1, 0); msleep(EEH_PE_RST_SETTLE_TIME); break; } return 0; } static int pnv_eeh_reset_vf_pe(struct eeh_pe *pe, int option) { struct eeh_dev *edev; struct pci_dn *pdn; int ret; /* The VF PE should have only one child device */ edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry); pdn = eeh_dev_to_pdn(edev); if (!pdn) return -ENXIO; ret = pnv_eeh_do_flr(pdn, option); if (!ret) return ret; return pnv_eeh_do_af_flr(pdn, option); } /** * pnv_eeh_reset - Reset the specified PE * @pe: EEH PE * @option: reset option * * Do reset on the indicated PE. For PCI bus sensitive PE, * we need to reset the parent p2p bridge. The PHB has to * be reinitialized if the p2p bridge is root bridge. For * PCI device sensitive PE, we will try to reset the device * through FLR. For now, we don't have OPAL APIs to do HARD * reset yet, so all reset would be SOFT (HOT) reset. 
*/ static int pnv_eeh_reset(struct eeh_pe *pe, int option) { struct pci_controller *hose = pe->phb; struct pnv_phb *phb; struct pci_bus *bus; int64_t rc; /* * For PHB reset, we always have complete reset. For those PEs whose * primary bus derived from root complex (root bus) or root port * (usually bus#1), we apply hot or fundamental reset on the root port. * For other PEs, we always have hot reset on the PE primary bus. * * Here, we have different design to pHyp, which always clear the * frozen state during PE reset. However, the good idea here from * benh is to keep frozen state before we get PE reset done completely * (until BAR restore). With the frozen state, HW drops illegal IO * or MMIO access, which can incur recursive frozen PE during PE * reset. The side effect is that EEH core has to clear the frozen * state explicitly after BAR restore. */ if (pe->type & EEH_PE_PHB) return pnv_eeh_phb_reset(hose, option); /* * The frozen PE might be caused by PAPR error injection * registers, which are expected to be cleared after hitting * frozen PE as stated in the hardware spec. Unfortunately, * that's not true on P7IOC. So we have to clear it manually * to avoid recursive EEH errors during recovery. */ phb = hose->private_data; if (phb->model == PNV_PHB_MODEL_P7IOC && (option == EEH_RESET_HOT || option == EEH_RESET_FUNDAMENTAL)) { rc = opal_pci_reset(phb->opal_id, OPAL_RESET_PHB_ERROR, OPAL_ASSERT_RESET); if (rc != OPAL_SUCCESS) { pr_warn("%s: Failure %lld clearing error injection registers\n", __func__, rc); return -EIO; } } if (pe->type & EEH_PE_VF) return pnv_eeh_reset_vf_pe(pe, option); bus = eeh_pe_bus_get(pe); if (!bus) { pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n", __func__, pe->phb->global_number, pe->addr); return -EIO; } if (pci_is_root_bus(bus)) return pnv_eeh_root_reset(hose, option); /* * For hot resets try use the generic PCI error recovery reset * functions. These correctly handles the case where the secondary * bus is behind a hotplug slot and it will use the slot provided * reset methods to prevent spurious hotplug events during the reset. * * Fundamental resets need to be handled internally to EEH since the * PCI core doesn't really have a concept of a fundamental reset, * mainly because there's no standard way to generate one. Only a * few devices require an FRESET so it should be fine. */ if (option != EEH_RESET_FUNDAMENTAL) { /* * NB: Skiboot and pnv_eeh_bridge_reset() also no-op the * de-assert step. It's like the OPAL reset API was * poorly designed or something... */ if (option == EEH_RESET_DEACTIVATE) return 0; rc = pci_bus_error_reset(bus->self); if (!rc) return 0; } /* otherwise, use the generic bridge reset. this might call into FW */ if (pci_is_root_bus(bus->parent)) return pnv_eeh_root_reset(hose, option); return pnv_eeh_bridge_reset(bus->self, option); } /** * pnv_eeh_get_log - Retrieve error log * @pe: EEH PE * @severity: temporary or permanent error log * @drv_log: driver log to be combined with retrieved error log * @len: length of driver log * * Retrieve the temporary or permanent error from the PE. */ static int pnv_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len) { if (!eeh_has_flag(EEH_EARLY_DUMP_LOG)) pnv_pci_dump_phb_diag_data(pe->phb, pe->data); return 0; } /** * pnv_eeh_configure_bridge - Configure PCI bridges in the indicated PE * @pe: EEH PE * * The function will be called to reconfigure the bridges included * in the specified PE so that the mulfunctional PE would be recovered * again. 
*/ static int pnv_eeh_configure_bridge(struct eeh_pe *pe) { return 0; } /** * pnv_pe_err_inject - Inject specified error to the indicated PE * @pe: the indicated PE * @type: error type * @func: specific error type * @addr: address * @mask: address mask * * The routine is called to inject specified error, which is * determined by @type and @func, to the indicated PE for * testing purpose. */ static int pnv_eeh_err_inject(struct eeh_pe *pe, int type, int func, unsigned long addr, unsigned long mask) { struct pci_controller *hose = pe->phb; struct pnv_phb *phb = hose->private_data; s64 rc; if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR && type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) { pr_warn("%s: Invalid error type %d\n", __func__, type); return -ERANGE; } if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR || func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) { pr_warn("%s: Invalid error function %d\n", __func__, func); return -ERANGE; } /* Firmware supports error injection ? */ if (!opal_check_token(OPAL_PCI_ERR_INJECT)) { pr_warn("%s: Firmware doesn't support error injection\n", __func__); return -ENXIO; } /* Do error injection */ rc = opal_pci_err_inject(phb->opal_id, pe->addr, type, func, addr, mask); if (rc != OPAL_SUCCESS) { pr_warn("%s: Failure %lld injecting error " "%d-%d to PHB#%x-PE#%x\n", __func__, rc, type, func, hose->global_number, pe->addr); return -EIO; } return 0; } static inline bool pnv_eeh_cfg_blocked(struct pci_dn *pdn) { struct eeh_dev *edev = pdn_to_eeh_dev(pdn); if (!edev || !edev->pe) return false; /* * We will issue FLR or AF FLR to all VFs, which are contained * in VF PE. It relies on the EEH PCI config accessors. So we * can't block them during the window. */ if (edev->physfn && (edev->pe->state & EEH_PE_RESET)) return false; if (edev->pe->state & EEH_PE_CFG_BLOCKED) return true; return false; } static int pnv_eeh_read_config(struct eeh_dev *edev, int where, int size, u32 *val) { struct pci_dn *pdn = eeh_dev_to_pdn(edev); if (!pdn) return PCIBIOS_DEVICE_NOT_FOUND; if (pnv_eeh_cfg_blocked(pdn)) { *val = 0xFFFFFFFF; return PCIBIOS_SET_FAILED; } return pnv_pci_cfg_read(pdn, where, size, val); } static int pnv_eeh_write_config(struct eeh_dev *edev, int where, int size, u32 val) { struct pci_dn *pdn = eeh_dev_to_pdn(edev); if (!pdn) return PCIBIOS_DEVICE_NOT_FOUND; if (pnv_eeh_cfg_blocked(pdn)) return PCIBIOS_SET_FAILED; return pnv_pci_cfg_write(pdn, where, size, val); } static void pnv_eeh_dump_hub_diag_common(struct OpalIoP7IOCErrorData *data) { /* GEM */ if (data->gemXfir || data->gemRfir || data->gemRirqfir || data->gemMask || data->gemRwof) pr_info(" GEM: %016llx %016llx %016llx %016llx %016llx\n", be64_to_cpu(data->gemXfir), be64_to_cpu(data->gemRfir), be64_to_cpu(data->gemRirqfir), be64_to_cpu(data->gemMask), be64_to_cpu(data->gemRwof)); /* LEM */ if (data->lemFir || data->lemErrMask || data->lemAction0 || data->lemAction1 || data->lemWof) pr_info(" LEM: %016llx %016llx %016llx %016llx %016llx\n", be64_to_cpu(data->lemFir), be64_to_cpu(data->lemErrMask), be64_to_cpu(data->lemAction0), be64_to_cpu(data->lemAction1), be64_to_cpu(data->lemWof)); } static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose) { struct pnv_phb *phb = hose->private_data; struct OpalIoP7IOCErrorData *data = (struct OpalIoP7IOCErrorData*)phb->diag_data; long rc; rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data)); if (rc != OPAL_SUCCESS) { pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n", __func__, phb->hub_id, rc); return; } switch (be16_to_cpu(data->type)) { case 
OPAL_P7IOC_DIAG_TYPE_RGC: pr_info("P7IOC diag-data for RGC\n\n"); pnv_eeh_dump_hub_diag_common(data); if (data->rgc.rgcStatus || data->rgc.rgcLdcp) pr_info(" RGC: %016llx %016llx\n", be64_to_cpu(data->rgc.rgcStatus), be64_to_cpu(data->rgc.rgcLdcp)); break; case OPAL_P7IOC_DIAG_TYPE_BI: pr_info("P7IOC diag-data for BI %s\n\n", data->bi.biDownbound ? "Downbound" : "Upbound"); pnv_eeh_dump_hub_diag_common(data); if (data->bi.biLdcp0 || data->bi.biLdcp1 || data->bi.biLdcp2 || data->bi.biFenceStatus) pr_info(" BI: %016llx %016llx %016llx %016llx\n", be64_to_cpu(data->bi.biLdcp0), be64_to_cpu(data->bi.biLdcp1), be64_to_cpu(data->bi.biLdcp2), be64_to_cpu(data->bi.biFenceStatus)); break; case OPAL_P7IOC_DIAG_TYPE_CI: pr_info("P7IOC diag-data for CI Port %d\n\n", data->ci.ciPort); pnv_eeh_dump_hub_diag_common(data); if (data->ci.ciPortStatus || data->ci.ciPortLdcp) pr_info(" CI: %016llx %016llx\n", be64_to_cpu(data->ci.ciPortStatus), be64_to_cpu(data->ci.ciPortLdcp)); break; case OPAL_P7IOC_DIAG_TYPE_MISC: pr_info("P7IOC diag-data for MISC\n\n"); pnv_eeh_dump_hub_diag_common(data); break; case OPAL_P7IOC_DIAG_TYPE_I2C: pr_info("P7IOC diag-data for I2C\n\n"); pnv_eeh_dump_hub_diag_common(data); break; default: pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n", __func__, phb->hub_id, data->type); } } static int pnv_eeh_get_pe(struct pci_controller *hose, u16 pe_no, struct eeh_pe **pe) { struct pnv_phb *phb = hose->private_data; struct pnv_ioda_pe *pnv_pe; struct eeh_pe *dev_pe; /* * If PHB supports compound PE, to fetch * the master PE because slave PE is invisible * to EEH core. */ pnv_pe = &phb->ioda.pe_array[pe_no]; if (pnv_pe->flags & PNV_IODA_PE_SLAVE) { pnv_pe = pnv_pe->master; WARN_ON(!pnv_pe || !(pnv_pe->flags & PNV_IODA_PE_MASTER)); pe_no = pnv_pe->pe_number; } /* Find the PE according to PE# */ dev_pe = eeh_pe_get(hose, pe_no); if (!dev_pe) return -EEXIST; /* Freeze the (compound) PE */ *pe = dev_pe; if (!(dev_pe->state & EEH_PE_ISOLATED)) phb->freeze_pe(phb, pe_no); /* * At this point, we're sure the (compound) PE should * have been frozen. However, we still need poke until * hitting the frozen PE on top level. */ dev_pe = dev_pe->parent; while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) { int ret; ret = eeh_ops->get_state(dev_pe, NULL); if (ret <= 0 || eeh_state_active(ret)) { dev_pe = dev_pe->parent; continue; } /* Frozen parent PE */ *pe = dev_pe; if (!(dev_pe->state & EEH_PE_ISOLATED)) phb->freeze_pe(phb, dev_pe->addr); /* Next one */ dev_pe = dev_pe->parent; } return 0; } /** * pnv_eeh_next_error - Retrieve next EEH error to handle * @pe: Affected PE * * The function is expected to be called by EEH core while it gets * special EEH event (without binding PE). The function calls to * OPAL APIs for next error to handle. The informational error is * handled internally by platform. However, the dead IOC, dead PHB, * fenced PHB and frozen PE should be handled by EEH core eventually. */ static int pnv_eeh_next_error(struct eeh_pe **pe) { struct pci_controller *hose; struct pnv_phb *phb; struct eeh_pe *phb_pe, *parent_pe; __be64 frozen_pe_no; __be16 err_type, severity; long rc; int state, ret = EEH_NEXT_ERR_NONE; /* * While running here, it's safe to purge the event queue. The * event should still be masked. */ eeh_remove_event(NULL, false); list_for_each_entry(hose, &hose_list, list_node) { /* * If the subordinate PCI buses of the PHB has been * removed or is exactly under error recovery, we * needn't take care of it any more. 
*/ phb = hose->private_data; phb_pe = eeh_phb_pe_get(hose); if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED)) continue; rc = opal_pci_next_error(phb->opal_id, &frozen_pe_no, &err_type, &severity); if (rc != OPAL_SUCCESS) { pr_devel("%s: Invalid return value on " "PHB#%x (0x%lx) from opal_pci_next_error", __func__, hose->global_number, rc); continue; } /* If the PHB doesn't have error, stop processing */ if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR || be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) { pr_devel("%s: No error found on PHB#%x\n", __func__, hose->global_number); continue; } /* * Processing the error. We're expecting the error with * highest priority reported upon multiple errors on the * specific PHB. */ pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n", __func__, be16_to_cpu(err_type), be16_to_cpu(severity), be64_to_cpu(frozen_pe_no), hose->global_number); switch (be16_to_cpu(err_type)) { case OPAL_EEH_IOC_ERROR: if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) { pr_err("EEH: dead IOC detected\n"); ret = EEH_NEXT_ERR_DEAD_IOC; } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) { pr_info("EEH: IOC informative error " "detected\n"); pnv_eeh_get_and_dump_hub_diag(hose); ret = EEH_NEXT_ERR_NONE; } break; case OPAL_EEH_PHB_ERROR: if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) { *pe = phb_pe; pr_err("EEH: dead PHB#%x detected, " "location: %s\n", hose->global_number, eeh_pe_loc_get(phb_pe)); ret = EEH_NEXT_ERR_DEAD_PHB; } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_FENCED) { *pe = phb_pe; pr_err("EEH: Fenced PHB#%x detected, " "location: %s\n", hose->global_number, eeh_pe_loc_get(phb_pe)); ret = EEH_NEXT_ERR_FENCED_PHB; } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) { pr_info("EEH: PHB#%x informative error " "detected, location: %s\n", hose->global_number, eeh_pe_loc_get(phb_pe)); pnv_eeh_get_phb_diag(phb_pe); pnv_pci_dump_phb_diag_data(hose, phb_pe->data); ret = EEH_NEXT_ERR_NONE; } break; case OPAL_EEH_PE_ERROR: /* * If we can't find the corresponding PE, we * just try to unfreeze. */ if (pnv_eeh_get_pe(hose, be64_to_cpu(frozen_pe_no), pe)) { pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n", hose->global_number, be64_to_cpu(frozen_pe_no)); pr_info("EEH: PHB location: %s\n", eeh_pe_loc_get(phb_pe)); /* Dump PHB diag-data */ rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data, phb->diag_data_size); if (rc == OPAL_SUCCESS) pnv_pci_dump_phb_diag_data(hose, phb->diag_data); /* Try best to clear it */ opal_pci_eeh_freeze_clear(phb->opal_id, be64_to_cpu(frozen_pe_no), OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); ret = EEH_NEXT_ERR_NONE; } else if ((*pe)->state & EEH_PE_ISOLATED || eeh_pe_passed(*pe)) { ret = EEH_NEXT_ERR_NONE; } else { pr_err("EEH: Frozen PE#%x " "on PHB#%x detected\n", (*pe)->addr, (*pe)->phb->global_number); pr_err("EEH: PE location: %s, " "PHB location: %s\n", eeh_pe_loc_get(*pe), eeh_pe_loc_get(phb_pe)); ret = EEH_NEXT_ERR_FROZEN_PE; } break; default: pr_warn("%s: Unexpected error type %d\n", __func__, be16_to_cpu(err_type)); } /* * EEH core will try recover from fenced PHB or * frozen PE. In the time for frozen PE, EEH core * enable IO path for that before collecting logs, * but it ruins the site. So we have to dump the * log in advance here. 
*/ if ((ret == EEH_NEXT_ERR_FROZEN_PE || ret == EEH_NEXT_ERR_FENCED_PHB) && !((*pe)->state & EEH_PE_ISOLATED)) { eeh_pe_mark_isolated(*pe); pnv_eeh_get_phb_diag(*pe); if (eeh_has_flag(EEH_EARLY_DUMP_LOG)) pnv_pci_dump_phb_diag_data((*pe)->phb, (*pe)->data); } /* * We probably have the frozen parent PE out there and * we need have to handle frozen parent PE firstly. */ if (ret == EEH_NEXT_ERR_FROZEN_PE) { parent_pe = (*pe)->parent; while (parent_pe) { /* Hit the ceiling ? */ if (parent_pe->type & EEH_PE_PHB) break; /* Frozen parent PE ? */ state = eeh_ops->get_state(parent_pe, NULL); if (state > 0 && !eeh_state_active(state)) *pe = parent_pe; /* Next parent level */ parent_pe = parent_pe->parent; } /* We possibly migrate to another PE */ eeh_pe_mark_isolated(*pe); } /* * If we have no errors on the specific PHB or only * informative error there, we continue poking it. * Otherwise, we need actions to be taken by upper * layer. */ if (ret > EEH_NEXT_ERR_INF) break; } /* Unmask the event */ if (ret == EEH_NEXT_ERR_NONE && eeh_enabled()) enable_irq(eeh_event_irq); return ret; } static int pnv_eeh_restore_config(struct eeh_dev *edev) { struct pnv_phb *phb; s64 ret = 0; if (!edev) return -EEXIST; if (edev->physfn) return 0; phb = edev->controller->private_data; ret = opal_pci_reinit(phb->opal_id, OPAL_REINIT_PCI_DEV, edev->bdfn); if (ret) { pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n", __func__, edev->bdfn, ret); return -EIO; } return ret; } static struct eeh_ops pnv_eeh_ops = { .name = "powernv", .probe = pnv_eeh_probe, .set_option = pnv_eeh_set_option, .get_state = pnv_eeh_get_state, .reset = pnv_eeh_reset, .get_log = pnv_eeh_get_log, .configure_bridge = pnv_eeh_configure_bridge, .err_inject = pnv_eeh_err_inject, .read_config = pnv_eeh_read_config, .write_config = pnv_eeh_write_config, .next_error = pnv_eeh_next_error, .restore_config = pnv_eeh_restore_config, .notify_resume = NULL }; /** * eeh_powernv_init - Register platform dependent EEH operations * * EEH initialization on powernv platform. This function should be * called before any EEH related functions. */ static int __init eeh_powernv_init(void) { int max_diag_size = PNV_PCI_DIAG_BUF_SIZE; struct pci_controller *hose; struct pnv_phb *phb; int ret = -EINVAL; if (!firmware_has_feature(FW_FEATURE_OPAL)) { pr_warn("%s: OPAL is required !\n", __func__); return -EINVAL; } /* Set probe mode */ eeh_add_flag(EEH_PROBE_MODE_DEV); /* * P7IOC blocks PCI config access to frozen PE, but PHB3 * doesn't do that. So we have to selectively enable I/O * prior to collecting error log. */ list_for_each_entry(hose, &hose_list, list_node) { phb = hose->private_data; if (phb->model == PNV_PHB_MODEL_P7IOC) eeh_add_flag(EEH_ENABLE_IO_FOR_LOG); if (phb->diag_data_size > max_diag_size) max_diag_size = phb->diag_data_size; break; } /* * eeh_init() allocates the eeh_pe and its aux data buf so the * size needs to be set before calling eeh_init(). */ eeh_set_pe_aux_size(max_diag_size); ppc_md.pcibios_bus_add_device = pnv_pcibios_bus_add_device; ret = eeh_init(&pnv_eeh_ops); if (!ret) pr_info("EEH: PowerNV platform initialized\n"); else pr_info("EEH: Failed to initialize PowerNV platform (%d)\n", ret); return ret; } machine_arch_initcall(powernv, eeh_powernv_init);
linux-master
arch/powerpc/platforms/powernv/eeh-powernv.c
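pnv_eeh_find_cap() above is a bounded walk of the standard PCI capability linked list. The sketch below restates that walk in plain, self-contained C so the termination conditions are easy to see; the cfg_read callback and every identifier here are hypothetical stand-ins for the platform's config-space accessors, not the kernel's implementation.

#include <stdint.h>

#define PCI_STATUS           0x06
#define PCI_STATUS_CAP_LIST  0x10
#define PCI_CAPABILITY_LIST  0x34
#define PCI_CAP_LIST_ID      0
#define PCI_CAP_LIST_NEXT    1

typedef uint32_t (*cfg_read_fn)(void *dev, int where, int size);

static int find_pci_cap(void *dev, cfg_read_fn cfg_read, uint8_t cap)
{
	int pos = PCI_CAPABILITY_LIST;
	int cnt = 48;			/* upper bound on list length */
	uint32_t status, id;

	status = cfg_read(dev, PCI_STATUS, 2);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;		/* device has no capability list */

	while (cnt--) {
		pos = cfg_read(dev, pos, 1);
		if (pos < 0x40)		/* offsets below 0x40 are the header */
			break;
		pos &= ~3;		/* capability structures are dword aligned */
		id = cfg_read(dev, pos + PCI_CAP_LIST_ID, 1);
		if (id == 0xff)		/* 0xff means config reads are failing */
			break;
		if (id == cap)
			return pos;	/* found: return config-space offset */
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}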
// SPDX-License-Identifier: GPL-2.0 /* * PowerNV code for secure variables * * Copyright (C) 2019 IBM Corporation * Author: Claudio Carvalho * Nayna Jain * * APIs to access secure variables managed by OPAL. */ #define pr_fmt(fmt) "secvar: "fmt #include <linux/types.h> #include <linux/of.h> #include <linux/platform_device.h> #include <asm/opal.h> #include <asm/secvar.h> #include <asm/secure_boot.h> static int opal_status_to_err(int rc) { int err; switch (rc) { case OPAL_SUCCESS: err = 0; break; case OPAL_UNSUPPORTED: err = -ENXIO; break; case OPAL_PARAMETER: err = -EINVAL; break; case OPAL_RESOURCE: err = -ENOSPC; break; case OPAL_HARDWARE: err = -EIO; break; case OPAL_NO_MEM: err = -ENOMEM; break; case OPAL_EMPTY: err = -ENOENT; break; case OPAL_PARTIAL: err = -EFBIG; break; default: err = -EINVAL; } return err; } static int opal_get_variable(const char *key, u64 ksize, u8 *data, u64 *dsize) { int rc; if (!key || !dsize) return -EINVAL; *dsize = cpu_to_be64(*dsize); rc = opal_secvar_get(key, ksize, data, dsize); *dsize = be64_to_cpu(*dsize); return opal_status_to_err(rc); } static int opal_get_next_variable(const char *key, u64 *keylen, u64 keybufsize) { int rc; if (!key || !keylen) return -EINVAL; *keylen = cpu_to_be64(*keylen); rc = opal_secvar_get_next(key, keylen, keybufsize); *keylen = be64_to_cpu(*keylen); return opal_status_to_err(rc); } static int opal_set_variable(const char *key, u64 ksize, u8 *data, u64 dsize) { int rc; if (!key || !data) return -EINVAL; rc = opal_secvar_enqueue_update(key, ksize, data, dsize); return opal_status_to_err(rc); } static ssize_t opal_secvar_format(char *buf, size_t bufsize) { ssize_t rc = 0; struct device_node *node; const char *format; node = of_find_compatible_node(NULL, NULL, "ibm,secvar-backend"); if (!of_device_is_available(node)) { rc = -ENODEV; goto out; } rc = of_property_read_string(node, "format", &format); if (rc) goto out; rc = snprintf(buf, bufsize, "%s", format); out: of_node_put(node); return rc; } static int opal_secvar_max_size(u64 *max_size) { int rc; struct device_node *node; node = of_find_compatible_node(NULL, NULL, "ibm,secvar-backend"); if (!node) return -ENODEV; if (!of_device_is_available(node)) { rc = -ENODEV; goto out; } rc = of_property_read_u64(node, "max-var-size", max_size); out: of_node_put(node); return rc; } static const struct secvar_operations opal_secvar_ops = { .get = opal_get_variable, .get_next = opal_get_next_variable, .set = opal_set_variable, .format = opal_secvar_format, .max_size = opal_secvar_max_size, }; static int opal_secvar_probe(struct platform_device *pdev) { if (!opal_check_token(OPAL_SECVAR_GET) || !opal_check_token(OPAL_SECVAR_GET_NEXT) || !opal_check_token(OPAL_SECVAR_ENQUEUE_UPDATE)) { pr_err("OPAL doesn't support secure variables\n"); return -ENODEV; } return set_secvar_ops(&opal_secvar_ops); } static const struct of_device_id opal_secvar_match[] = { { .compatible = "ibm,secvar-backend",}, {}, }; static struct platform_driver opal_secvar_driver = { .driver = { .name = "secvar", .of_match_table = opal_secvar_match, }, }; static int __init opal_secvar_init(void) { return platform_driver_probe(&opal_secvar_driver, opal_secvar_probe); } device_initcall(opal_secvar_init);
linux-master
arch/powerpc/platforms/powernv/opal-secvar.c
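The secure-variable backend above registers an ops table from a platform driver that is matched on a device-tree compatible string and probed once at boot via platform_driver_probe(). A stripped-down sketch of that registration pattern follows; the example_* names, the "vendor,example-backend" compatible string and the example_register_ops() hook are invented for illustration and merely stand in for the real secvar core interfaces.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>

struct example_ops {
	int (*read)(const char *key, void *buf, unsigned long *len);
};

static int example_read(const char *key, void *buf, unsigned long *len)
{
	pr_info("read of %s requested\n", key);
	*len = 0;
	return -ENOENT;			/* placeholder backend: nothing stored */
}

static const struct example_ops example_backend_ops = {
	.read = example_read,
};

/* hypothetical core-layer hook; plays the role of set_secvar_ops() */
static int example_register_ops(const struct example_ops *ops)
{
	pr_info("example backend ops %p registered\n", ops);
	return 0;
}

static int example_probe(struct platform_device *pdev)
{
	return example_register_ops(&example_backend_ops);
}

static const struct of_device_id example_match[] = {
	{ .compatible = "vendor,example-backend" },
	{ },
};

static struct platform_driver example_driver = {
	.driver = {
		.name = "example-backend",
		.of_match_table = example_match,
	},
};

static int __init example_init(void)
{
	/* probe routine passed separately, as the secvar driver does */
	return platform_driver_probe(&example_driver, example_probe);
}
device_initcall(example_init);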
// SPDX-License-Identifier: GPL-2.0+ /* * TCE helpers for IODA PCI/PCIe on PowerNV platforms * * Copyright 2018 IBM Corp. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/iommu.h> #include <asm/iommu.h> #include <asm/tce.h> #include "pci.h" unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb) { struct pci_controller *hose = phb->hose; struct device_node *dn = hose->dn; unsigned long mask = 0; int i, rc, count; u32 val; count = of_property_count_u32_elems(dn, "ibm,supported-tce-sizes"); if (count <= 0) { mask = SZ_4K | SZ_64K; /* Add 16M for POWER8 by default */ if (cpu_has_feature(CPU_FTR_ARCH_207S) && !cpu_has_feature(CPU_FTR_ARCH_300)) mask |= SZ_16M | SZ_256M; return mask; } for (i = 0; i < count; i++) { rc = of_property_read_u32_index(dn, "ibm,supported-tce-sizes", i, &val); if (rc == 0) mask |= 1ULL << val; } return mask; } void pnv_pci_setup_iommu_table(struct iommu_table *tbl, void *tce_mem, u64 tce_size, u64 dma_offset, unsigned int page_shift) { tbl->it_blocksize = 16; tbl->it_base = (unsigned long)tce_mem; tbl->it_page_shift = page_shift; tbl->it_offset = dma_offset >> tbl->it_page_shift; tbl->it_index = 0; tbl->it_size = tce_size >> 3; tbl->it_busno = 0; tbl->it_type = TCE_PCI; } static __be64 *pnv_alloc_tce_level(int nid, unsigned int shift) { struct page *tce_mem = NULL; __be64 *addr; tce_mem = alloc_pages_node(nid, GFP_ATOMIC | __GFP_NOWARN, shift - PAGE_SHIFT); if (!tce_mem) { pr_err("Failed to allocate a TCE memory, level shift=%d\n", shift); return NULL; } addr = page_address(tce_mem); memset(addr, 0, 1UL << shift); return addr; } static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr, unsigned long size, unsigned int levels); static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc) { __be64 *tmp = user ? 
tbl->it_userspace : (__be64 *) tbl->it_base; int level = tbl->it_indirect_levels; const long shift = ilog2(tbl->it_level_size); unsigned long mask = (tbl->it_level_size - 1) << (level * shift); while (level) { int n = (idx & mask) >> (level * shift); unsigned long oldtce, tce = be64_to_cpu(READ_ONCE(tmp[n])); if (!tce) { __be64 *tmp2; if (!alloc) return NULL; tmp2 = pnv_alloc_tce_level(tbl->it_nid, ilog2(tbl->it_level_size) + 3); if (!tmp2) return NULL; tce = __pa(tmp2) | TCE_PCI_READ | TCE_PCI_WRITE; oldtce = be64_to_cpu(cmpxchg(&tmp[n], 0, cpu_to_be64(tce))); if (oldtce) { pnv_pci_ioda2_table_do_free_pages(tmp2, ilog2(tbl->it_level_size) + 3, 1); tce = oldtce; } } tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE)); idx &= ~mask; mask >>= shift; --level; } return tmp + idx; } int pnv_tce_build(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, unsigned long attrs) { u64 proto_tce = iommu_direction_to_tce_perm(direction); u64 rpn = __pa(uaddr) >> tbl->it_page_shift; long i; if (proto_tce & TCE_PCI_WRITE) proto_tce |= TCE_PCI_READ; for (i = 0; i < npages; i++) { unsigned long newtce = proto_tce | ((rpn + i) << tbl->it_page_shift); unsigned long idx = index - tbl->it_offset + i; *(pnv_tce(tbl, false, idx, true)) = cpu_to_be64(newtce); } return 0; } #ifdef CONFIG_IOMMU_API int pnv_tce_xchg(struct iommu_table *tbl, long index, unsigned long *hpa, enum dma_data_direction *direction) { u64 proto_tce = iommu_direction_to_tce_perm(*direction); unsigned long newtce = *hpa | proto_tce, oldtce; unsigned long idx = index - tbl->it_offset; __be64 *ptce = NULL; BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl)); if (*direction == DMA_NONE) { ptce = pnv_tce(tbl, false, idx, false); if (!ptce) { *hpa = 0; return 0; } } if (!ptce) { ptce = pnv_tce(tbl, false, idx, true); if (!ptce) return -ENOMEM; } if (newtce & TCE_PCI_WRITE) newtce |= TCE_PCI_READ; oldtce = be64_to_cpu(xchg(ptce, cpu_to_be64(newtce))); *hpa = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE); *direction = iommu_tce_direction(oldtce); return 0; } __be64 *pnv_tce_useraddrptr(struct iommu_table *tbl, long index, bool alloc) { if (WARN_ON_ONCE(!tbl->it_userspace)) return NULL; return pnv_tce(tbl, true, index - tbl->it_offset, alloc); } #endif void pnv_tce_free(struct iommu_table *tbl, long index, long npages) { long i; for (i = 0; i < npages; i++) { unsigned long idx = index - tbl->it_offset + i; __be64 *ptce = pnv_tce(tbl, false, idx, false); if (ptce) *ptce = cpu_to_be64(0); else /* Skip the rest of the level */ i |= tbl->it_level_size - 1; } } unsigned long pnv_tce_get(struct iommu_table *tbl, long index) { __be64 *ptce = pnv_tce(tbl, false, index - tbl->it_offset, false); if (!ptce) return 0; return be64_to_cpu(*ptce); } static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr, unsigned long size, unsigned int levels) { const unsigned long addr_ul = (unsigned long) addr & ~(TCE_PCI_READ | TCE_PCI_WRITE); if (levels) { long i; u64 *tmp = (u64 *) addr_ul; for (i = 0; i < size; ++i) { unsigned long hpa = be64_to_cpu(tmp[i]); if (!(hpa & (TCE_PCI_READ | TCE_PCI_WRITE))) continue; pnv_pci_ioda2_table_do_free_pages(__va(hpa), size, levels - 1); } } free_pages(addr_ul, get_order(size << 3)); } void pnv_pci_ioda2_table_free_pages(struct iommu_table *tbl) { const unsigned long size = tbl->it_indirect_levels ? 
tbl->it_level_size : tbl->it_size; if (!tbl->it_size) return; pnv_pci_ioda2_table_do_free_pages((__be64 *)tbl->it_base, size, tbl->it_indirect_levels); if (tbl->it_userspace) { pnv_pci_ioda2_table_do_free_pages(tbl->it_userspace, size, tbl->it_indirect_levels); } } static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned int shift, unsigned int levels, unsigned long limit, unsigned long *current_offset, unsigned long *total_allocated) { __be64 *addr, *tmp; unsigned long allocated = 1UL << shift; unsigned int entries = 1UL << (shift - 3); long i; addr = pnv_alloc_tce_level(nid, shift); *total_allocated += allocated; --levels; if (!levels) { *current_offset += allocated; return addr; } for (i = 0; i < entries; ++i) { tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift, levels, limit, current_offset, total_allocated); if (!tmp) break; addr[i] = cpu_to_be64(__pa(tmp) | TCE_PCI_READ | TCE_PCI_WRITE); if (*current_offset >= limit) break; } return addr; } long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, __u32 page_shift, __u64 window_size, __u32 levels, bool alloc_userspace_copy, struct iommu_table *tbl) { void *addr, *uas = NULL; unsigned long offset = 0, level_shift, total_allocated = 0; unsigned long total_allocated_uas = 0; const unsigned int window_shift = ilog2(window_size); unsigned int entries_shift = window_shift - page_shift; unsigned int table_shift = max_t(unsigned int, entries_shift + 3, PAGE_SHIFT); const unsigned long tce_table_size = 1UL << table_shift; if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS)) return -EINVAL; if (!is_power_of_2(window_size)) return -EINVAL; /* Adjust direct table size from window_size and levels */ entries_shift = (entries_shift + levels - 1) / levels; level_shift = entries_shift + 3; level_shift = max_t(unsigned int, level_shift, PAGE_SHIFT); if ((level_shift - 3) * levels + page_shift >= 55) return -EINVAL; /* Allocate TCE table */ addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift, 1, tce_table_size, &offset, &total_allocated); /* addr==NULL means that the first level allocation failed */ if (!addr) return -ENOMEM; /* * First level was allocated but some lower level failed as * we did not allocate as much as we wanted, * release partially allocated table. 
*/ if (levels == 1 && offset < tce_table_size) goto free_tces_exit; /* Allocate userspace view of the TCE table */ if (alloc_userspace_copy) { offset = 0; uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift, 1, tce_table_size, &offset, &total_allocated_uas); if (!uas) goto free_tces_exit; if (levels == 1 && (offset < tce_table_size || total_allocated_uas != total_allocated)) goto free_uas_exit; } /* Setup linux iommu table */ pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, bus_offset, page_shift); tbl->it_level_size = 1ULL << (level_shift - 3); tbl->it_indirect_levels = levels - 1; tbl->it_userspace = uas; tbl->it_nid = nid; pr_debug("Created TCE table: ws=%08llx ts=%lx @%08llx base=%lx uas=%p levels=%d/%d\n", window_size, tce_table_size, bus_offset, tbl->it_base, tbl->it_userspace, 1, levels); return 0; free_uas_exit: pnv_pci_ioda2_table_do_free_pages(uas, 1ULL << (level_shift - 3), levels - 1); free_tces_exit: pnv_pci_ioda2_table_do_free_pages(addr, 1ULL << (level_shift - 3), levels - 1); return -ENOMEM; } void pnv_pci_unlink_table_and_group(struct iommu_table *tbl, struct iommu_table_group *table_group) { long i; bool found; struct iommu_table_group_link *tgl; if (!tbl || !table_group) return; /* Remove link to a group from table's list of attached groups */ found = false; rcu_read_lock(); list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) { if (tgl->table_group == table_group) { list_del_rcu(&tgl->next); kfree_rcu(tgl, rcu); found = true; break; } } rcu_read_unlock(); if (WARN_ON(!found)) return; /* Clean a pointer to iommu_table in iommu_table_group::tables[] */ found = false; for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { if (table_group->tables[i] == tbl) { iommu_tce_table_put(tbl); table_group->tables[i] = NULL; found = true; break; } } WARN_ON(!found); } long pnv_pci_link_table_and_group(int node, int num, struct iommu_table *tbl, struct iommu_table_group *table_group) { struct iommu_table_group_link *tgl = NULL; if (WARN_ON(!tbl || !table_group)) return -EINVAL; tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL, node); if (!tgl) return -ENOMEM; tgl->table_group = table_group; list_add_rcu(&tgl->next, &tbl->it_group_list); table_group->tables[num] = iommu_tce_table_get(tbl); return 0; }
linux-master
arch/powerpc/platforms/powernv/pci-ioda-tce.c
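The multi-level lookup in pnv_tce() above decomposes a flat TCE index with shift/mask arithmetic, one sub-index per indirect level, and whatever remains indexes the leaf level. The standalone sketch below (plain C, GCC/Clang __builtin_ctzl assumed as an ilog2 substitute, invented names) reproduces just that arithmetic so the walk is easy to follow.

#include <stdio.h>

static void split_tce_index(unsigned long idx, unsigned long level_size,
			    int indirect_levels)
{
	int shift = __builtin_ctzl(level_size);	/* ilog2 of a power of two */
	int level = indirect_levels;
	unsigned long mask = (level_size - 1) << (level * shift);

	while (level) {
		unsigned long n = (idx & mask) >> (level * shift);

		printf("level %d -> entry %lu\n", level, n);
		idx &= ~mask;		/* strip the bits consumed by this level */
		mask >>= shift;
		--level;
	}
	/* what remains indexes the leaf (direct) level, as pnv_tce() returns tmp + idx */
	printf("leaf   -> entry %lu\n", idx);
}

int main(void)
{
	/* e.g. 512 entries per level, 2 indirect levels, flat index 1000000:
	 * 1000000 = 3 * 512^2 * 512/512... decomposes as 3 / 417 / 64 */
	split_tce_index(1000000, 512, 2);
	return 0;
}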
// SPDX-License-Identifier: GPL-2.0 #include <linux/percpu.h> #include <linux/jump_label.h> #include <asm/trace.h> #ifdef CONFIG_JUMP_LABEL struct static_key opal_tracepoint_key = STATIC_KEY_INIT; int opal_tracepoint_regfunc(void) { static_key_slow_inc(&opal_tracepoint_key); return 0; } void opal_tracepoint_unregfunc(void) { static_key_slow_dec(&opal_tracepoint_key); } #else /* * We optimise OPAL calls by placing opal_tracepoint_refcount * directly in the TOC so we can check if the opal tracepoints are * enabled via a single load. */ /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ extern long opal_tracepoint_refcount; int opal_tracepoint_regfunc(void) { opal_tracepoint_refcount++; return 0; } void opal_tracepoint_unregfunc(void) { opal_tracepoint_refcount--; } #endif /* * Since the tracing code might execute OPAL calls we need to guard against * recursion. */ static DEFINE_PER_CPU(unsigned int, opal_trace_depth); void __trace_opal_entry(unsigned long opcode, unsigned long *args) { unsigned long flags; unsigned int *depth; local_irq_save(flags); depth = this_cpu_ptr(&opal_trace_depth); if (*depth) goto out; (*depth)++; preempt_disable(); trace_opal_entry(opcode, args); (*depth)--; out: local_irq_restore(flags); } void __trace_opal_exit(long opcode, unsigned long retval) { unsigned long flags; unsigned int *depth; local_irq_save(flags); depth = this_cpu_ptr(&opal_trace_depth); if (*depth) goto out; (*depth)++; trace_opal_exit(opcode, retval); preempt_enable(); (*depth)--; out: local_irq_restore(flags); }
linux-master
arch/powerpc/platforms/powernv/opal-tracepoints.c
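__trace_opal_entry()/__trace_opal_exit() above guard against recursion with a per-CPU depth counter: if tracing is already in progress on this CPU, the nested event is simply dropped rather than traced again. The sketch below shows the same guard in plain C, using a C11 thread-local counter in place of a per-CPU variable; all names are invented and nothing here is the kernel's tracing code.

#include <stdio.h>

static _Thread_local unsigned int trace_depth;

static void do_trace(const char *what)
{
	/* the "real" trace emission; in the kernel this may itself make calls
	 * that would re-enter the tracing path */
	printf("trace: %s\n", what);
}

static void trace_event(const char *what)
{
	if (trace_depth)	/* already tracing on this thread: drop the event */
		return;

	trace_depth++;
	do_trace(what);		/* any nested trace_event() call lands in the check above */
	trace_depth--;
}

int main(void)
{
	trace_event("opal_entry");
	return 0;
}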
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <linux/sizes.h>
#include <linux/debugfs.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/xics.h>
#include <asm/firmware.h>
#include <asm/pnv-pci.h>
#include <asm/mmzone.h>
#include <asm/xive.h>

#include <misc/cxl-base.h>

#include "powernv.h"
#include "pci.h"
#include "../../../../drivers/pci/pci.h"

/* This array is indexed with enum pnv_phb_type */
static const char * const pnv_phb_names[] = { "IODA2", "NPU_OCAPI" };

static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
static void pnv_pci_configure_bus(struct pci_bus *bus);

void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
			    const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char pfix[32];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (pe->flags & PNV_IODA_PE_DEV)
		strscpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
	else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
		sprintf(pfix, "%04x:%02x ",
			pci_domain_nr(pe->pbus), pe->pbus->number);
#ifdef CONFIG_PCI_IOV
	else if (pe->flags & PNV_IODA_PE_VF)
		sprintf(pfix, "%04x:%02x:%2x.%d",
			pci_domain_nr(pe->parent_dev->bus),
			(pe->rid & 0xff00) >> 8,
			PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
#endif /* CONFIG_PCI_IOV*/

	printk("%spci %s: [PE# %.2x] %pV",
	       level, pfix, pe->pe_number, &vaf);

	va_end(args);
}

static bool pnv_iommu_bypass_disabled __read_mostly;
static bool pci_reset_phbs __read_mostly;

static int __init iommu_setup(char *str)
{
	if (!str)
		return -EINVAL;

	while (*str) {
		if (!strncmp(str, "nobypass", 8)) {
			pnv_iommu_bypass_disabled = true;
			pr_info("PowerNV: IOMMU bypass window disabled.\n");
			break;
		}
		str += strcspn(str, ",");
		if (*str == ',')
			str++;
	}

	return 0;
}
early_param("iommu", iommu_setup);

static int __init pci_reset_phbs_setup(char *str)
{
	pci_reset_phbs = true;
	return 0;
}

early_param("ppc_pci_reset_phbs", pci_reset_phbs_setup);

static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
{
	s64 rc;

	phb->ioda.pe_array[pe_no].phb = phb;
	phb->ioda.pe_array[pe_no].pe_number = pe_no;
	phb->ioda.pe_array[pe_no].dma_setup_done = false;

	/*
	 * Clear the PE frozen state as it might be put into frozen state
	 * in the last PCI remove path. It's not harmful to do so when the
	 * PE is already in unfrozen state.
*/ rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED) pr_warn("%s: Error %lld unfreezing PHB#%x-PE#%x\n", __func__, rc, phb->hose->global_number, pe_no); return &phb->ioda.pe_array[pe_no]; } static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no) { if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe_num)) { pr_warn("%s: Invalid PE %x on PHB#%x\n", __func__, pe_no, phb->hose->global_number); return; } mutex_lock(&phb->ioda.pe_alloc_mutex); if (test_and_set_bit(pe_no, phb->ioda.pe_alloc)) pr_debug("%s: PE %x was reserved on PHB#%x\n", __func__, pe_no, phb->hose->global_number); mutex_unlock(&phb->ioda.pe_alloc_mutex); pnv_ioda_init_pe(phb, pe_no); } struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb, int count) { struct pnv_ioda_pe *ret = NULL; int run = 0, pe, i; mutex_lock(&phb->ioda.pe_alloc_mutex); /* scan backwards for a run of @count cleared bits */ for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) { if (test_bit(pe, phb->ioda.pe_alloc)) { run = 0; continue; } run++; if (run == count) break; } if (run != count) goto out; for (i = pe; i < pe + count; i++) { set_bit(i, phb->ioda.pe_alloc); pnv_ioda_init_pe(phb, i); } ret = &phb->ioda.pe_array[pe]; out: mutex_unlock(&phb->ioda.pe_alloc_mutex); return ret; } void pnv_ioda_free_pe(struct pnv_ioda_pe *pe) { struct pnv_phb *phb = pe->phb; unsigned int pe_num = pe->pe_number; WARN_ON(pe->pdev); memset(pe, 0, sizeof(struct pnv_ioda_pe)); mutex_lock(&phb->ioda.pe_alloc_mutex); clear_bit(pe_num, phb->ioda.pe_alloc); mutex_unlock(&phb->ioda.pe_alloc_mutex); } /* The default M64 BAR is shared by all PEs */ static int pnv_ioda2_init_m64(struct pnv_phb *phb) { const char *desc; struct resource *r; s64 rc; /* Configure the default M64 BAR */ rc = opal_pci_set_phb_mem_window(phb->opal_id, OPAL_M64_WINDOW_TYPE, phb->ioda.m64_bar_idx, phb->ioda.m64_base, 0, /* unused */ phb->ioda.m64_size); if (rc != OPAL_SUCCESS) { desc = "configuring"; goto fail; } /* Enable the default M64 BAR */ rc = opal_pci_phb_mmio_enable(phb->opal_id, OPAL_M64_WINDOW_TYPE, phb->ioda.m64_bar_idx, OPAL_ENABLE_M64_SPLIT); if (rc != OPAL_SUCCESS) { desc = "enabling"; goto fail; } /* * Exclude the segments for reserved and root bus PE, which * are first or last two PEs. 
*/ r = &phb->hose->mem_resources[1]; if (phb->ioda.reserved_pe_idx == 0) r->start += (2 * phb->ioda.m64_segsize); else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1)) r->end -= (2 * phb->ioda.m64_segsize); else pr_warn(" Cannot strip M64 segment for reserved PE#%x\n", phb->ioda.reserved_pe_idx); return 0; fail: pr_warn(" Failure %lld %s M64 BAR#%d\n", rc, desc, phb->ioda.m64_bar_idx); opal_pci_phb_mmio_enable(phb->opal_id, OPAL_M64_WINDOW_TYPE, phb->ioda.m64_bar_idx, OPAL_DISABLE_M64); return -EIO; } static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev, unsigned long *pe_bitmap) { struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); struct resource *r; resource_size_t base, sgsz, start, end; int segno, i; base = phb->ioda.m64_base; sgsz = phb->ioda.m64_segsize; for (i = 0; i <= PCI_ROM_RESOURCE; i++) { r = &pdev->resource[i]; if (!r->parent || !pnv_pci_is_m64(phb, r)) continue; start = ALIGN_DOWN(r->start - base, sgsz); end = ALIGN(r->end - base, sgsz); for (segno = start / sgsz; segno < end / sgsz; segno++) { if (pe_bitmap) set_bit(segno, pe_bitmap); else pnv_ioda_reserve_pe(phb, segno); } } } static void pnv_ioda_reserve_m64_pe(struct pci_bus *bus, unsigned long *pe_bitmap, bool all) { struct pci_dev *pdev; list_for_each_entry(pdev, &bus->devices, bus_list) { pnv_ioda_reserve_dev_m64_pe(pdev, pe_bitmap); if (all && pdev->subordinate) pnv_ioda_reserve_m64_pe(pdev->subordinate, pe_bitmap, all); } } static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all) { struct pnv_phb *phb = pci_bus_to_pnvhb(bus); struct pnv_ioda_pe *master_pe, *pe; unsigned long size, *pe_alloc; int i; /* Root bus shouldn't use M64 */ if (pci_is_root_bus(bus)) return NULL; /* Allocate bitmap */ size = ALIGN(phb->ioda.total_pe_num / 8, sizeof(unsigned long)); pe_alloc = kzalloc(size, GFP_KERNEL); if (!pe_alloc) { pr_warn("%s: Out of memory !\n", __func__); return NULL; } /* Figure out reserved PE numbers by the PE */ pnv_ioda_reserve_m64_pe(bus, pe_alloc, all); /* * the current bus might not own M64 window and that's all * contributed by its child buses. For the case, we needn't * pick M64 dependent PE#. */ if (bitmap_empty(pe_alloc, phb->ioda.total_pe_num)) { kfree(pe_alloc); return NULL; } /* * Figure out the master PE and put all slave PEs to master * PE's list to form compound PE. */ master_pe = NULL; i = -1; while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe_num, i + 1)) < phb->ioda.total_pe_num) { pe = &phb->ioda.pe_array[i]; phb->ioda.m64_segmap[pe->pe_number] = pe->pe_number; if (!master_pe) { pe->flags |= PNV_IODA_PE_MASTER; INIT_LIST_HEAD(&pe->slaves); master_pe = pe; } else { pe->flags |= PNV_IODA_PE_SLAVE; pe->master = master_pe; list_add_tail(&pe->list, &master_pe->slaves); } } kfree(pe_alloc); return master_pe; } static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb) { struct pci_controller *hose = phb->hose; struct device_node *dn = hose->dn; struct resource *res; u32 m64_range[2], i; const __be32 *r; u64 pci_addr; if (phb->type != PNV_PHB_IODA2) { pr_info(" Not support M64 window\n"); return; } if (!firmware_has_feature(FW_FEATURE_OPAL)) { pr_info(" Firmware too old to support M64 window\n"); return; } r = of_get_property(dn, "ibm,opal-m64-window", NULL); if (!r) { pr_info(" No <ibm,opal-m64-window> on %pOF\n", dn); return; } /* * Find the available M64 BAR range and pickup the last one for * covering the whole 64-bits space. We support only one range. 
*/ if (of_property_read_u32_array(dn, "ibm,opal-available-m64-ranges", m64_range, 2)) { /* In absence of the property, assume 0..15 */ m64_range[0] = 0; m64_range[1] = 16; } /* We only support 64 bits in our allocator */ if (m64_range[1] > 63) { pr_warn("%s: Limiting M64 range to 63 (from %d) on PHB#%x\n", __func__, m64_range[1], phb->hose->global_number); m64_range[1] = 63; } /* Empty range, no m64 */ if (m64_range[1] <= m64_range[0]) { pr_warn("%s: M64 empty, disabling M64 usage on PHB#%x\n", __func__, phb->hose->global_number); return; } /* Configure M64 informations */ res = &hose->mem_resources[1]; res->name = dn->full_name; res->start = of_translate_address(dn, r + 2); res->end = res->start + of_read_number(r + 4, 2) - 1; res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH); pci_addr = of_read_number(r, 2); hose->mem_offset[1] = res->start - pci_addr; phb->ioda.m64_size = resource_size(res); phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe_num; phb->ioda.m64_base = pci_addr; /* This lines up nicely with the display from processing OF ranges */ pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx (M64 #%d..%d)\n", res->start, res->end, pci_addr, m64_range[0], m64_range[0] + m64_range[1] - 1); /* Mark all M64 used up by default */ phb->ioda.m64_bar_alloc = (unsigned long)-1; /* Use last M64 BAR to cover M64 window */ m64_range[1]--; phb->ioda.m64_bar_idx = m64_range[0] + m64_range[1]; pr_info(" Using M64 #%d as default window\n", phb->ioda.m64_bar_idx); /* Mark remaining ones free */ for (i = m64_range[0]; i < m64_range[1]; i++) clear_bit(i, &phb->ioda.m64_bar_alloc); /* * Setup init functions for M64 based on IODA version, IODA3 uses * the IODA2 code. */ phb->init_m64 = pnv_ioda2_init_m64; } static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no) { struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no]; struct pnv_ioda_pe *slave; s64 rc; /* Fetch master PE */ if (pe->flags & PNV_IODA_PE_SLAVE) { pe = pe->master; if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER))) return; pe_no = pe->pe_number; } /* Freeze master PE */ rc = opal_pci_eeh_freeze_set(phb->opal_id, pe_no, OPAL_EEH_ACTION_SET_FREEZE_ALL); if (rc != OPAL_SUCCESS) { pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n", __func__, rc, phb->hose->global_number, pe_no); return; } /* Freeze slave PEs */ if (!(pe->flags & PNV_IODA_PE_MASTER)) return; list_for_each_entry(slave, &pe->slaves, list) { rc = opal_pci_eeh_freeze_set(phb->opal_id, slave->pe_number, OPAL_EEH_ACTION_SET_FREEZE_ALL); if (rc != OPAL_SUCCESS) pr_warn("%s: Failure %lld freezing PHB#%x-PE#%x\n", __func__, rc, phb->hose->global_number, slave->pe_number); } } static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt) { struct pnv_ioda_pe *pe, *slave; s64 rc; /* Find master PE */ pe = &phb->ioda.pe_array[pe_no]; if (pe->flags & PNV_IODA_PE_SLAVE) { pe = pe->master; WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)); pe_no = pe->pe_number; } /* Clear frozen state for master PE */ rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no, opt); if (rc != OPAL_SUCCESS) { pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n", __func__, rc, opt, phb->hose->global_number, pe_no); return -EIO; } if (!(pe->flags & PNV_IODA_PE_MASTER)) return 0; /* Clear frozen state for slave PEs */ list_for_each_entry(slave, &pe->slaves, list) { rc = opal_pci_eeh_freeze_clear(phb->opal_id, slave->pe_number, opt); if (rc != OPAL_SUCCESS) { pr_warn("%s: Failure %lld clear %d on PHB#%x-PE#%x\n", __func__, rc, opt, phb->hose->global_number, 
slave->pe_number); return -EIO; } } return 0; } static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no) { struct pnv_ioda_pe *slave, *pe; u8 fstate = 0, state; __be16 pcierr = 0; s64 rc; /* Sanity check on PE number */ if (pe_no < 0 || pe_no >= phb->ioda.total_pe_num) return OPAL_EEH_STOPPED_PERM_UNAVAIL; /* * Fetch the master PE and the PE instance might be * not initialized yet. */ pe = &phb->ioda.pe_array[pe_no]; if (pe->flags & PNV_IODA_PE_SLAVE) { pe = pe->master; WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)); pe_no = pe->pe_number; } /* Check the master PE */ rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no, &state, &pcierr, NULL); if (rc != OPAL_SUCCESS) { pr_warn("%s: Failure %lld getting " "PHB#%x-PE#%x state\n", __func__, rc, phb->hose->global_number, pe_no); return OPAL_EEH_STOPPED_TEMP_UNAVAIL; } /* Check the slave PE */ if (!(pe->flags & PNV_IODA_PE_MASTER)) return state; list_for_each_entry(slave, &pe->slaves, list) { rc = opal_pci_eeh_freeze_status(phb->opal_id, slave->pe_number, &fstate, &pcierr, NULL); if (rc != OPAL_SUCCESS) { pr_warn("%s: Failure %lld getting " "PHB#%x-PE#%x state\n", __func__, rc, phb->hose->global_number, slave->pe_number); return OPAL_EEH_STOPPED_TEMP_UNAVAIL; } /* * Override the result based on the ascending * priority. */ if (fstate > state) state = fstate; } return state; } struct pnv_ioda_pe *pnv_pci_bdfn_to_pe(struct pnv_phb *phb, u16 bdfn) { int pe_number = phb->ioda.pe_rmap[bdfn]; if (pe_number == IODA_INVALID_PE) return NULL; return &phb->ioda.pe_array[pe_number]; } struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev) { struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus); struct pci_dn *pdn = pci_get_pdn(dev); if (!pdn) return NULL; if (pdn->pe_number == IODA_INVALID_PE) return NULL; return &phb->ioda.pe_array[pdn->pe_number]; } static int pnv_ioda_set_one_peltv(struct pnv_phb *phb, struct pnv_ioda_pe *parent, struct pnv_ioda_pe *child, bool is_add) { const char *desc = is_add ? "adding" : "removing"; uint8_t op = is_add ? OPAL_ADD_PE_TO_DOMAIN : OPAL_REMOVE_PE_FROM_DOMAIN; struct pnv_ioda_pe *slave; long rc; /* Parent PE affects child PE */ rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number, child->pe_number, op); if (rc != OPAL_SUCCESS) { pe_warn(child, "OPAL error %ld %s to parent PELTV\n", rc, desc); return -ENXIO; } if (!(child->flags & PNV_IODA_PE_MASTER)) return 0; /* Compound case: parent PE affects slave PEs */ list_for_each_entry(slave, &child->slaves, list) { rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number, slave->pe_number, op); if (rc != OPAL_SUCCESS) { pe_warn(slave, "OPAL error %ld %s to parent PELTV\n", rc, desc); return -ENXIO; } } return 0; } static int pnv_ioda_set_peltv(struct pnv_phb *phb, struct pnv_ioda_pe *pe, bool is_add) { struct pnv_ioda_pe *slave; struct pci_dev *pdev = NULL; int ret; /* * Clear PE frozen state. If it's master PE, we need * clear slave PE frozen state as well. */ if (is_add) { opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number, OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); if (pe->flags & PNV_IODA_PE_MASTER) { list_for_each_entry(slave, &pe->slaves, list) opal_pci_eeh_freeze_clear(phb->opal_id, slave->pe_number, OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); } } /* * Associate PE in PELT. We need add the PE into the * corresponding PELT-V as well. Otherwise, the error * originated from the PE might contribute to other * PEs. 
*/ ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add); if (ret) return ret; /* For compound PEs, any one affects all of them */ if (pe->flags & PNV_IODA_PE_MASTER) { list_for_each_entry(slave, &pe->slaves, list) { ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add); if (ret) return ret; } } if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS)) pdev = pe->pbus->self; else if (pe->flags & PNV_IODA_PE_DEV) pdev = pe->pdev->bus->self; #ifdef CONFIG_PCI_IOV else if (pe->flags & PNV_IODA_PE_VF) pdev = pe->parent_dev; #endif /* CONFIG_PCI_IOV */ while (pdev) { struct pci_dn *pdn = pci_get_pdn(pdev); struct pnv_ioda_pe *parent; if (pdn && pdn->pe_number != IODA_INVALID_PE) { parent = &phb->ioda.pe_array[pdn->pe_number]; ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add); if (ret) return ret; } pdev = pdev->bus->self; } return 0; } static void pnv_ioda_unset_peltv(struct pnv_phb *phb, struct pnv_ioda_pe *pe, struct pci_dev *parent) { int64_t rc; while (parent) { struct pci_dn *pdn = pci_get_pdn(parent); if (pdn && pdn->pe_number != IODA_INVALID_PE) { rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number, pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN); /* XXX What to do in case of error ? */ } parent = parent->bus->self; } opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number, OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); /* Disassociate PE in PELT */ rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number, pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN); if (rc) pe_warn(pe, "OPAL error %lld remove self from PELTV\n", rc); } int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) { struct pci_dev *parent; uint8_t bcomp, dcomp, fcomp; int64_t rc; long rid_end, rid; /* Currently, we just deconfigure VF PE. Bus PE will always there.*/ if (pe->pbus) { int count; dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER; fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER; parent = pe->pbus->self; if (pe->flags & PNV_IODA_PE_BUS_ALL) count = resource_size(&pe->pbus->busn_res); else count = 1; switch(count) { case 1: bcomp = OpalPciBusAll; break; case 2: bcomp = OpalPciBus7Bits; break; case 4: bcomp = OpalPciBus6Bits; break; case 8: bcomp = OpalPciBus5Bits; break; case 16: bcomp = OpalPciBus4Bits; break; case 32: bcomp = OpalPciBus3Bits; break; default: dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n", count); /* Do an exact match only */ bcomp = OpalPciBusAll; } rid_end = pe->rid + (count << 8); } else { #ifdef CONFIG_PCI_IOV if (pe->flags & PNV_IODA_PE_VF) parent = pe->parent_dev; else #endif parent = pe->pdev->bus->self; bcomp = OpalPciBusAll; dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER; fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER; rid_end = pe->rid + 1; } /* Clear the reverse map */ for (rid = pe->rid; rid < rid_end; rid++) phb->ioda.pe_rmap[rid] = IODA_INVALID_PE; /* * Release from all parents PELT-V. NPUs don't have a PELTV * table */ if (phb->type != PNV_PHB_NPU_OCAPI) pnv_ioda_unset_peltv(phb, pe, parent); rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid, bcomp, dcomp, fcomp, OPAL_UNMAP_PE); if (rc) pe_err(pe, "OPAL error %lld trying to setup PELT table\n", rc); pe->pbus = NULL; pe->pdev = NULL; #ifdef CONFIG_PCI_IOV pe->parent_dev = NULL; #endif return 0; } int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) { uint8_t bcomp, dcomp, fcomp; long rc, rid_end, rid; /* Bus validation ? 
*/ if (pe->pbus) { int count; dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER; fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER; if (pe->flags & PNV_IODA_PE_BUS_ALL) count = resource_size(&pe->pbus->busn_res); else count = 1; switch(count) { case 1: bcomp = OpalPciBusAll; break; case 2: bcomp = OpalPciBus7Bits; break; case 4: bcomp = OpalPciBus6Bits; break; case 8: bcomp = OpalPciBus5Bits; break; case 16: bcomp = OpalPciBus4Bits; break; case 32: bcomp = OpalPciBus3Bits; break; default: dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n", count); /* Do an exact match only */ bcomp = OpalPciBusAll; } rid_end = pe->rid + (count << 8); } else { bcomp = OpalPciBusAll; dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER; fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER; rid_end = pe->rid + 1; } /* * Associate PE in PELT. We need add the PE into the * corresponding PELT-V as well. Otherwise, the error * originated from the PE might contribute to other * PEs. */ rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid, bcomp, dcomp, fcomp, OPAL_MAP_PE); if (rc) { pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc); return -ENXIO; } /* * Configure PELTV. NPUs don't have a PELTV table so skip * configuration on them. */ if (phb->type != PNV_PHB_NPU_OCAPI) pnv_ioda_set_peltv(phb, pe, true); /* Setup reverse map */ for (rid = pe->rid; rid < rid_end; rid++) phb->ioda.pe_rmap[rid] = pe->pe_number; pe->mve_number = 0; return 0; } static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev) { struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus); struct pci_dn *pdn = pci_get_pdn(dev); struct pnv_ioda_pe *pe; if (!pdn) { pr_err("%s: Device tree node not associated properly\n", pci_name(dev)); return NULL; } if (pdn->pe_number != IODA_INVALID_PE) return NULL; pe = pnv_ioda_alloc_pe(phb, 1); if (!pe) { pr_warn("%s: Not enough PE# available, disabling device\n", pci_name(dev)); return NULL; } /* NOTE: We don't get a reference for the pointer in the PE * data structure, both the device and PE structures should be * destroyed at the same time. * * At some point we want to remove the PDN completely anyways */ pdn->pe_number = pe->pe_number; pe->flags = PNV_IODA_PE_DEV; pe->pdev = dev; pe->pbus = NULL; pe->mve_number = -1; pe->rid = dev->bus->number << 8 | pdn->devfn; pe->device_count++; pe_info(pe, "Associated device to PE\n"); if (pnv_ioda_configure_pe(phb, pe)) { /* XXX What do we do here ? */ pnv_ioda_free_pe(pe); pdn->pe_number = IODA_INVALID_PE; pe->pdev = NULL; return NULL; } /* Put PE to the list */ mutex_lock(&phb->ioda.pe_list_mutex); list_add_tail(&pe->list, &phb->ioda.pe_list); mutex_unlock(&phb->ioda.pe_list_mutex); return pe; } /* * There're 2 types of PCI bus sensitive PEs: One that is compromised of * single PCI bus. Another one that contains the primary PCI bus and its * subordinate PCI devices and buses. The second type of PE is normally * orgiriated by PCIe-to-PCI bridge or PLX switch downstream ports. */ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all) { struct pnv_phb *phb = pci_bus_to_pnvhb(bus); struct pnv_ioda_pe *pe = NULL; unsigned int pe_num; /* * In partial hotplug case, the PE instance might be still alive. * We should reuse it instead of allocating a new one. 
*/ pe_num = phb->ioda.pe_rmap[bus->number << 8]; if (WARN_ON(pe_num != IODA_INVALID_PE)) { pe = &phb->ioda.pe_array[pe_num]; return NULL; } /* PE number for root bus should have been reserved */ if (pci_is_root_bus(bus)) pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx]; /* Check if PE is determined by M64 */ if (!pe) pe = pnv_ioda_pick_m64_pe(bus, all); /* The PE number isn't pinned by M64 */ if (!pe) pe = pnv_ioda_alloc_pe(phb, 1); if (!pe) { pr_warn("%s: Not enough PE# available for PCI bus %04x:%02x\n", __func__, pci_domain_nr(bus), bus->number); return NULL; } pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS); pe->pbus = bus; pe->pdev = NULL; pe->mve_number = -1; pe->rid = bus->busn_res.start << 8; if (all) pe_info(pe, "Secondary bus %pad..%pad associated with PE#%x\n", &bus->busn_res.start, &bus->busn_res.end, pe->pe_number); else pe_info(pe, "Secondary bus %pad associated with PE#%x\n", &bus->busn_res.start, pe->pe_number); if (pnv_ioda_configure_pe(phb, pe)) { /* XXX What do we do here ? */ pnv_ioda_free_pe(pe); pe->pbus = NULL; return NULL; } /* Put PE to the list */ list_add_tail(&pe->list, &phb->ioda.pe_list); return pe; } static void pnv_pci_ioda_dma_dev_setup(struct pci_dev *pdev) { struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); struct pci_dn *pdn = pci_get_pdn(pdev); struct pnv_ioda_pe *pe; /* Check if the BDFN for this device is associated with a PE yet */ pe = pnv_pci_bdfn_to_pe(phb, pci_dev_id(pdev)); if (!pe) { /* VF PEs should be pre-configured in pnv_pci_sriov_enable() */ if (WARN_ON(pdev->is_virtfn)) return; pnv_pci_configure_bus(pdev->bus); pe = pnv_pci_bdfn_to_pe(phb, pci_dev_id(pdev)); pci_info(pdev, "Configured PE#%x\n", pe ? pe->pe_number : 0xfffff); /* * If we can't setup the IODA PE something has gone horribly * wrong and we can't enable DMA for the device. */ if (WARN_ON(!pe)) return; } else { pci_info(pdev, "Added to existing PE#%x\n", pe->pe_number); } /* * We assume that bridges *probably* don't need to do any DMA so we can * skip allocating a TCE table, etc unless we get a non-bridge device. */ if (!pe->dma_setup_done && !pci_is_bridge(pdev)) { switch (phb->type) { case PNV_PHB_IODA2: pnv_pci_ioda2_setup_dma_pe(phb, pe); break; default: pr_warn("%s: No DMA for PHB#%x (type %d)\n", __func__, phb->hose->global_number, phb->type); } } if (pdn) pdn->pe_number = pe->pe_number; pe->device_count++; WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops); pdev->dev.archdata.dma_offset = pe->tce_bypass_base; set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]); /* PEs with a DMA weight of zero won't have a group */ if (pe->table_group.group) iommu_add_device(&pe->table_group, &pdev->dev); } /* * Reconfigure TVE#0 to be usable as 64-bit DMA space. * * The first 4GB of virtual memory for a PE is reserved for 32-bit accesses. * Devices can only access more than that if bit 59 of the PCI address is set * by hardware, which indicates TVE#1 should be used instead of TVE#0. * Many PCI devices are not capable of addressing that many bits, and as a * result are limited to the 4GB of virtual memory made available to 32-bit * devices in TVE#0. * * In order to work around this, reconfigure TVE#0 to be suitable for 64-bit * devices by configuring the virtual memory past the first 4GB inaccessible * by 64-bit DMAs. This should only be used by devices that want more than * 4GB, and only on PEs that have no 32-bit devices. * * Currently this will only work on PHB3 (POWER8). 
*/ static int pnv_pci_ioda_dma_64bit_bypass(struct pnv_ioda_pe *pe) { u64 window_size, table_size, tce_count, addr; struct page *table_pages; u64 tce_order = 28; /* 256MB TCEs */ __be64 *tces; s64 rc; /* * Window size needs to be a power of two, but needs to account for * shifting memory by the 4GB offset required to skip 32bit space. */ window_size = roundup_pow_of_two(memory_hotplug_max() + (1ULL << 32)); tce_count = window_size >> tce_order; table_size = tce_count << 3; if (table_size < PAGE_SIZE) table_size = PAGE_SIZE; table_pages = alloc_pages_node(pe->phb->hose->node, GFP_KERNEL, get_order(table_size)); if (!table_pages) goto err; tces = page_address(table_pages); if (!tces) goto err; memset(tces, 0, table_size); for (addr = 0; addr < memory_hotplug_max(); addr += (1 << tce_order)) { tces[(addr + (1ULL << 32)) >> tce_order] = cpu_to_be64(addr | TCE_PCI_READ | TCE_PCI_WRITE); } rc = opal_pci_map_pe_dma_window(pe->phb->opal_id, pe->pe_number, /* reconfigure window 0 */ (pe->pe_number << 1) + 0, 1, __pa(tces), table_size, 1 << tce_order); if (rc == OPAL_SUCCESS) { pe_info(pe, "Using 64-bit DMA iommu bypass (through TVE#0)\n"); return 0; } err: pe_err(pe, "Error configuring 64-bit DMA bypass\n"); return -EIO; } static bool pnv_pci_ioda_iommu_bypass_supported(struct pci_dev *pdev, u64 dma_mask) { struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); struct pci_dn *pdn = pci_get_pdn(pdev); struct pnv_ioda_pe *pe; if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE)) return false; pe = &phb->ioda.pe_array[pdn->pe_number]; if (pe->tce_bypass_enabled) { u64 top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1; if (dma_mask >= top) return true; } /* * If the device can't set the TCE bypass bit but still wants * to access 4GB or more, on PHB3 we can reconfigure TVE#0 to * bypass the 32-bit region and be usable for 64-bit DMAs. * The device needs to be able to address all of this space. 
*/ if (dma_mask >> 32 && dma_mask > (memory_hotplug_max() + (1ULL << 32)) && /* pe->pdev should be set if it's a single device, pe->pbus if not */ (pe->device_count == 1 || !pe->pbus) && phb->model == PNV_PHB_MODEL_PHB3) { /* Configure the bypass mode */ s64 rc = pnv_pci_ioda_dma_64bit_bypass(pe); if (rc) return false; /* 4GB offset bypasses 32-bit space */ pdev->dev.archdata.dma_offset = (1ULL << 32); return true; } return false; } static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb) { return phb->regs + 0x210; } #ifdef CONFIG_IOMMU_API /* Common for IODA1 and IODA2 */ static int pnv_ioda_tce_xchg_no_kill(struct iommu_table *tbl, long index, unsigned long *hpa, enum dma_data_direction *direction) { return pnv_tce_xchg(tbl, index, hpa, direction); } #endif #define PHB3_TCE_KILL_INVAL_ALL PPC_BIT(0) #define PHB3_TCE_KILL_INVAL_PE PPC_BIT(1) #define PHB3_TCE_KILL_INVAL_ONE PPC_BIT(2) static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe) { /* 01xb - invalidate TCEs that match the specified PE# */ __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb); unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF); mb(); /* Ensure above stores are visible */ __raw_writeq_be(val, invalidate); } static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, unsigned shift, unsigned long index, unsigned long npages) { __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb); unsigned long start, end, inc; /* We'll invalidate DMA address in PE scope */ start = PHB3_TCE_KILL_INVAL_ONE; start |= (pe->pe_number & 0xFF); end = start; /* Figure out the start, end and step */ start |= (index << shift); end |= ((index + npages - 1) << shift); inc = (0x1ull << shift); mb(); while (start <= end) { __raw_writeq_be(start, invalidate); start += inc; } } static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe) { struct pnv_phb *phb = pe->phb; if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs) pnv_pci_phb3_tce_invalidate_pe(pe); else opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL_PE, pe->pe_number, 0, 0, 0); } static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl, unsigned long index, unsigned long npages) { struct iommu_table_group_link *tgl; list_for_each_entry_lockless(tgl, &tbl->it_group_list, next) { struct pnv_ioda_pe *pe = container_of(tgl->table_group, struct pnv_ioda_pe, table_group); struct pnv_phb *phb = pe->phb; unsigned int shift = tbl->it_page_shift; if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs) pnv_pci_phb3_tce_invalidate(pe, shift, index, npages); else opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL_PAGES, pe->pe_number, 1u << shift, index << shift, npages); } } static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, unsigned long attrs) { int ret = pnv_tce_build(tbl, index, npages, uaddr, direction, attrs); if (!ret) pnv_pci_ioda2_tce_invalidate(tbl, index, npages); return ret; } static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index, long npages) { pnv_tce_free(tbl, index, npages); pnv_pci_ioda2_tce_invalidate(tbl, index, npages); } static struct iommu_table_ops pnv_ioda2_iommu_ops = { .set = pnv_ioda2_tce_build, #ifdef CONFIG_IOMMU_API .xchg_no_kill = pnv_ioda_tce_xchg_no_kill, .tce_kill = pnv_pci_ioda2_tce_invalidate, .useraddrptr = pnv_tce_useraddrptr, #endif .clear = pnv_ioda2_tce_free, .get = pnv_tce_get, .free = pnv_pci_ioda2_table_free_pages, }; static long pnv_pci_ioda2_set_window(struct 
iommu_table_group *table_group, int num, struct iommu_table *tbl) { struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, table_group); struct pnv_phb *phb = pe->phb; int64_t rc; const unsigned long size = tbl->it_indirect_levels ? tbl->it_level_size : tbl->it_size; const __u64 start_addr = tbl->it_offset << tbl->it_page_shift; const __u64 win_size = tbl->it_size << tbl->it_page_shift; pe_info(pe, "Setting up window#%d %llx..%llx pg=%lx\n", num, start_addr, start_addr + win_size - 1, IOMMU_PAGE_SIZE(tbl)); /* * Map TCE table through TVT. The TVE index is the PE number * shifted by 1 bit for 32-bits DMA space. */ rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number, (pe->pe_number << 1) + num, tbl->it_indirect_levels + 1, __pa(tbl->it_base), size << 3, IOMMU_PAGE_SIZE(tbl)); if (rc) { pe_err(pe, "Failed to configure TCE table, err %lld\n", rc); return rc; } pnv_pci_link_table_and_group(phb->hose->node, num, tbl, &pe->table_group); pnv_pci_ioda2_tce_invalidate_pe(pe); return 0; } static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable) { uint16_t window_id = (pe->pe_number << 1 ) + 1; int64_t rc; pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis"); if (enable) { phys_addr_t top = memblock_end_of_DRAM(); top = roundup_pow_of_two(top); rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, pe->pe_number, window_id, pe->tce_bypass_base, top); } else { rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, pe->pe_number, window_id, pe->tce_bypass_base, 0); } if (rc) pe_err(pe, "OPAL error %lld configuring bypass window\n", rc); else pe->tce_bypass_enabled = enable; } static long pnv_pci_ioda2_create_table(struct iommu_table_group *table_group, int num, __u32 page_shift, __u64 window_size, __u32 levels, bool alloc_userspace_copy, struct iommu_table **ptbl) { struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, table_group); int nid = pe->phb->hose->node; __u64 bus_offset = num ? pe->tce_bypass_base : table_group->tce32_start; long ret; struct iommu_table *tbl; tbl = pnv_pci_table_alloc(nid); if (!tbl) return -ENOMEM; tbl->it_ops = &pnv_ioda2_iommu_ops; ret = pnv_pci_ioda2_table_alloc_pages(nid, bus_offset, page_shift, window_size, levels, alloc_userspace_copy, tbl); if (ret) { iommu_tce_table_put(tbl); return ret; } *ptbl = tbl; return 0; } static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe) { struct iommu_table *tbl = NULL; long rc; unsigned long res_start, res_end; /* * crashkernel= specifies the kdump kernel's maximum memory at * some offset and there is no guaranteed the result is a power * of 2, which will cause errors later. */ const u64 max_memory = __rounddown_pow_of_two(memory_hotplug_max()); /* * In memory constrained environments, e.g. kdump kernel, the * DMA window can be larger than available memory, which will * cause errors later. */ const u64 maxblock = 1UL << (PAGE_SHIFT + MAX_ORDER); /* * We create the default window as big as we can. The constraint is * the max order of allocation possible. The TCE table is likely to * end up being multilevel and with on-demand allocation in place, * the initial use is not going to be huge as the default window aims * to support crippled devices (i.e. not fully 64bit DMAble) only. 
*/ /* iommu_table::it_map uses 1 bit per IOMMU page, hence 8 */ const u64 window_size = min((maxblock * 8) << PAGE_SHIFT, max_memory); /* Each TCE level cannot exceed maxblock so go multilevel if needed */ unsigned long tces_order = ilog2(window_size >> PAGE_SHIFT); unsigned long tcelevel_order = ilog2(maxblock >> 3); unsigned int levels = tces_order / tcelevel_order; if (tces_order % tcelevel_order) levels += 1; /* * We try to stick to default levels (which is >1 at the moment) in * order to save memory by relying on on-demain TCE level allocation. */ levels = max_t(unsigned int, levels, POWERNV_IOMMU_DEFAULT_LEVELS); rc = pnv_pci_ioda2_create_table(&pe->table_group, 0, PAGE_SHIFT, window_size, levels, false, &tbl); if (rc) { pe_err(pe, "Failed to create 32-bit TCE table, err %ld", rc); return rc; } /* We use top part of 32bit space for MMIO so exclude it from DMA */ res_start = 0; res_end = 0; if (window_size > pe->phb->ioda.m32_pci_base) { res_start = pe->phb->ioda.m32_pci_base >> tbl->it_page_shift; res_end = min(window_size, SZ_4G) >> tbl->it_page_shift; } tbl->it_index = (pe->phb->hose->global_number << 16) | pe->pe_number; if (iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end)) rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl); else rc = -ENOMEM; if (rc) { pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n", rc); iommu_tce_table_put(tbl); tbl = NULL; /* This clears iommu_table_base below */ } if (!pnv_iommu_bypass_disabled) pnv_pci_ioda2_set_bypass(pe, true); /* * Set table base for the case of IOMMU DMA use. Usually this is done * from dma_dev_setup() which is not called when a device is returned * from VFIO so do it here. */ if (pe->pdev) set_iommu_table_base(&pe->pdev->dev, tbl); return 0; } static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group, int num) { struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, table_group); struct pnv_phb *phb = pe->phb; long ret; pe_info(pe, "Removing DMA window #%d\n", num); ret = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number, (pe->pe_number << 1) + num, 0/* levels */, 0/* table address */, 0/* table size */, 0/* page size */); if (ret) pe_warn(pe, "Unmapping failed, ret = %ld\n", ret); else pnv_pci_ioda2_tce_invalidate_pe(pe); pnv_pci_unlink_table_and_group(table_group->tables[num], table_group); return ret; } #ifdef CONFIG_IOMMU_API unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift, __u64 window_size, __u32 levels) { unsigned long bytes = 0; const unsigned window_shift = ilog2(window_size); unsigned entries_shift = window_shift - page_shift; unsigned table_shift = entries_shift + 3; unsigned long tce_table_size = max(0x1000UL, 1UL << table_shift); unsigned long direct_table_size; if (!levels || (levels > POWERNV_IOMMU_MAX_LEVELS) || !is_power_of_2(window_size)) return 0; /* Calculate a direct table size from window_size and levels */ entries_shift = (entries_shift + levels - 1) / levels; table_shift = entries_shift + 3; table_shift = max_t(unsigned, table_shift, PAGE_SHIFT); direct_table_size = 1UL << table_shift; for ( ; levels; --levels) { bytes += ALIGN(tce_table_size, direct_table_size); tce_table_size /= direct_table_size; tce_table_size <<= 3; tce_table_size = max_t(unsigned long, tce_table_size, direct_table_size); } return bytes + bytes; /* one for HW table, one for userspace copy */ } static long pnv_pci_ioda2_create_table_userspace( struct iommu_table_group *table_group, int num, __u32 page_shift, __u64 window_size, __u32 levels, struct iommu_table 
**ptbl) { long ret = pnv_pci_ioda2_create_table(table_group, num, page_shift, window_size, levels, true, ptbl); if (!ret) (*ptbl)->it_allocated_size = pnv_pci_ioda2_get_table_size( page_shift, window_size, levels); return ret; } static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { set_iommu_table_base(&dev->dev, pe->table_group.tables[0]); dev->dev.archdata.dma_offset = pe->tce_bypass_base; if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) pnv_ioda_setup_bus_dma(pe, dev->subordinate); } } static long pnv_ioda2_take_ownership(struct iommu_table_group *table_group) { struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, table_group); /* Store @tbl as pnv_pci_ioda2_unset_window() resets it */ struct iommu_table *tbl = pe->table_group.tables[0]; /* * iommu_ops transfers the ownership per a device and we mode * the group ownership with the first device in the group. */ if (!tbl) return 0; pnv_pci_ioda2_set_bypass(pe, false); pnv_pci_ioda2_unset_window(&pe->table_group, 0); if (pe->pbus) pnv_ioda_setup_bus_dma(pe, pe->pbus); else if (pe->pdev) set_iommu_table_base(&pe->pdev->dev, NULL); iommu_tce_table_put(tbl); return 0; } static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group) { struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, table_group); /* See the comment about iommu_ops above */ if (pe->table_group.tables[0]) return; pnv_pci_ioda2_setup_default_config(pe); if (pe->pbus) pnv_ioda_setup_bus_dma(pe, pe->pbus); } static struct iommu_table_group_ops pnv_pci_ioda2_ops = { .get_table_size = pnv_pci_ioda2_get_table_size, .create_table = pnv_pci_ioda2_create_table_userspace, .set_window = pnv_pci_ioda2_set_window, .unset_window = pnv_pci_ioda2_unset_window, .take_ownership = pnv_ioda2_take_ownership, .release_ownership = pnv_ioda2_release_ownership, }; #endif void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) { int64_t rc; /* TVE #1 is selected by PCI address bit 59 */ pe->tce_bypass_base = 1ull << 59; /* The PE will reserve all possible 32-bits space */ pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n", phb->ioda.m32_pci_base); /* Setup linux iommu table */ pe->table_group.tce32_start = 0; pe->table_group.tce32_size = phb->ioda.m32_pci_base; pe->table_group.max_dynamic_windows_supported = IOMMU_TABLE_GROUP_MAX_TABLES; pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS; pe->table_group.pgsizes = pnv_ioda_parse_tce_sizes(phb); rc = pnv_pci_ioda2_setup_default_config(pe); if (rc) return; #ifdef CONFIG_IOMMU_API pe->table_group.ops = &pnv_pci_ioda2_ops; iommu_register_group(&pe->table_group, phb->hose->global_number, pe->pe_number); #endif pe->dma_setup_done = true; } /* * Called from KVM in real mode to EOI passthru interrupts. The ICP * EOI is handled directly in KVM in kvmppc_deliver_irq_passthru(). * * The IRQ data is mapped in the PCI-MSI domain and the EOI OPAL call * needs an HW IRQ number mapped in the XICS IRQ domain. The HW IRQ * numbers of the in-the-middle MSI domain are vector numbers and it's * good enough for OPAL. Use that. 
*/ int64_t pnv_opal_pci_msi_eoi(struct irq_data *d) { struct pci_controller *hose = irq_data_get_irq_chip_data(d->parent_data); struct pnv_phb *phb = hose->private_data; return opal_pci_msi_eoi(phb->opal_id, d->parent_data->hwirq); } /* * The IRQ data is mapped in the XICS domain, with OPAL HW IRQ numbers */ static void pnv_ioda2_msi_eoi(struct irq_data *d) { int64_t rc; unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); struct pci_controller *hose = irq_data_get_irq_chip_data(d); struct pnv_phb *phb = hose->private_data; rc = opal_pci_msi_eoi(phb->opal_id, hw_irq); WARN_ON_ONCE(rc); icp_native_eoi(d); } /* P8/CXL only */ void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq) { struct irq_data *idata; struct irq_chip *ichip; /* The MSI EOI OPAL call is only needed on PHB3 */ if (phb->model != PNV_PHB_MODEL_PHB3) return; if (!phb->ioda.irq_chip_init) { /* * First time we setup an MSI IRQ, we need to setup the * corresponding IRQ chip to route correctly. */ idata = irq_get_irq_data(virq); ichip = irq_data_get_irq_chip(idata); phb->ioda.irq_chip_init = 1; phb->ioda.irq_chip = *ichip; phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi; } irq_set_chip(virq, &phb->ioda.irq_chip); irq_set_chip_data(virq, phb->hose); } static struct irq_chip pnv_pci_msi_irq_chip; /* * Returns true iff chip is something that we could call * pnv_opal_pci_msi_eoi for. */ bool is_pnv_opal_msi(struct irq_chip *chip) { return chip == &pnv_pci_msi_irq_chip; } EXPORT_SYMBOL_GPL(is_pnv_opal_msi); static int __pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, unsigned int xive_num, unsigned int is_64, struct msi_msg *msg) { struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev); __be32 data; int rc; dev_dbg(&dev->dev, "%s: setup %s-bit MSI for vector #%d\n", __func__, is_64 ? "64" : "32", xive_num); /* No PE assigned ? bail out ... no MSI for you ! */ if (pe == NULL) return -ENXIO; /* Check if we have an MVE */ if (pe->mve_number < 0) return -ENXIO; /* Force 32-bit MSI on some broken devices */ if (dev->no_64bit_msi) is_64 = 0; /* Assign XIVE to PE */ rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num); if (rc) { pr_warn("%s: OPAL error %d setting XIVE %d PE\n", pci_name(dev), rc, xive_num); return -EIO; } if (is_64) { __be64 addr64; rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1, &addr64, &data); if (rc) { pr_warn("%s: OPAL error %d getting 64-bit MSI data\n", pci_name(dev), rc); return -EIO; } msg->address_hi = be64_to_cpu(addr64) >> 32; msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful; } else { __be32 addr32; rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1, &addr32, &data); if (rc) { pr_warn("%s: OPAL error %d getting 32-bit MSI data\n", pci_name(dev), rc); return -EIO; } msg->address_hi = 0; msg->address_lo = be32_to_cpu(addr32); } msg->data = be32_to_cpu(data); return 0; } /* * The msi_free() op is called before irq_domain_free_irqs_top() when * the handler data is still available. Use that to clear the XIVE * controller. 
*/ static void pnv_msi_ops_msi_free(struct irq_domain *domain, struct msi_domain_info *info, unsigned int irq) { if (xive_enabled()) xive_irq_free_data(irq); } static struct msi_domain_ops pnv_pci_msi_domain_ops = { .msi_free = pnv_msi_ops_msi_free, }; static void pnv_msi_shutdown(struct irq_data *d) { d = d->parent_data; if (d->chip->irq_shutdown) d->chip->irq_shutdown(d); } static void pnv_msi_mask(struct irq_data *d) { pci_msi_mask_irq(d); irq_chip_mask_parent(d); } static void pnv_msi_unmask(struct irq_data *d) { pci_msi_unmask_irq(d); irq_chip_unmask_parent(d); } static struct irq_chip pnv_pci_msi_irq_chip = { .name = "PNV-PCI-MSI", .irq_shutdown = pnv_msi_shutdown, .irq_mask = pnv_msi_mask, .irq_unmask = pnv_msi_unmask, .irq_eoi = irq_chip_eoi_parent, }; static struct msi_domain_info pnv_msi_domain_info = { .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), .ops = &pnv_pci_msi_domain_ops, .chip = &pnv_pci_msi_irq_chip, }; static void pnv_msi_compose_msg(struct irq_data *d, struct msi_msg *msg) { struct msi_desc *entry = irq_data_get_msi_desc(d); struct pci_dev *pdev = msi_desc_to_pci_dev(entry); struct pci_controller *hose = irq_data_get_irq_chip_data(d); struct pnv_phb *phb = hose->private_data; int rc; rc = __pnv_pci_ioda_msi_setup(phb, pdev, d->hwirq, entry->pci.msi_attrib.is_64, msg); if (rc) dev_err(&pdev->dev, "Failed to setup %s-bit MSI #%ld : %d\n", entry->pci.msi_attrib.is_64 ? "64" : "32", d->hwirq, rc); } /* * The IRQ data is mapped in the MSI domain in which HW IRQ numbers * correspond to vector numbers. */ static void pnv_msi_eoi(struct irq_data *d) { struct pci_controller *hose = irq_data_get_irq_chip_data(d); struct pnv_phb *phb = hose->private_data; if (phb->model == PNV_PHB_MODEL_PHB3) { /* * The EOI OPAL call takes an OPAL HW IRQ number but * since it is translated into a vector number in * OPAL, use that directly. 
*/ WARN_ON_ONCE(opal_pci_msi_eoi(phb->opal_id, d->hwirq)); } irq_chip_eoi_parent(d); } static struct irq_chip pnv_msi_irq_chip = { .name = "PNV-MSI", .irq_shutdown = pnv_msi_shutdown, .irq_mask = irq_chip_mask_parent, .irq_unmask = irq_chip_unmask_parent, .irq_eoi = pnv_msi_eoi, .irq_set_affinity = irq_chip_set_affinity_parent, .irq_compose_msi_msg = pnv_msi_compose_msg, }; static int pnv_irq_parent_domain_alloc(struct irq_domain *domain, unsigned int virq, int hwirq) { struct irq_fwspec parent_fwspec; int ret; parent_fwspec.fwnode = domain->parent->fwnode; parent_fwspec.param_count = 2; parent_fwspec.param[0] = hwirq; parent_fwspec.param[1] = IRQ_TYPE_EDGE_RISING; ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec); if (ret) return ret; return 0; } static int pnv_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { struct pci_controller *hose = domain->host_data; struct pnv_phb *phb = hose->private_data; msi_alloc_info_t *info = arg; struct pci_dev *pdev = msi_desc_to_pci_dev(info->desc); int hwirq; int i, ret; hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, nr_irqs); if (hwirq < 0) { dev_warn(&pdev->dev, "failed to find a free MSI\n"); return -ENOSPC; } dev_dbg(&pdev->dev, "%s bridge %pOF %d/%x #%d\n", __func__, hose->dn, virq, hwirq, nr_irqs); for (i = 0; i < nr_irqs; i++) { ret = pnv_irq_parent_domain_alloc(domain, virq + i, phb->msi_base + hwirq + i); if (ret) goto out; irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, &pnv_msi_irq_chip, hose); } return 0; out: irq_domain_free_irqs_parent(domain, virq, i - 1); msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, nr_irqs); return ret; } static void pnv_irq_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct pci_controller *hose = irq_data_get_irq_chip_data(d); struct pnv_phb *phb = hose->private_data; pr_debug("%s bridge %pOF %d/%lx #%d\n", __func__, hose->dn, virq, d->hwirq, nr_irqs); msi_bitmap_free_hwirqs(&phb->msi_bmp, d->hwirq, nr_irqs); /* XIVE domain is cleared through ->msi_free() */ } static const struct irq_domain_ops pnv_irq_domain_ops = { .alloc = pnv_irq_domain_alloc, .free = pnv_irq_domain_free, }; static int __init pnv_msi_allocate_domains(struct pci_controller *hose, unsigned int count) { struct pnv_phb *phb = hose->private_data; struct irq_domain *parent = irq_get_default_host(); hose->fwnode = irq_domain_alloc_named_id_fwnode("PNV-MSI", phb->opal_id); if (!hose->fwnode) return -ENOMEM; hose->dev_domain = irq_domain_create_hierarchy(parent, 0, count, hose->fwnode, &pnv_irq_domain_ops, hose); if (!hose->dev_domain) { pr_err("PCI: failed to create IRQ domain bridge %pOF (domain %d)\n", hose->dn, hose->global_number); irq_domain_free_fwnode(hose->fwnode); return -ENOMEM; } hose->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(hose->dn), &pnv_msi_domain_info, hose->dev_domain); if (!hose->msi_domain) { pr_err("PCI: failed to create MSI IRQ domain bridge %pOF (domain %d)\n", hose->dn, hose->global_number); irq_domain_free_fwnode(hose->fwnode); irq_domain_remove(hose->dev_domain); return -ENOMEM; } return 0; } static void __init pnv_pci_init_ioda_msis(struct pnv_phb *phb) { unsigned int count; const __be32 *prop = of_get_property(phb->hose->dn, "ibm,opal-msi-ranges", NULL); if (!prop) { /* BML Fallback */ prop = of_get_property(phb->hose->dn, "msi-ranges", NULL); } if (!prop) return; phb->msi_base = be32_to_cpup(prop); count = be32_to_cpup(prop + 1); if 
(msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) { pr_err("PCI %d: Failed to allocate MSI bitmap !\n", phb->hose->global_number); return; } pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n", count, phb->msi_base); pnv_msi_allocate_domains(phb->hose, count); } static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe, struct resource *res) { struct pnv_phb *phb = pe->phb; struct pci_bus_region region; int index; int64_t rc; if (!res || !res->flags || res->start > res->end || res->flags & IORESOURCE_UNSET) return; if (res->flags & IORESOURCE_IO) { region.start = res->start - phb->ioda.io_pci_base; region.end = res->end - phb->ioda.io_pci_base; index = region.start / phb->ioda.io_segsize; while (index < phb->ioda.total_pe_num && region.start <= region.end) { phb->ioda.io_segmap[index] = pe->pe_number; rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index); if (rc != OPAL_SUCCESS) { pr_err("%s: Error %lld mapping IO segment#%d to PE#%x\n", __func__, rc, index, pe->pe_number); break; } region.start += phb->ioda.io_segsize; index++; } } else if ((res->flags & IORESOURCE_MEM) && !pnv_pci_is_m64(phb, res)) { region.start = res->start - phb->hose->mem_offset[0] - phb->ioda.m32_pci_base; region.end = res->end - phb->hose->mem_offset[0] - phb->ioda.m32_pci_base; index = region.start / phb->ioda.m32_segsize; while (index < phb->ioda.total_pe_num && region.start <= region.end) { phb->ioda.m32_segmap[index] = pe->pe_number; rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index); if (rc != OPAL_SUCCESS) { pr_err("%s: Error %lld mapping M32 segment#%d to PE#%x", __func__, rc, index, pe->pe_number); break; } region.start += phb->ioda.m32_segsize; index++; } } } /* * This function is supposed to be called on basis of PE from top * to bottom style. So the I/O or MMIO segment assigned to * parent PE could be overridden by its child PEs if necessary. */ static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe) { struct pci_dev *pdev; int i; /* * NOTE: We only care PCI bus based PE for now. For PCI * device based PE, for example SRIOV sensitive VF should * be figured out later. */ BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))); list_for_each_entry(pdev, &pe->pbus->devices, bus_list) { for (i = 0; i <= PCI_ROM_RESOURCE; i++) pnv_ioda_setup_pe_res(pe, &pdev->resource[i]); /* * If the PE contains all subordinate PCI buses, the * windows of the child bridges should be mapped to * the PE as well. 
*/ if (!(pe->flags & PNV_IODA_PE_BUS_ALL) || !pci_is_bridge(pdev)) continue; for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) pnv_ioda_setup_pe_res(pe, &pdev->resource[PCI_BRIDGE_RESOURCES + i]); } } #ifdef CONFIG_DEBUG_FS static int pnv_pci_diag_data_set(void *data, u64 val) { struct pnv_phb *phb = data; s64 ret; /* Retrieve the diag data from firmware */ ret = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data, phb->diag_data_size); if (ret != OPAL_SUCCESS) return -EIO; /* Print the diag data to the kernel log */ pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(pnv_pci_diag_data_fops, NULL, pnv_pci_diag_data_set, "%llu\n"); static int pnv_pci_ioda_pe_dump(void *data, u64 val) { struct pnv_phb *phb = data; int pe_num; for (pe_num = 0; pe_num < phb->ioda.total_pe_num; pe_num++) { struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_num]; if (!test_bit(pe_num, phb->ioda.pe_alloc)) continue; pe_warn(pe, "rid: %04x dev count: %2d flags: %s%s%s%s%s%s\n", pe->rid, pe->device_count, (pe->flags & PNV_IODA_PE_DEV) ? "dev " : "", (pe->flags & PNV_IODA_PE_BUS) ? "bus " : "", (pe->flags & PNV_IODA_PE_BUS_ALL) ? "all " : "", (pe->flags & PNV_IODA_PE_MASTER) ? "master " : "", (pe->flags & PNV_IODA_PE_SLAVE) ? "slave " : "", (pe->flags & PNV_IODA_PE_VF) ? "vf " : ""); } return 0; } DEFINE_DEBUGFS_ATTRIBUTE(pnv_pci_ioda_pe_dump_fops, NULL, pnv_pci_ioda_pe_dump, "%llu\n"); #endif /* CONFIG_DEBUG_FS */ static void pnv_pci_ioda_create_dbgfs(void) { #ifdef CONFIG_DEBUG_FS struct pci_controller *hose, *tmp; struct pnv_phb *phb; char name[16]; list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { phb = hose->private_data; sprintf(name, "PCI%04x", hose->global_number); phb->dbgfs = debugfs_create_dir(name, arch_debugfs_dir); debugfs_create_file_unsafe("dump_diag_regs", 0200, phb->dbgfs, phb, &pnv_pci_diag_data_fops); debugfs_create_file_unsafe("dump_ioda_pe_state", 0200, phb->dbgfs, phb, &pnv_pci_ioda_pe_dump_fops); } #endif /* CONFIG_DEBUG_FS */ } static void pnv_pci_enable_bridge(struct pci_bus *bus) { struct pci_dev *dev = bus->self; struct pci_bus *child; /* Empty bus ? bail */ if (list_empty(&bus->devices)) return; /* * If there's a bridge associated with that bus enable it. This works * around races in the generic code if the enabling is done during * parallel probing. This can be removed once those races have been * fixed. */ if (dev) { int rc = pci_enable_device(dev); if (rc) pci_err(dev, "Error enabling bridge (%d)\n", rc); pci_set_master(dev); } /* Perform the same to child busses */ list_for_each_entry(child, &bus->children, node) pnv_pci_enable_bridge(child); } static void pnv_pci_enable_bridges(void) { struct pci_controller *hose; list_for_each_entry(hose, &hose_list, list_node) pnv_pci_enable_bridge(hose->bus); } static void pnv_pci_ioda_fixup(void) { pnv_pci_ioda_create_dbgfs(); pnv_pci_enable_bridges(); #ifdef CONFIG_EEH pnv_eeh_post_init(); #endif } /* * Returns the alignment for I/O or memory windows for P2P * bridges. That actually depends on how PEs are segmented. * For now, we return I/O or M32 segment size for PE sensitive * P2P bridges. Otherwise, the default values (4KiB for I/O, * 1MiB for memory) will be returned. * * The current PCI bus might be put into one PE, which was * create against the parent PCI bridge. For that case, we * needn't enlarge the alignment so that we can save some * resources. 
*/ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus, unsigned long type) { struct pnv_phb *phb = pci_bus_to_pnvhb(bus); int num_pci_bridges = 0; struct pci_dev *bridge; bridge = bus->self; while (bridge) { if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) { num_pci_bridges++; if (num_pci_bridges >= 2) return 1; } bridge = bridge->bus->self; } /* * We fall back to M32 if M64 isn't supported. We enforce the M64 * alignment for any 64-bit resource, PCIe doesn't care and * bridges only do 64-bit prefetchable anyway. */ if (phb->ioda.m64_segsize && pnv_pci_is_m64_flags(type)) return phb->ioda.m64_segsize; if (type & IORESOURCE_MEM) return phb->ioda.m32_segsize; return phb->ioda.io_segsize; } /* * We are updating root port or the upstream port of the * bridge behind the root port with PHB's windows in order * to accommodate the changes on required resources during * PCI (slot) hotplug, which is connected to either root * port or the downstream ports of PCIe switch behind the * root port. */ static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus, unsigned long type) { struct pci_controller *hose = pci_bus_to_host(bus); struct pnv_phb *phb = hose->private_data; struct pci_dev *bridge = bus->self; struct resource *r, *w; bool msi_region = false; int i; /* Check if we need apply fixup to the bridge's windows */ if (!pci_is_root_bus(bridge->bus) && !pci_is_root_bus(bridge->bus->self->bus)) return; /* Fixup the resources */ for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { r = &bridge->resource[PCI_BRIDGE_RESOURCES + i]; if (!r->flags || !r->parent) continue; w = NULL; if (r->flags & type & IORESOURCE_IO) w = &hose->io_resource; else if (pnv_pci_is_m64(phb, r) && (type & IORESOURCE_PREFETCH) && phb->ioda.m64_segsize) w = &hose->mem_resources[1]; else if (r->flags & type & IORESOURCE_MEM) { w = &hose->mem_resources[0]; msi_region = true; } r->start = w->start; r->end = w->end; /* The 64KB 32-bits MSI region shouldn't be included in * the 32-bits bridge window. Otherwise, we can see strange * issues. One of them is EEH error observed on Garrison. * * Exclude top 1MB region which is the minimal alignment of * 32-bits bridge window. */ if (msi_region) { r->end += 0x10000; r->end -= 0x100000; } } } static void pnv_pci_configure_bus(struct pci_bus *bus) { struct pci_dev *bridge = bus->self; struct pnv_ioda_pe *pe; bool all = (bridge && pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE); dev_info(&bus->dev, "Configuring PE for bus\n"); /* Don't assign PE to PCI bus, which doesn't have subordinate devices */ if (WARN_ON(list_empty(&bus->devices))) return; /* Reserve PEs according to used M64 resources */ pnv_ioda_reserve_m64_pe(bus, NULL, all); /* * Assign PE. We might run here because of partial hotplug. * For the case, we just pick up the existing PE and should * not allocate resources again. 
*/ pe = pnv_ioda_setup_bus_PE(bus, all); if (!pe) return; pnv_ioda_setup_pe_seg(pe); } static resource_size_t pnv_pci_default_alignment(void) { return PAGE_SIZE; } /* Prevent enabling devices for which we couldn't properly * assign a PE */ static bool pnv_pci_enable_device_hook(struct pci_dev *dev) { struct pci_dn *pdn; pdn = pci_get_pdn(dev); if (!pdn || pdn->pe_number == IODA_INVALID_PE) { pci_err(dev, "pci_enable_device() blocked, no PE assigned.\n"); return false; } return true; } static bool pnv_ocapi_enable_device_hook(struct pci_dev *dev) { struct pci_dn *pdn; struct pnv_ioda_pe *pe; pdn = pci_get_pdn(dev); if (!pdn) return false; if (pdn->pe_number == IODA_INVALID_PE) { pe = pnv_ioda_setup_dev_PE(dev); if (!pe) return false; } return true; } void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe) { struct iommu_table *tbl = pe->table_group.tables[0]; int64_t rc; if (!pe->dma_setup_done) return; rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0); if (rc) pe_warn(pe, "OPAL error %lld release DMA window\n", rc); pnv_pci_ioda2_set_bypass(pe, false); if (pe->table_group.group) { iommu_group_put(pe->table_group.group); WARN_ON(pe->table_group.group); } iommu_tce_table_put(tbl); } static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe, unsigned short win, unsigned int *map) { struct pnv_phb *phb = pe->phb; int idx; int64_t rc; for (idx = 0; idx < phb->ioda.total_pe_num; idx++) { if (map[idx] != pe->pe_number) continue; rc = opal_pci_map_pe_mmio_window(phb->opal_id, phb->ioda.reserved_pe_idx, win, 0, idx); if (rc != OPAL_SUCCESS) pe_warn(pe, "Error %lld unmapping (%d) segment#%d\n", rc, win, idx); map[idx] = IODA_INVALID_PE; } } static void pnv_ioda_release_pe_seg(struct pnv_ioda_pe *pe) { struct pnv_phb *phb = pe->phb; if (phb->type == PNV_PHB_IODA2) { pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE, phb->ioda.m32_segmap); } } static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe) { struct pnv_phb *phb = pe->phb; struct pnv_ioda_pe *slave, *tmp; pe_info(pe, "Releasing PE\n"); mutex_lock(&phb->ioda.pe_list_mutex); list_del(&pe->list); mutex_unlock(&phb->ioda.pe_list_mutex); switch (phb->type) { case PNV_PHB_IODA2: pnv_pci_ioda2_release_pe_dma(pe); break; case PNV_PHB_NPU_OCAPI: break; default: WARN_ON(1); } pnv_ioda_release_pe_seg(pe); pnv_ioda_deconfigure_pe(pe->phb, pe); /* Release slave PEs in the compound PE */ if (pe->flags & PNV_IODA_PE_MASTER) { list_for_each_entry_safe(slave, tmp, &pe->slaves, list) { list_del(&slave->list); pnv_ioda_free_pe(slave); } } /* * The PE for root bus can be removed because of hotplug in EEH * recovery for fenced PHB error. We need to mark the PE dead so * that it can be populated again in PCI hot add path. The PE * shouldn't be destroyed as it's the global reserved resource. */ if (phb->ioda.root_pe_idx == pe->pe_number) return; pnv_ioda_free_pe(pe); } static void pnv_pci_release_device(struct pci_dev *pdev) { struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); struct pci_dn *pdn = pci_get_pdn(pdev); struct pnv_ioda_pe *pe; /* The VF PE state is torn down when sriov_disable() is called */ if (pdev->is_virtfn) return; if (!pdn || pdn->pe_number == IODA_INVALID_PE) return; #ifdef CONFIG_PCI_IOV /* * FIXME: Try move this to sriov_disable(). It's here since we allocate * the iov state at probe time since we need to fiddle with the IOV * resources. */ if (pdev->is_physfn) kfree(pdev->dev.archdata.iov_data); #endif /* * PCI hotplug can happen as part of EEH error recovery. The @pdn * isn't removed and added afterwards in this scenario. 
We should * set the PE number in @pdn to an invalid one. Otherwise, the PE's * device count is decreased on removing devices while failing to * be increased on adding devices. It leads to unbalanced PE's device * count and eventually make normal PCI hotplug path broken. */ pe = &phb->ioda.pe_array[pdn->pe_number]; pdn->pe_number = IODA_INVALID_PE; WARN_ON(--pe->device_count < 0); if (pe->device_count == 0) pnv_ioda_release_pe(pe); } static void pnv_pci_ioda_shutdown(struct pci_controller *hose) { struct pnv_phb *phb = hose->private_data; opal_pci_reset(phb->opal_id, OPAL_RESET_PCI_IODA_TABLE, OPAL_ASSERT_RESET); } static void pnv_pci_ioda_dma_bus_setup(struct pci_bus *bus) { struct pnv_phb *phb = pci_bus_to_pnvhb(bus); struct pnv_ioda_pe *pe; list_for_each_entry(pe, &phb->ioda.pe_list, list) { if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))) continue; if (!pe->pbus) continue; if (bus->number == ((pe->rid >> 8) & 0xFF)) { pe->pbus = bus; break; } } } #ifdef CONFIG_IOMMU_API static struct iommu_group *pnv_pci_device_group(struct pci_controller *hose, struct pci_dev *pdev) { struct pnv_phb *phb = hose->private_data; struct pnv_ioda_pe *pe; if (WARN_ON(!phb)) return ERR_PTR(-ENODEV); pe = pnv_pci_bdfn_to_pe(phb, pci_dev_id(pdev)); if (!pe) return ERR_PTR(-ENODEV); if (!pe->table_group.group) return ERR_PTR(-ENODEV); return iommu_group_ref_get(pe->table_group.group); } #endif static const struct pci_controller_ops pnv_pci_ioda_controller_ops = { .dma_dev_setup = pnv_pci_ioda_dma_dev_setup, .dma_bus_setup = pnv_pci_ioda_dma_bus_setup, .iommu_bypass_supported = pnv_pci_ioda_iommu_bypass_supported, .enable_device_hook = pnv_pci_enable_device_hook, .release_device = pnv_pci_release_device, .window_alignment = pnv_pci_window_alignment, .setup_bridge = pnv_pci_fixup_bridge_resources, .reset_secondary_bus = pnv_pci_reset_secondary_bus, .shutdown = pnv_pci_ioda_shutdown, #ifdef CONFIG_IOMMU_API .device_group = pnv_pci_device_group, #endif }; static const struct pci_controller_ops pnv_npu_ocapi_ioda_controller_ops = { .enable_device_hook = pnv_ocapi_enable_device_hook, .release_device = pnv_pci_release_device, .window_alignment = pnv_pci_window_alignment, .reset_secondary_bus = pnv_pci_reset_secondary_bus, .shutdown = pnv_pci_ioda_shutdown, }; static void __init pnv_pci_init_ioda_phb(struct device_node *np, u64 hub_id, int ioda_type) { struct pci_controller *hose; struct pnv_phb *phb; unsigned long size, m64map_off, m32map_off, pemap_off; struct pnv_ioda_pe *root_pe; struct resource r; const __be64 *prop64; const __be32 *prop32; int len; unsigned int segno; u64 phb_id; void *aux; long rc; if (!of_device_is_available(np)) return; pr_info("Initializing %s PHB (%pOF)\n", pnv_phb_names[ioda_type], np); prop64 = of_get_property(np, "ibm,opal-phbid", NULL); if (!prop64) { pr_err(" Missing \"ibm,opal-phbid\" property !\n"); return; } phb_id = be64_to_cpup(prop64); pr_debug(" PHB-ID : 0x%016llx\n", phb_id); phb = kzalloc(sizeof(*phb), GFP_KERNEL); if (!phb) panic("%s: Failed to allocate %zu bytes\n", __func__, sizeof(*phb)); /* Allocate PCI controller */ phb->hose = hose = pcibios_alloc_controller(np); if (!phb->hose) { pr_err(" Can't allocate PCI controller for %pOF\n", np); memblock_free(phb, sizeof(struct pnv_phb)); return; } spin_lock_init(&phb->lock); prop32 = of_get_property(np, "bus-range", &len); if (prop32 && len == 8) { hose->first_busno = be32_to_cpu(prop32[0]); hose->last_busno = be32_to_cpu(prop32[1]); } else { pr_warn(" Broken <bus-range> on %pOF\n", np); hose->first_busno = 0; 
hose->last_busno = 0xff; } hose->private_data = phb; phb->hub_id = hub_id; phb->opal_id = phb_id; phb->type = ioda_type; mutex_init(&phb->ioda.pe_alloc_mutex); /* Detect specific models for error handling */ if (of_device_is_compatible(np, "ibm,p7ioc-pciex")) phb->model = PNV_PHB_MODEL_P7IOC; else if (of_device_is_compatible(np, "ibm,power8-pciex")) phb->model = PNV_PHB_MODEL_PHB3; else phb->model = PNV_PHB_MODEL_UNKNOWN; /* Initialize diagnostic data buffer */ prop32 = of_get_property(np, "ibm,phb-diag-data-size", NULL); if (prop32) phb->diag_data_size = be32_to_cpup(prop32); else phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE; phb->diag_data = kzalloc(phb->diag_data_size, GFP_KERNEL); if (!phb->diag_data) panic("%s: Failed to allocate %u bytes\n", __func__, phb->diag_data_size); /* Parse 32-bit and IO ranges (if any) */ pci_process_bridge_OF_ranges(hose, np, !hose->global_number); /* Get registers */ if (!of_address_to_resource(np, 0, &r)) { phb->regs_phys = r.start; phb->regs = ioremap(r.start, resource_size(&r)); if (phb->regs == NULL) pr_err(" Failed to map registers !\n"); } /* Initialize more IODA stuff */ phb->ioda.total_pe_num = 1; prop32 = of_get_property(np, "ibm,opal-num-pes", NULL); if (prop32) phb->ioda.total_pe_num = be32_to_cpup(prop32); prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL); if (prop32) phb->ioda.reserved_pe_idx = be32_to_cpup(prop32); /* Invalidate RID to PE# mapping */ for (segno = 0; segno < ARRAY_SIZE(phb->ioda.pe_rmap); segno++) phb->ioda.pe_rmap[segno] = IODA_INVALID_PE; /* Parse 64-bit MMIO range */ pnv_ioda_parse_m64_window(phb); phb->ioda.m32_size = resource_size(&hose->mem_resources[0]); /* FW Has already off top 64k of M32 space (MSI space) */ phb->ioda.m32_size += 0x10000; phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe_num; phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0]; phb->ioda.io_size = hose->pci_io_size; phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe_num; phb->ioda.io_pci_base = 0; /* XXX calculate this ? */ /* Allocate aux data & arrays. We don't have IO ports on PHB3 */ size = ALIGN(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8, sizeof(unsigned long)); m64map_off = size; size += phb->ioda.total_pe_num * sizeof(phb->ioda.m64_segmap[0]); m32map_off = size; size += phb->ioda.total_pe_num * sizeof(phb->ioda.m32_segmap[0]); pemap_off = size; size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe); aux = kzalloc(size, GFP_KERNEL); if (!aux) panic("%s: Failed to allocate %lu bytes\n", __func__, size); phb->ioda.pe_alloc = aux; phb->ioda.m64_segmap = aux + m64map_off; phb->ioda.m32_segmap = aux + m32map_off; for (segno = 0; segno < phb->ioda.total_pe_num; segno++) { phb->ioda.m64_segmap[segno] = IODA_INVALID_PE; phb->ioda.m32_segmap[segno] = IODA_INVALID_PE; } phb->ioda.pe_array = aux + pemap_off; /* * Choose PE number for root bus, which shouldn't have * M64 resources consumed by its child devices. To pick * the PE number adjacent to the reserved one if possible. 
*/ pnv_ioda_reserve_pe(phb, phb->ioda.reserved_pe_idx); if (phb->ioda.reserved_pe_idx == 0) { phb->ioda.root_pe_idx = 1; pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx); } else if (phb->ioda.reserved_pe_idx == (phb->ioda.total_pe_num - 1)) { phb->ioda.root_pe_idx = phb->ioda.reserved_pe_idx - 1; pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx); } else { /* otherwise just allocate one */ root_pe = pnv_ioda_alloc_pe(phb, 1); phb->ioda.root_pe_idx = root_pe->pe_number; } INIT_LIST_HEAD(&phb->ioda.pe_list); mutex_init(&phb->ioda.pe_list_mutex); #if 0 /* We should really do that ... */ rc = opal_pci_set_phb_mem_window(opal->phb_id, window_type, window_num, starting_real_address, starting_pci_address, segment_size); #endif pr_info(" %03d (%03d) PE's M32: 0x%x [segment=0x%x]\n", phb->ioda.total_pe_num, phb->ioda.reserved_pe_idx, phb->ioda.m32_size, phb->ioda.m32_segsize); if (phb->ioda.m64_size) pr_info(" M64: 0x%lx [segment=0x%lx]\n", phb->ioda.m64_size, phb->ioda.m64_segsize); if (phb->ioda.io_size) pr_info(" IO: 0x%x [segment=0x%x]\n", phb->ioda.io_size, phb->ioda.io_segsize); phb->hose->ops = &pnv_pci_ops; phb->get_pe_state = pnv_ioda_get_pe_state; phb->freeze_pe = pnv_ioda_freeze_pe; phb->unfreeze_pe = pnv_ioda_unfreeze_pe; /* Setup MSI support */ pnv_pci_init_ioda_msis(phb); /* * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here * to let the PCI core do resource assignment. It's supposed * that the PCI core will do correct I/O and MMIO alignment * for the P2P bridge bars so that each PCI bus (excluding * the child P2P bridges) can form individual PE. */ ppc_md.pcibios_fixup = pnv_pci_ioda_fixup; switch (phb->type) { case PNV_PHB_NPU_OCAPI: hose->controller_ops = pnv_npu_ocapi_ioda_controller_ops; break; default: hose->controller_ops = pnv_pci_ioda_controller_ops; } ppc_md.pcibios_default_alignment = pnv_pci_default_alignment; #ifdef CONFIG_PCI_IOV ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov; ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment; ppc_md.pcibios_sriov_enable = pnv_pcibios_sriov_enable; ppc_md.pcibios_sriov_disable = pnv_pcibios_sriov_disable; #endif pci_add_flags(PCI_REASSIGN_ALL_RSRC); /* Reset IODA tables to a clean state */ rc = opal_pci_reset(phb_id, OPAL_RESET_PCI_IODA_TABLE, OPAL_ASSERT_RESET); if (rc) pr_warn(" OPAL Error %ld performing IODA table reset !\n", rc); /* * If we're running in kdump kernel, the previous kernel never * shutdown PCI devices correctly. We already got IODA table * cleaned out. So we have to issue PHB reset to stop all PCI * transactions from previous kernel. The ppc_pci_reset_phbs * kernel parameter will force this reset too. Additionally, * if the IODA reset above failed then use a bigger hammer. * This can happen if we get a PHB fatal error in very early * boot. 
*/ if (is_kdump_kernel() || pci_reset_phbs || rc) { pr_info(" Issue PHB reset ...\n"); pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL); pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE); } /* Remove M64 resource if we can't configure it successfully */ if (!phb->init_m64 || phb->init_m64(phb)) hose->mem_resources[1].flags = 0; /* create pci_dn's for DT nodes under this PHB */ pci_devs_phb_init_dynamic(hose); } void __init pnv_pci_init_ioda2_phb(struct device_node *np) { pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2); } void __init pnv_pci_init_npu2_opencapi_phb(struct device_node *np) { pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU_OCAPI); } static void pnv_npu2_opencapi_cfg_size_fixup(struct pci_dev *dev) { struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus); if (!machine_is(powernv)) return; if (phb->type == PNV_PHB_NPU_OCAPI) dev->cfg_size = PCI_CFG_SPACE_EXP_SIZE; } DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pnv_npu2_opencapi_cfg_size_fixup);
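/*
 * Illustrative sketch only (not part of pci-ioda.c): a standalone userspace
 * program mirroring the per-PE segment-size and aux-buffer layout arithmetic
 * from pnv_pci_init_ioda_phb() above. The window sizes, the PE count and the
 * 64-byte stand-in for sizeof(struct pnv_ioda_pe) are made-up example values.
 */
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int total_pe_num = 256;		/* e.g. from "ibm,opal-num-pes" */
	unsigned long m32_size = 0x80000000UL;		/* example 2GB M32 window */
	unsigned long io_size = 0x10000UL;		/* example 64KB I/O window */
	unsigned long size, m64map_off, m32map_off, pemap_off;

	/* Each PE gets an equal slice of the M32 and I/O windows */
	unsigned long m32_segsize = m32_size / total_pe_num;
	unsigned long io_segsize = io_size / total_pe_num;

	/* Aux buffer: PE allocation bitmap, M64/M32 segment maps, then PE array */
	size = ALIGN_UP(total_pe_num / 8, sizeof(unsigned long));
	m64map_off = size;
	size += total_pe_num * sizeof(unsigned int);	/* m64_segmap[] stand-in */
	m32map_off = size;
	size += total_pe_num * sizeof(unsigned int);	/* m32_segmap[] stand-in */
	pemap_off = size;
	size += total_pe_num * 64UL;			/* pnv_ioda_pe size stand-in */

	printf("m32_segsize=0x%lx io_segsize=0x%lx\n", m32_segsize, io_segsize);
	printf("aux: m64map@0x%lx m32map@0x%lx pemap@0x%lx total=0x%lx\n",
	       m64map_off, m32map_off, pemap_off, size);
	return 0;
}
/* With the example numbers above, each PE gets an 8MB M32 segment and a 256-byte I/O segment. */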
linux-master
arch/powerpc/platforms/powernv/pci-ioda.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerNV Real Time Clock.
 *
 * Copyright 2011 IBM Corp.
 */

#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

#include <asm/opal.h>
#include <asm/firmware.h>
#include <asm/machdep.h>

static void __init opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm)
{
	tm->tm_year = ((bcd2bin(y_m_d >> 24) * 100) +
		       bcd2bin((y_m_d >> 16) & 0xff)) - 1900;
	tm->tm_mon  = bcd2bin((y_m_d >> 8) & 0xff) - 1;
	tm->tm_mday = bcd2bin(y_m_d & 0xff);
	tm->tm_hour = bcd2bin((h_m_s_ms >> 56) & 0xff);
	tm->tm_min  = bcd2bin((h_m_s_ms >> 48) & 0xff);
	tm->tm_sec  = bcd2bin((h_m_s_ms >> 40) & 0xff);
	tm->tm_wday = -1;
}

time64_t __init opal_get_boot_time(void)
{
	struct rtc_time tm;
	u32 y_m_d;
	u64 h_m_s_ms;
	__be32 __y_m_d;
	__be64 __h_m_s_ms;
	long rc = OPAL_BUSY;

	if (!opal_check_token(OPAL_RTC_READ))
		return 0;

	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
		if (rc == OPAL_BUSY_EVENT) {
			mdelay(OPAL_BUSY_DELAY_MS);
			opal_poll_events(NULL);
		} else if (rc == OPAL_BUSY) {
			mdelay(OPAL_BUSY_DELAY_MS);
		}
	}
	if (rc != OPAL_SUCCESS)
		return 0;

	y_m_d = be32_to_cpu(__y_m_d);
	h_m_s_ms = be64_to_cpu(__h_m_s_ms);
	opal_to_tm(y_m_d, h_m_s_ms, &tm);
	return rtc_tm_to_time64(&tm);
}

static __init int opal_time_init(void)
{
	struct platform_device *pdev;
	struct device_node *rtc;

	rtc = of_find_node_by_path("/ibm,opal/rtc");
	if (rtc) {
		pdev = of_platform_device_create(rtc, "opal-rtc", NULL);
		of_node_put(rtc);
	} else {
		if (opal_check_token(OPAL_RTC_READ) ||
		    opal_check_token(OPAL_READ_TPO))
			pdev = platform_device_register_simple("opal-rtc", -1,
							       NULL, 0);
		else
			return -ENODEV;
	}

	return PTR_ERR_OR_ZERO(pdev);
}
machine_subsys_initcall(powernv, opal_time_init);
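/*
 * Illustrative sketch only (not part of opal-rtc.c): a standalone program
 * unpacking the BCD-packed date/time words that opal_to_tm() above decodes.
 * The sample values are invented; bcd2bin() is open-coded here because the
 * kernel helper is not available in userspace.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int bcd2bin(unsigned int v)
{
	return (v >> 4) * 10 + (v & 0x0f);
}

int main(void)
{
	uint32_t y_m_d = 0x20231225;			/* BCD: 2023-12-25 */
	uint64_t h_m_s_ms = 0x0930450000000000ULL;	/* BCD: 09:30:45 */

	unsigned int year = bcd2bin(y_m_d >> 24) * 100 + bcd2bin((y_m_d >> 16) & 0xff);
	unsigned int mon  = bcd2bin((y_m_d >> 8) & 0xff);
	unsigned int day  = bcd2bin(y_m_d & 0xff);
	unsigned int hour = bcd2bin((h_m_s_ms >> 56) & 0xff);
	unsigned int min  = bcd2bin((h_m_s_ms >> 48) & 0xff);
	unsigned int sec  = bcd2bin((h_m_s_ms >> 40) & 0xff);

	/* Raw calendar values; the kernel additionally applies the struct rtc_time offsets */
	printf("%04u-%02u-%02u %02u:%02u:%02u\n", year, mon, day, hour, min, sec);
	return 0;
}
/* Prints "2023-12-25 09:30:45" for the sample words above. */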
linux-master
arch/powerpc/platforms/powernv/opal-rtc.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerNV OPAL Sensor-groups interface * * Copyright 2017 IBM Corp. */ #define pr_fmt(fmt) "opal-sensor-groups: " fmt #include <linux/of.h> #include <linux/kobject.h> #include <linux/slab.h> #include <asm/opal.h> static DEFINE_MUTEX(sg_mutex); static struct kobject *sg_kobj; struct sg_attr { u32 handle; struct kobj_attribute attr; }; static struct sensor_group { char name[20]; struct attribute_group sg; struct sg_attr *sgattrs; } *sgs; int sensor_group_enable(u32 handle, bool enable) { struct opal_msg msg; int token, ret; token = opal_async_get_token_interruptible(); if (token < 0) return token; ret = opal_sensor_group_enable(handle, token, enable); if (ret == OPAL_ASYNC_COMPLETION) { ret = opal_async_wait_response(token, &msg); if (ret) { pr_devel("Failed to wait for the async response\n"); ret = -EIO; goto out; } ret = opal_error_code(opal_get_async_rc(msg)); } else { ret = opal_error_code(ret); } out: opal_async_release_token(token); return ret; } EXPORT_SYMBOL_GPL(sensor_group_enable); static ssize_t sg_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { struct sg_attr *sattr = container_of(attr, struct sg_attr, attr); struct opal_msg msg; u32 data; int ret, token; ret = kstrtoint(buf, 0, &data); if (ret) return ret; if (data != 1) return -EINVAL; token = opal_async_get_token_interruptible(); if (token < 0) { pr_devel("Failed to get token\n"); return token; } ret = mutex_lock_interruptible(&sg_mutex); if (ret) goto out_token; ret = opal_sensor_group_clear(sattr->handle, token); switch (ret) { case OPAL_ASYNC_COMPLETION: ret = opal_async_wait_response(token, &msg); if (ret) { pr_devel("Failed to wait for the async response\n"); ret = -EIO; goto out; } ret = opal_error_code(opal_get_async_rc(msg)); if (!ret) ret = count; break; case OPAL_SUCCESS: ret = count; break; default: ret = opal_error_code(ret); } out: mutex_unlock(&sg_mutex); out_token: opal_async_release_token(token); return ret; } static struct sg_ops_info { int opal_no; const char *attr_name; ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count); } ops_info[] = { { OPAL_SENSOR_GROUP_CLEAR, "clear", sg_store }, }; static void add_attr(int handle, struct sg_attr *attr, int index) { attr->handle = handle; sysfs_attr_init(&attr->attr.attr); attr->attr.attr.name = ops_info[index].attr_name; attr->attr.attr.mode = 0220; attr->attr.store = ops_info[index].store; } static int __init add_attr_group(const __be32 *ops, int len, struct sensor_group *sg, u32 handle) { int i, j; int count = 0; for (i = 0; i < len; i++) for (j = 0; j < ARRAY_SIZE(ops_info); j++) if (be32_to_cpu(ops[i]) == ops_info[j].opal_no) { add_attr(handle, &sg->sgattrs[count], j); sg->sg.attrs[count] = &sg->sgattrs[count].attr.attr; count++; } return sysfs_create_group(sg_kobj, &sg->sg); } static int __init get_nr_attrs(const __be32 *ops, int len) { int i, j; int nr_attrs = 0; for (i = 0; i < len; i++) for (j = 0; j < ARRAY_SIZE(ops_info); j++) if (be32_to_cpu(ops[i]) == ops_info[j].opal_no) nr_attrs++; return nr_attrs; } void __init opal_sensor_groups_init(void) { struct device_node *sg, *node; int i = 0; sg = of_find_compatible_node(NULL, NULL, "ibm,opal-sensor-group"); if (!sg) { pr_devel("Sensor groups node not found\n"); return; } sgs = kcalloc(of_get_child_count(sg), sizeof(*sgs), GFP_KERNEL); if (!sgs) goto out_sg_put; sg_kobj = kobject_create_and_add("sensor_groups", opal_kobj); if (!sg_kobj) { pr_warn("Failed to create sensor group 
kobject\n"); goto out_sgs; } for_each_child_of_node(sg, node) { const __be32 *ops; u32 sgid, len, nr_attrs, chipid; ops = of_get_property(node, "ops", &len); if (!ops) continue; nr_attrs = get_nr_attrs(ops, len); if (!nr_attrs) continue; sgs[i].sgattrs = kcalloc(nr_attrs, sizeof(*sgs[i].sgattrs), GFP_KERNEL); if (!sgs[i].sgattrs) goto out_sgs_sgattrs; sgs[i].sg.attrs = kcalloc(nr_attrs + 1, sizeof(*sgs[i].sg.attrs), GFP_KERNEL); if (!sgs[i].sg.attrs) { kfree(sgs[i].sgattrs); goto out_sgs_sgattrs; } if (of_property_read_u32(node, "sensor-group-id", &sgid)) { pr_warn("sensor-group-id property not found\n"); goto out_sgs_sgattrs; } if (!of_property_read_u32(node, "ibm,chip-id", &chipid)) sprintf(sgs[i].name, "%pOFn%d", node, chipid); else sprintf(sgs[i].name, "%pOFn", node); sgs[i].sg.name = sgs[i].name; if (add_attr_group(ops, len, &sgs[i], sgid)) { pr_warn("Failed to create sensor attribute group %s\n", sgs[i].sg.name); goto out_sgs_sgattrs; } i++; } of_node_put(sg); return; out_sgs_sgattrs: while (--i >= 0) { kfree(sgs[i].sgattrs); kfree(sgs[i].sg.attrs); } kobject_put(sg_kobj); of_node_put(node); out_sgs: kfree(sgs); out_sg_put: of_node_put(sg); }
linux-master
arch/powerpc/platforms/powernv/opal-sensor-groups.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2013, Michael (Ellerman|Neuling), IBM Corporation. */ #define pr_fmt(fmt) "powernv: " fmt #include <linux/kernel.h> #include <linux/cpu.h> #include <linux/cpumask.h> #include <linux/device.h> #include <linux/gfp.h> #include <linux/smp.h> #include <linux/stop_machine.h> #include <asm/cputhreads.h> #include <asm/cpuidle.h> #include <asm/kvm_ppc.h> #include <asm/machdep.h> #include <asm/opal.h> #include <asm/smp.h> #include <trace/events/ipi.h> #include "subcore.h" #include "powernv.h" /* * Split/unsplit procedure: * * A core can be in one of three states, unsplit, 2-way split, and 4-way split. * * The mapping to subcores_per_core is simple: * * State | subcores_per_core * ------------|------------------ * Unsplit | 1 * 2-way split | 2 * 4-way split | 4 * * The core is split along thread boundaries, the mapping between subcores and * threads is as follows: * * Unsplit: * ---------------------------- * Subcore | 0 | * ---------------------------- * Thread | 0 1 2 3 4 5 6 7 | * ---------------------------- * * 2-way split: * ------------------------------------- * Subcore | 0 | 1 | * ------------------------------------- * Thread | 0 1 2 3 | 4 5 6 7 | * ------------------------------------- * * 4-way split: * ----------------------------------------- * Subcore | 0 | 1 | 2 | 3 | * ----------------------------------------- * Thread | 0 1 | 2 3 | 4 5 | 6 7 | * ----------------------------------------- * * * Transitions * ----------- * * It is not possible to transition between either of the split states, the * core must first be unsplit. The legal transitions are: * * ----------- --------------- * | | <----> | 2-way split | * | | --------------- * | Unsplit | * | | --------------- * | | <----> | 4-way split | * ----------- --------------- * * Unsplitting * ----------- * * Unsplitting is the simpler procedure. It requires thread 0 to request the * unsplit while all other threads NAP. * * Thread 0 clears HID0_POWER8_DYNLPARDIS (Dynamic LPAR Disable). This tells * the hardware that if all threads except 0 are napping, the hardware should * unsplit the core. * * Non-zero threads are sent to a NAP loop, they don't exit the loop until they * see the core unsplit. * * Core 0 spins waiting for the hardware to see all the other threads napping * and perform the unsplit. * * Once thread 0 sees the unsplit, it IPIs the secondary threads to wake them * out of NAP. They will then see the core unsplit and exit the NAP loop. * * Splitting * --------- * * The basic splitting procedure is fairly straight forward. However it is * complicated by the fact that after the split occurs, the newly created * subcores are not in a fully initialised state. * * Most notably the subcores do not have the correct value for SDR1, which * means they must not be running in virtual mode when the split occurs. The * subcores have separate timebases SPRs but these are pre-synchronised by * opal. * * To begin with secondary threads are sent to an assembly routine. There they * switch to real mode, so they are immune to the uninitialised SDR1 value. * Once in real mode they indicate that they are in real mode, and spin waiting * to see the core split. * * Thread 0 waits to see that all secondaries are in real mode, and then begins * the splitting procedure. It firstly sets HID0_POWER8_DYNLPARDIS, which * prevents the hardware from unsplitting. Then it sets the appropriate HID bit * to request the split, and spins waiting to see that the split has happened. 
* * Concurrently the secondaries will notice the split. When they do they set up * their SPRs, notably SDR1, and then they can return to virtual mode and exit * the procedure. */ /* Initialised at boot by subcore_init() */ static int subcores_per_core; /* * Used to communicate to offline cpus that we want them to pop out of the * offline loop and do a split or unsplit. * * 0 - no split happening * 1 - unsplit in progress * 2 - split to 2 in progress * 4 - split to 4 in progress */ static int new_split_mode; static cpumask_var_t cpu_offline_mask; struct split_state { u8 step; u8 master; }; static DEFINE_PER_CPU(struct split_state, split_state); static void wait_for_sync_step(int step) { int i, cpu = smp_processor_id(); for (i = cpu + 1; i < cpu + threads_per_core; i++) while(per_cpu(split_state, i).step < step) barrier(); /* Order the wait loop vs any subsequent loads/stores. */ mb(); } static void update_hid_in_slw(u64 hid0) { u64 idle_states = pnv_get_supported_cpuidle_states(); if (idle_states & OPAL_PM_WINKLE_ENABLED) { /* OPAL call to patch slw with the new HID0 value */ u64 cpu_pir = hard_smp_processor_id(); opal_slw_set_reg(cpu_pir, SPRN_HID0, hid0); } } static inline void update_power8_hid0(unsigned long hid0) { /* * The HID0 update on Power8 should at the very least be * preceded by a SYNC instruction followed by an ISYNC * instruction */ asm volatile("sync; mtspr %0,%1; isync":: "i"(SPRN_HID0), "r"(hid0)); } static void unsplit_core(void) { u64 hid0, mask; int i, cpu; mask = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE; cpu = smp_processor_id(); if (cpu_thread_in_core(cpu) != 0) { while (mfspr(SPRN_HID0) & mask) power7_idle_type(PNV_THREAD_NAP); per_cpu(split_state, cpu).step = SYNC_STEP_UNSPLIT; return; } hid0 = mfspr(SPRN_HID0); hid0 &= ~HID0_POWER8_DYNLPARDIS; update_power8_hid0(hid0); update_hid_in_slw(hid0); while (mfspr(SPRN_HID0) & mask) cpu_relax(); /* Wake secondaries out of NAP */ for (i = cpu + 1; i < cpu + threads_per_core; i++) smp_send_reschedule(i); wait_for_sync_step(SYNC_STEP_UNSPLIT); } static void split_core(int new_mode) { struct { u64 value; u64 mask; } split_parms[2] = { { HID0_POWER8_1TO2LPAR, HID0_POWER8_2LPARMODE }, { HID0_POWER8_1TO4LPAR, HID0_POWER8_4LPARMODE } }; int i, cpu; u64 hid0; /* Convert new_mode (2 or 4) into an index into our parms array */ i = (new_mode >> 1) - 1; BUG_ON(i < 0 || i > 1); cpu = smp_processor_id(); if (cpu_thread_in_core(cpu) != 0) { split_core_secondary_loop(&per_cpu(split_state, cpu).step); return; } wait_for_sync_step(SYNC_STEP_REAL_MODE); /* Write new mode */ hid0 = mfspr(SPRN_HID0); hid0 |= HID0_POWER8_DYNLPARDIS | split_parms[i].value; update_power8_hid0(hid0); update_hid_in_slw(hid0); /* Wait for it to happen */ while (!(mfspr(SPRN_HID0) & split_parms[i].mask)) cpu_relax(); } static void cpu_do_split(int new_mode) { /* * At boot subcores_per_core will be 0, so we will always unsplit at * boot. In the usual case where the core is already unsplit it's a * nop, and this just ensures the kernel's notion of the mode is * consistent with the hardware. */ if (subcores_per_core != 1) unsplit_core(); if (new_mode != 1) split_core(new_mode); mb(); per_cpu(split_state, smp_processor_id()).step = SYNC_STEP_FINISHED; } bool cpu_core_split_required(void) { smp_rmb(); if (!new_split_mode) return false; cpu_do_split(new_split_mode); return true; } void update_subcore_sibling_mask(void) { int cpu; /* * sibling mask for the first cpu. Left shift this by required bits * to get sibling mask for the rest of the cpus. 
*/ int sibling_mask_first_cpu = (1 << threads_per_subcore) - 1; for_each_possible_cpu(cpu) { int tid = cpu_thread_in_core(cpu); int offset = (tid / threads_per_subcore) * threads_per_subcore; int mask = sibling_mask_first_cpu << offset; paca_ptrs[cpu]->subcore_sibling_mask = mask; } } static int cpu_update_split_mode(void *data) { int cpu, new_mode = *(int *)data; if (this_cpu_ptr(&split_state)->master) { new_split_mode = new_mode; smp_wmb(); cpumask_andnot(cpu_offline_mask, cpu_present_mask, cpu_online_mask); /* This should work even though the cpu is offline */ for_each_cpu(cpu, cpu_offline_mask) smp_send_reschedule(cpu); } cpu_do_split(new_mode); if (this_cpu_ptr(&split_state)->master) { /* Wait for all cpus to finish before we touch subcores_per_core */ for_each_present_cpu(cpu) { if (cpu >= setup_max_cpus) break; while(per_cpu(split_state, cpu).step < SYNC_STEP_FINISHED) barrier(); } new_split_mode = 0; /* Make the new mode public */ subcores_per_core = new_mode; threads_per_subcore = threads_per_core / subcores_per_core; update_subcore_sibling_mask(); /* Make sure the new mode is written before we exit */ mb(); } return 0; } static int set_subcores_per_core(int new_mode) { struct split_state *state; int cpu; if (kvm_hv_mode_active()) { pr_err("Unable to change split core mode while KVM active.\n"); return -EBUSY; } /* * We are only called at boot, or from the sysfs write. If that ever * changes we'll need a lock here. */ BUG_ON(new_mode < 1 || new_mode > 4 || new_mode == 3); for_each_present_cpu(cpu) { state = &per_cpu(split_state, cpu); state->step = SYNC_STEP_INITIAL; state->master = 0; } cpus_read_lock(); /* This cpu will update the globals before exiting stop machine */ this_cpu_ptr(&split_state)->master = 1; /* Ensure state is consistent before we call the other cpus */ mb(); stop_machine_cpuslocked(cpu_update_split_mode, &new_mode, cpu_online_mask); cpus_read_unlock(); return 0; } static ssize_t __used store_subcores_per_core(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long val; int rc; /* We are serialised by the attribute lock */ rc = sscanf(buf, "%lx", &val); if (rc != 1) return -EINVAL; switch (val) { case 1: case 2: case 4: if (subcores_per_core == val) /* Nothing to do */ goto out; break; default: return -EINVAL; } rc = set_subcores_per_core(val); if (rc) return rc; out: return count; } static ssize_t show_subcores_per_core(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%x\n", subcores_per_core); } static DEVICE_ATTR(subcores_per_core, 0644, show_subcores_per_core, store_subcores_per_core); static int subcore_init(void) { struct device *dev_root; unsigned pvr_ver; int rc = 0; pvr_ver = PVR_VER(mfspr(SPRN_PVR)); if (pvr_ver != PVR_POWER8 && pvr_ver != PVR_POWER8E && pvr_ver != PVR_POWER8NVL) return 0; /* * We need all threads in a core to be present to split/unsplit so * continue only if max_cpus are aligned to threads_per_core. */ if (setup_max_cpus % threads_per_core) return 0; BUG_ON(!alloc_cpumask_var(&cpu_offline_mask, GFP_KERNEL)); set_subcores_per_core(1); dev_root = bus_get_dev_root(&cpu_subsys); if (dev_root) { rc = device_create_file(dev_root, &dev_attr_subcores_per_core); put_device(dev_root); } return rc; } machine_device_initcall(powernv, subcore_init);
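/*
 * Illustrative sketch only (not part of subcore.c): the sibling-mask
 * arithmetic from update_subcore_sibling_mask() above, evaluated for a
 * single 8-thread core in the unsplit, 2-way and 4-way modes described in
 * the comment block at the top of the file.
 */
#include <stdio.h>

int main(void)
{
	int threads_per_core = 8;
	int modes[] = { 1, 2, 4 };	/* subcores_per_core */
	int m, tid;

	for (m = 0; m < 3; m++) {
		int threads_per_subcore = threads_per_core / modes[m];
		int first_mask = (1 << threads_per_subcore) - 1;

		printf("subcores_per_core=%d:", modes[m]);
		for (tid = 0; tid < threads_per_core; tid++) {
			/* Shift the thread-0 mask up to this thread's subcore */
			int offset = (tid / threads_per_subcore) * threads_per_subcore;

			printf(" t%d=0x%02x", tid, first_mask << offset);
		}
		printf("\n");
	}
	return 0;
}
/*
 * For the 4-way split this prints t0,t1=0x03  t2,t3=0x0c  t4,t5=0x30
 * t6,t7=0xc0, matching the subcore/thread table in the comment above.
 */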
linux-master
arch/powerpc/platforms/powernv/subcore.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerNV OPAL Firmware Update Interface * * Copyright 2013 IBM Corp. */ #define DEBUG #include <linux/kernel.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/kobject.h> #include <linux/sysfs.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/delay.h> #include <asm/opal.h> /* FLASH status codes */ #define FLASH_NO_OP -1099 /* No operation initiated by user */ #define FLASH_NO_AUTH -9002 /* Not a service authority partition */ /* Validate image status values */ #define VALIDATE_IMG_READY -1001 /* Image ready for validation */ #define VALIDATE_IMG_INCOMPLETE -1002 /* User copied < VALIDATE_BUF_SIZE */ /* Manage image status values */ #define MANAGE_ACTIVE_ERR -9001 /* Cannot overwrite active img */ /* Flash image status values */ #define FLASH_IMG_READY 0 /* Img ready for flash on reboot */ #define FLASH_INVALID_IMG -1003 /* Flash image shorter than expected */ #define FLASH_IMG_NULL_DATA -1004 /* Bad data in sg list entry */ #define FLASH_IMG_BAD_LEN -1005 /* Bad length in sg list entry */ /* Manage operation tokens */ #define FLASH_REJECT_TMP_SIDE 0 /* Reject temporary fw image */ #define FLASH_COMMIT_TMP_SIDE 1 /* Commit temporary fw image */ /* Update tokens */ #define FLASH_UPDATE_CANCEL 0 /* Cancel update request */ #define FLASH_UPDATE_INIT 1 /* Initiate update */ /* Validate image update result tokens */ #define VALIDATE_TMP_UPDATE 0 /* T side will be updated */ #define VALIDATE_FLASH_AUTH 1 /* Partition does not have authority */ #define VALIDATE_INVALID_IMG 2 /* Candidate image is not valid */ #define VALIDATE_CUR_UNKNOWN 3 /* Current fixpack level is unknown */ /* * Current T side will be committed to P side before being replace with new * image, and the new image is downlevel from current image */ #define VALIDATE_TMP_COMMIT_DL 4 /* * Current T side will be committed to P side before being replaced with new * image */ #define VALIDATE_TMP_COMMIT 5 /* * T side will be updated with a downlevel image */ #define VALIDATE_TMP_UPDATE_DL 6 /* * The candidate image's release date is later than the system's firmware * service entitlement date - service warranty period has expired */ #define VALIDATE_OUT_OF_WRNTY 7 /* Validate buffer size */ #define VALIDATE_BUF_SIZE 4096 /* XXX: Assume candidate image size is <= 1GB */ #define MAX_IMAGE_SIZE 0x40000000 /* Image status */ enum { IMAGE_INVALID, IMAGE_LOADING, IMAGE_READY, }; /* Candidate image data */ struct image_data_t { int status; void *data; uint32_t size; }; /* Candidate image header */ struct image_header_t { uint16_t magic; uint16_t version; uint32_t size; }; struct validate_flash_t { int status; /* Return status */ void *buf; /* Candidate image buffer */ uint32_t buf_size; /* Image size */ uint32_t result; /* Update results token */ }; struct manage_flash_t { int status; /* Return status */ }; struct update_flash_t { int status; /* Return status */ }; static struct image_header_t image_header; static struct image_data_t image_data; static struct validate_flash_t validate_flash_data; static struct manage_flash_t manage_flash_data; /* Initialize update_flash_data status to No Operation */ static struct update_flash_t update_flash_data = { .status = FLASH_NO_OP, }; static DEFINE_MUTEX(image_data_mutex); /* * Validate candidate image */ static inline void opal_flash_validate(void) { long ret; void *buf = validate_flash_data.buf; __be32 size = cpu_to_be32(validate_flash_data.buf_size); __be32 result; 
ret = opal_validate_flash(__pa(buf), &size, &result); validate_flash_data.status = ret; validate_flash_data.buf_size = be32_to_cpu(size); validate_flash_data.result = be32_to_cpu(result); } /* * Validate output format: * validate result token * current image version details * new image version details */ static ssize_t validate_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct validate_flash_t *args_buf = &validate_flash_data; int len; /* Candidate image is not validated */ if (args_buf->status < VALIDATE_TMP_UPDATE) { len = sprintf(buf, "%d\n", args_buf->status); goto out; } /* Result token */ len = sprintf(buf, "%d\n", args_buf->result); /* Current and candidate image version details */ if ((args_buf->result != VALIDATE_TMP_UPDATE) && (args_buf->result < VALIDATE_CUR_UNKNOWN)) goto out; if (args_buf->buf_size > (VALIDATE_BUF_SIZE - len)) { memcpy(buf + len, args_buf->buf, VALIDATE_BUF_SIZE - len); len = VALIDATE_BUF_SIZE; } else { memcpy(buf + len, args_buf->buf, args_buf->buf_size); len += args_buf->buf_size; } out: /* Set status to default */ args_buf->status = FLASH_NO_OP; return len; } /* * Validate candidate firmware image * * Note: * We are only interested in first 4K bytes of the * candidate image. */ static ssize_t validate_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { struct validate_flash_t *args_buf = &validate_flash_data; if (buf[0] != '1') return -EINVAL; mutex_lock(&image_data_mutex); if (image_data.status != IMAGE_READY || image_data.size < VALIDATE_BUF_SIZE) { args_buf->result = VALIDATE_INVALID_IMG; args_buf->status = VALIDATE_IMG_INCOMPLETE; goto out; } /* Copy first 4k bytes of candidate image */ memcpy(args_buf->buf, image_data.data, VALIDATE_BUF_SIZE); args_buf->status = VALIDATE_IMG_READY; args_buf->buf_size = VALIDATE_BUF_SIZE; /* Validate candidate image */ opal_flash_validate(); out: mutex_unlock(&image_data_mutex); return count; } /* * Manage flash routine */ static inline void opal_flash_manage(uint8_t op) { struct manage_flash_t *const args_buf = &manage_flash_data; args_buf->status = opal_manage_flash(op); } /* * Show manage flash status */ static ssize_t manage_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct manage_flash_t *const args_buf = &manage_flash_data; int rc; rc = sprintf(buf, "%d\n", args_buf->status); /* Set status to default*/ args_buf->status = FLASH_NO_OP; return rc; } /* * Manage operations: * 0 - Reject * 1 - Commit */ static ssize_t manage_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { uint8_t op; switch (buf[0]) { case '0': op = FLASH_REJECT_TMP_SIDE; break; case '1': op = FLASH_COMMIT_TMP_SIDE; break; default: return -EINVAL; } /* commit/reject temporary image */ opal_flash_manage(op); return count; } /* * OPAL update flash */ static int opal_flash_update(int op) { struct opal_sg_list *list; unsigned long addr; int64_t rc = OPAL_PARAMETER; if (op == FLASH_UPDATE_CANCEL) { pr_alert("FLASH: Image update cancelled\n"); addr = '\0'; goto flash; } list = opal_vmalloc_to_sg_list(image_data.data, image_data.size); if (!list) goto invalid_img; /* First entry address */ addr = __pa(list); flash: rc = opal_update_flash(addr); invalid_img: return rc; } /* This gets called just before system reboots */ void opal_flash_update_print_message(void) { if (update_flash_data.status != FLASH_IMG_READY) return; pr_alert("FLASH: Flashing new firmware\n"); pr_alert("FLASH: Image is %u bytes\n", image_data.size); 
pr_alert("FLASH: Performing flash and reboot/shutdown\n"); pr_alert("FLASH: This will take several minutes. Do not power off!\n"); /* Small delay to help getting the above message out */ msleep(500); } /* * Show candidate image status */ static ssize_t update_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct update_flash_t *const args_buf = &update_flash_data; return sprintf(buf, "%d\n", args_buf->status); } /* * Set update image flag * 1 - Flash new image * 0 - Cancel flash request */ static ssize_t update_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { struct update_flash_t *const args_buf = &update_flash_data; int rc = count; mutex_lock(&image_data_mutex); switch (buf[0]) { case '0': if (args_buf->status == FLASH_IMG_READY) opal_flash_update(FLASH_UPDATE_CANCEL); args_buf->status = FLASH_NO_OP; break; case '1': /* Image is loaded? */ if (image_data.status == IMAGE_READY) args_buf->status = opal_flash_update(FLASH_UPDATE_INIT); else args_buf->status = FLASH_INVALID_IMG; break; default: rc = -EINVAL; } mutex_unlock(&image_data_mutex); return rc; } /* * Free image buffer */ static void free_image_buf(void) { void *addr; int size; addr = image_data.data; size = PAGE_ALIGN(image_data.size); while (size > 0) { ClearPageReserved(vmalloc_to_page(addr)); addr += PAGE_SIZE; size -= PAGE_SIZE; } vfree(image_data.data); image_data.data = NULL; image_data.status = IMAGE_INVALID; } /* * Allocate image buffer. */ static int alloc_image_buf(char *buffer, size_t count) { void *addr; int size; if (count < sizeof(image_header)) { pr_warn("FLASH: Invalid candidate image\n"); return -EINVAL; } memcpy(&image_header, (void *)buffer, sizeof(image_header)); image_data.size = be32_to_cpu(image_header.size); pr_debug("FLASH: Candidate image size = %u\n", image_data.size); if (image_data.size > MAX_IMAGE_SIZE) { pr_warn("FLASH: Too large image\n"); return -EINVAL; } if (image_data.size < VALIDATE_BUF_SIZE) { pr_warn("FLASH: Image is shorter than expected\n"); return -EINVAL; } image_data.data = vzalloc(PAGE_ALIGN(image_data.size)); if (!image_data.data) { pr_err("%s : Failed to allocate memory\n", __func__); return -ENOMEM; } /* Pin memory */ addr = image_data.data; size = PAGE_ALIGN(image_data.size); while (size > 0) { SetPageReserved(vmalloc_to_page(addr)); addr += PAGE_SIZE; size -= PAGE_SIZE; } image_data.status = IMAGE_LOADING; return 0; } /* * Copy candidate image * * Parse candidate image header to get total image size * and pre-allocate required memory. */ static ssize_t image_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { int rc; mutex_lock(&image_data_mutex); /* New image ? */ if (pos == 0) { /* Free memory, if already allocated */ if (image_data.data) free_image_buf(); /* Cancel outstanding image update request */ if (update_flash_data.status == FLASH_IMG_READY) opal_flash_update(FLASH_UPDATE_CANCEL); /* Allocate memory */ rc = alloc_image_buf(buffer, count); if (rc) goto out; } if (image_data.status != IMAGE_LOADING) { rc = -ENOMEM; goto out; } if ((pos + count) > image_data.size) { rc = -EINVAL; goto out; } memcpy(image_data.data + pos, (void *)buffer, count); rc = count; /* Set image status */ if ((pos + count) == image_data.size) { pr_debug("FLASH: Candidate image loaded....\n"); image_data.status = IMAGE_READY; } out: mutex_unlock(&image_data_mutex); return rc; } /* * sysfs interface : * OPAL uses below sysfs files for code update. 
* We create these files under /sys/firmware/opal. * * image : Interface to load candidate firmware image * validate_flash : Validate firmware image * manage_flash : Commit/Reject firmware image * update_flash : Flash new firmware image * */ static const struct bin_attribute image_data_attr = { .attr = {.name = "image", .mode = 0200}, .size = MAX_IMAGE_SIZE, /* Limit image size */ .write = image_data_write, }; static struct kobj_attribute validate_attribute = __ATTR(validate_flash, 0600, validate_show, validate_store); static struct kobj_attribute manage_attribute = __ATTR(manage_flash, 0600, manage_show, manage_store); static struct kobj_attribute update_attribute = __ATTR(update_flash, 0600, update_show, update_store); static struct attribute *image_op_attrs[] = { &validate_attribute.attr, &manage_attribute.attr, &update_attribute.attr, NULL /* need to NULL terminate the list of attributes */ }; static const struct attribute_group image_op_attr_group = { .attrs = image_op_attrs, }; void __init opal_flash_update_init(void) { int ret; /* Firmware update is not supported by firmware */ if (!opal_check_token(OPAL_FLASH_VALIDATE)) return; /* Allocate validate image buffer */ validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL); if (!validate_flash_data.buf) { pr_err("%s : Failed to allocate memory\n", __func__); return; } /* Make sure /sys/firmware/opal directory is created */ if (!opal_kobj) { pr_warn("FLASH: opal kobject is not available\n"); goto nokobj; } /* Create the sysfs files */ ret = sysfs_create_group(opal_kobj, &image_op_attr_group); if (ret) { pr_warn("FLASH: Failed to create sysfs files\n"); goto nokobj; } ret = sysfs_create_bin_file(opal_kobj, &image_data_attr); if (ret) { pr_warn("FLASH: Failed to create sysfs files\n"); goto nosysfs_file; } /* Set default status */ validate_flash_data.status = FLASH_NO_OP; manage_flash_data.status = FLASH_NO_OP; update_flash_data.status = FLASH_NO_OP; image_data.status = IMAGE_INVALID; return; nosysfs_file: sysfs_remove_group(opal_kobj, &image_op_attr_group); nokobj: kfree(validate_flash_data.buf); return; }
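/*
 * Illustrative sketch only (not part of opal-flash.c): the candidate-image
 * header checks performed by alloc_image_buf() above, run on a hand-built
 * buffer. The magic/version bytes are arbitrary, and __builtin_bswap32()
 * stands in for be32_to_cpu() assuming a little-endian host.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MAX_IMAGE_SIZE		0x40000000u	/* same 1GB cap as above */
#define VALIDATE_BUF_SIZE	4096u

struct image_header {
	uint16_t magic;
	uint16_t version;
	uint32_t size;		/* stored big-endian in the candidate image */
};

static int check_candidate(const void *buffer, size_t count, uint32_t *img_size)
{
	struct image_header hdr;

	if (count < sizeof(hdr))
		return -1;			/* too short to hold a header */

	memcpy(&hdr, buffer, sizeof(hdr));
	*img_size = __builtin_bswap32(hdr.size);

	if (*img_size > MAX_IMAGE_SIZE)
		return -1;			/* image too large */
	if (*img_size < VALIDATE_BUF_SIZE)
		return -1;			/* shorter than the validation buffer */
	return 0;
}

int main(void)
{
	/* Arbitrary magic/version, then 0x00100000 (1MB) big-endian in the size field */
	unsigned char buf[8] = { 0xaa, 0x55, 0x00, 0x01, 0x00, 0x10, 0x00, 0x00 };
	uint32_t size;

	if (check_candidate(buf, sizeof(buf), &size) == 0)
		printf("candidate image accepted, size=%u bytes\n", size);
	else
		printf("candidate image rejected\n");
	return 0;
}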
linux-master
arch/powerpc/platforms/powernv/opal-flash.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerNV sensor code * * Copyright (C) 2013 IBM */ #include <linux/delay.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <asm/opal.h> #include <asm/machdep.h> /* * This will return sensor information to driver based on the requested sensor * handle. A handle is an opaque id for the powernv, read by the driver from the * device tree.. */ int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data) { int ret, token; struct opal_msg msg; __be32 data; token = opal_async_get_token_interruptible(); if (token < 0) return token; ret = opal_sensor_read(sensor_hndl, token, &data); switch (ret) { case OPAL_ASYNC_COMPLETION: ret = opal_async_wait_response(token, &msg); if (ret) { pr_err("%s: Failed to wait for the async response, %d\n", __func__, ret); goto out; } ret = opal_error_code(opal_get_async_rc(msg)); *sensor_data = be32_to_cpu(data); break; case OPAL_SUCCESS: ret = 0; *sensor_data = be32_to_cpu(data); break; case OPAL_WRONG_STATE: ret = -EIO; break; default: ret = opal_error_code(ret); break; } out: opal_async_release_token(token); return ret; } EXPORT_SYMBOL_GPL(opal_get_sensor_data); int opal_get_sensor_data_u64(u32 sensor_hndl, u64 *sensor_data) { int ret, token; struct opal_msg msg; __be64 data; if (!opal_check_token(OPAL_SENSOR_READ_U64)) { u32 sdata; ret = opal_get_sensor_data(sensor_hndl, &sdata); if (!ret) *sensor_data = sdata; return ret; } token = opal_async_get_token_interruptible(); if (token < 0) return token; ret = opal_sensor_read_u64(sensor_hndl, token, &data); switch (ret) { case OPAL_ASYNC_COMPLETION: ret = opal_async_wait_response(token, &msg); if (ret) { pr_err("%s: Failed to wait for the async response, %d\n", __func__, ret); goto out_token; } ret = opal_error_code(opal_get_async_rc(msg)); *sensor_data = be64_to_cpu(data); break; case OPAL_SUCCESS: ret = 0; *sensor_data = be64_to_cpu(data); break; case OPAL_WRONG_STATE: ret = -EIO; break; default: ret = opal_error_code(ret); break; } out_token: opal_async_release_token(token); return ret; } EXPORT_SYMBOL_GPL(opal_get_sensor_data_u64); int __init opal_sensor_init(void) { struct platform_device *pdev; struct device_node *sensor; sensor = of_find_node_by_path("/ibm,opal/sensors"); if (!sensor) { pr_err("Opal node 'sensors' not found\n"); return -ENODEV; } pdev = of_platform_device_create(sensor, "opal-sensor", NULL); of_node_put(sensor); return PTR_ERR_OR_ZERO(pdev); }
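/*
 * Illustrative sketch only (not part of opal-sensor.c): the "check for the
 * 64-bit read, otherwise call the 32-bit read and widen the result" pattern
 * used by opal_get_sensor_data_u64() above. The two stub helpers below are
 * hypothetical stand-ins for the OPAL firmware calls.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool fw_has_u64_read;	/* pretend capability flag for OPAL_SENSOR_READ_U64 */

static int stub_read_sensor_u32(uint32_t handle, uint32_t *data)
{
	*data = 42 + handle;	/* fake reading */
	return 0;
}

static int stub_read_sensor_u64(uint32_t handle, uint64_t *data)
{
	*data = 42 + handle;	/* fake reading */
	return 0;
}

static int get_sensor_data_u64(uint32_t handle, uint64_t *sensor_data)
{
	if (!fw_has_u64_read) {
		uint32_t sdata;
		int ret = stub_read_sensor_u32(handle, &sdata);

		if (!ret)
			*sensor_data = sdata;	/* widen the 32-bit reading */
		return ret;
	}
	return stub_read_sensor_u64(handle, sensor_data);
}

int main(void)
{
	uint64_t v;

	fw_has_u64_read = false;
	if (!get_sensor_data_u64(7, &v))
		printf("32-bit fallback path: %llu\n", (unsigned long long)v);

	fw_has_u64_read = true;
	if (!get_sensor_data_u64(7, &v))
		printf("native 64-bit path:  %llu\n", (unsigned long long)v);
	return 0;
}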
linux-master
arch/powerpc/platforms/powernv/opal-sensor.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerNV cpuidle code * * Copyright 2015 IBM Corp. */ #include <linux/types.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/device.h> #include <linux/cpu.h> #include <asm/firmware.h> #include <asm/interrupt.h> #include <asm/machdep.h> #include <asm/opal.h> #include <asm/cputhreads.h> #include <asm/cpuidle.h> #include <asm/code-patching.h> #include <asm/smp.h> #include <asm/runlatch.h> #include <asm/dbell.h> #include "powernv.h" #include "subcore.h" /* Power ISA 3.0 allows for stop states 0x0 - 0xF */ #define MAX_STOP_STATE 0xF #define P9_STOP_SPR_MSR 2000 #define P9_STOP_SPR_PSSCR 855 static u32 supported_cpuidle_states; struct pnv_idle_states_t *pnv_idle_states; int nr_pnv_idle_states; /* * The default stop state that will be used by ppc_md.power_save * function on platforms that support stop instruction. */ static u64 pnv_default_stop_val; static u64 pnv_default_stop_mask; static bool default_stop_found; /* * First stop state levels when SPR and TB loss can occur. */ static u64 pnv_first_tb_loss_level = MAX_STOP_STATE + 1; static u64 deep_spr_loss_state = MAX_STOP_STATE + 1; /* * psscr value and mask of the deepest stop idle state. * Used when a cpu is offlined. */ static u64 pnv_deepest_stop_psscr_val; static u64 pnv_deepest_stop_psscr_mask; static u64 pnv_deepest_stop_flag; static bool deepest_stop_found; static unsigned long power7_offline_type; static int __init pnv_save_sprs_for_deep_states(void) { int cpu; int rc; /* * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric across * all cpus at boot. Get these reg values of current cpu and use the * same across all cpus. */ uint64_t lpcr_val = mfspr(SPRN_LPCR); uint64_t hid0_val = mfspr(SPRN_HID0); uint64_t hmeer_val = mfspr(SPRN_HMEER); uint64_t msr_val = MSR_IDLE; uint64_t psscr_val = pnv_deepest_stop_psscr_val; for_each_present_cpu(cpu) { uint64_t pir = get_hard_smp_processor_id(cpu); uint64_t hsprg0_val = (uint64_t)paca_ptrs[cpu]; rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val); if (rc != 0) return rc; rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val); if (rc != 0) return rc; if (cpu_has_feature(CPU_FTR_ARCH_300)) { rc = opal_slw_set_reg(pir, P9_STOP_SPR_MSR, msr_val); if (rc) return rc; rc = opal_slw_set_reg(pir, P9_STOP_SPR_PSSCR, psscr_val); if (rc) return rc; } /* HIDs are per core registers */ if (cpu_thread_in_core(cpu) == 0) { rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val); if (rc != 0) return rc; rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val); if (rc != 0) return rc; /* Only p8 needs to set extra HID registers */ if (!cpu_has_feature(CPU_FTR_ARCH_300)) { uint64_t hid1_val = mfspr(SPRN_HID1); uint64_t hid4_val = mfspr(SPRN_HID4); uint64_t hid5_val = mfspr(SPRN_HID5); rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val); if (rc != 0) return rc; rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val); if (rc != 0) return rc; rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val); if (rc != 0) return rc; } } } return 0; } u32 pnv_get_supported_cpuidle_states(void) { return supported_cpuidle_states; } EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states); static void pnv_fastsleep_workaround_apply(void *info) { int cpu = smp_processor_id(); int rc; int *err = info; if (cpu_first_thread_sibling(cpu) != cpu) return; rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP, OPAL_CONFIG_IDLE_APPLY); if (rc) *err = 1; } static bool power7_fastsleep_workaround_entry = true; static bool power7_fastsleep_workaround_exit = true; /* * Used to store fastsleep 
workaround state * 0 - Workaround applied/undone at fastsleep entry/exit path (Default) * 1 - Workaround applied once, never undone. */ static u8 fastsleep_workaround_applyonce; static ssize_t show_fastsleep_workaround_applyonce(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%u\n", fastsleep_workaround_applyonce); } static ssize_t store_fastsleep_workaround_applyonce(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int err; u8 val; if (kstrtou8(buf, 0, &val) || val != 1) return -EINVAL; if (fastsleep_workaround_applyonce == 1) return count; /* * fastsleep_workaround_applyonce = 1 implies * fastsleep workaround needs to be left in 'applied' state on all * the cores. Do this by- * 1. Disable the 'undo' workaround in fastsleep exit path * 2. Sendi IPIs to all the cores which have at least one online thread * 3. Disable the 'apply' workaround in fastsleep entry path * * There is no need to send ipi to cores which have all threads * offlined, as last thread of the core entering fastsleep or deeper * state would have applied workaround. */ power7_fastsleep_workaround_exit = false; cpus_read_lock(); on_each_cpu(pnv_fastsleep_workaround_apply, &err, 1); cpus_read_unlock(); if (err) { pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply"); goto fail; } power7_fastsleep_workaround_entry = false; fastsleep_workaround_applyonce = 1; return count; fail: return -EIO; } static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600, show_fastsleep_workaround_applyonce, store_fastsleep_workaround_applyonce); static inline void atomic_start_thread_idle(void) { int cpu = raw_smp_processor_id(); int first = cpu_first_thread_sibling(cpu); int thread_nr = cpu_thread_in_core(cpu); unsigned long *state = &paca_ptrs[first]->idle_state; clear_bit(thread_nr, state); } static inline void atomic_stop_thread_idle(void) { int cpu = raw_smp_processor_id(); int first = cpu_first_thread_sibling(cpu); int thread_nr = cpu_thread_in_core(cpu); unsigned long *state = &paca_ptrs[first]->idle_state; set_bit(thread_nr, state); } static inline void atomic_lock_thread_idle(void) { int cpu = raw_smp_processor_id(); int first = cpu_first_thread_sibling(cpu); unsigned long *lock = &paca_ptrs[first]->idle_lock; while (unlikely(test_and_set_bit_lock(NR_PNV_CORE_IDLE_LOCK_BIT, lock))) barrier(); } static inline void atomic_unlock_and_stop_thread_idle(void) { int cpu = raw_smp_processor_id(); int first = cpu_first_thread_sibling(cpu); unsigned long thread = 1UL << cpu_thread_in_core(cpu); unsigned long *state = &paca_ptrs[first]->idle_state; unsigned long *lock = &paca_ptrs[first]->idle_lock; u64 s = READ_ONCE(*state); u64 new, tmp; BUG_ON(!(READ_ONCE(*lock) & PNV_CORE_IDLE_LOCK_BIT)); BUG_ON(s & thread); again: new = s | thread; tmp = cmpxchg(state, s, new); if (unlikely(tmp != s)) { s = tmp; goto again; } clear_bit_unlock(NR_PNV_CORE_IDLE_LOCK_BIT, lock); } static inline void atomic_unlock_thread_idle(void) { int cpu = raw_smp_processor_id(); int first = cpu_first_thread_sibling(cpu); unsigned long *lock = &paca_ptrs[first]->idle_lock; BUG_ON(!test_bit(NR_PNV_CORE_IDLE_LOCK_BIT, lock)); clear_bit_unlock(NR_PNV_CORE_IDLE_LOCK_BIT, lock); } /* P7 and P8 */ struct p7_sprs { /* per core */ u64 tscr; u64 worc; /* per subcore */ u64 sdr1; u64 rpr; /* per thread */ u64 lpcr; u64 hfscr; u64 fscr; u64 purr; u64 spurr; u64 dscr; u64 wort; /* per thread SPRs that get lost in shallow states */ u64 amr; u64 iamr; u64 uamor; /* amor is 
restored to constant ~0 */ }; static unsigned long power7_idle_insn(unsigned long type) { int cpu = raw_smp_processor_id(); int first = cpu_first_thread_sibling(cpu); unsigned long *state = &paca_ptrs[first]->idle_state; unsigned long thread = 1UL << cpu_thread_in_core(cpu); unsigned long core_thread_mask = (1UL << threads_per_core) - 1; unsigned long srr1; bool full_winkle; struct p7_sprs sprs = {}; /* avoid false use-uninitialised */ bool sprs_saved = false; int rc; if (unlikely(type != PNV_THREAD_NAP)) { atomic_lock_thread_idle(); BUG_ON(!(*state & thread)); *state &= ~thread; if (power7_fastsleep_workaround_entry) { if ((*state & core_thread_mask) == 0) { rc = opal_config_cpu_idle_state( OPAL_CONFIG_IDLE_FASTSLEEP, OPAL_CONFIG_IDLE_APPLY); BUG_ON(rc); } } if (type == PNV_THREAD_WINKLE) { sprs.tscr = mfspr(SPRN_TSCR); sprs.worc = mfspr(SPRN_WORC); sprs.sdr1 = mfspr(SPRN_SDR1); sprs.rpr = mfspr(SPRN_RPR); sprs.lpcr = mfspr(SPRN_LPCR); if (cpu_has_feature(CPU_FTR_ARCH_207S)) { sprs.hfscr = mfspr(SPRN_HFSCR); sprs.fscr = mfspr(SPRN_FSCR); } sprs.purr = mfspr(SPRN_PURR); sprs.spurr = mfspr(SPRN_SPURR); sprs.dscr = mfspr(SPRN_DSCR); sprs.wort = mfspr(SPRN_WORT); sprs_saved = true; /* * Increment winkle counter and set all winkle bits if * all threads are winkling. This allows wakeup side to * distinguish between fast sleep and winkle state * loss. Fast sleep still has to resync the timebase so * this may not be a really big win. */ *state += 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT; if ((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) >> PNV_CORE_IDLE_WINKLE_COUNT_SHIFT == threads_per_core) *state |= PNV_CORE_IDLE_THREAD_WINKLE_BITS; WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0); } atomic_unlock_thread_idle(); } if (cpu_has_feature(CPU_FTR_ARCH_207S)) { sprs.amr = mfspr(SPRN_AMR); sprs.iamr = mfspr(SPRN_IAMR); sprs.uamor = mfspr(SPRN_UAMOR); } local_paca->thread_idle_state = type; srr1 = isa206_idle_insn_mayloss(type); /* go idle */ local_paca->thread_idle_state = PNV_THREAD_RUNNING; WARN_ON_ONCE(!srr1); WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR)); if (cpu_has_feature(CPU_FTR_ARCH_207S)) { if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) { /* * We don't need an isync after the mtsprs here because * the upcoming mtmsrd is execution synchronizing. 
*/ mtspr(SPRN_AMR, sprs.amr); mtspr(SPRN_IAMR, sprs.iamr); mtspr(SPRN_AMOR, ~0); mtspr(SPRN_UAMOR, sprs.uamor); } } if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI)) hmi_exception_realmode(NULL); if (likely((srr1 & SRR1_WAKESTATE) != SRR1_WS_HVLOSS)) { if (unlikely(type != PNV_THREAD_NAP)) { atomic_lock_thread_idle(); if (type == PNV_THREAD_WINKLE) { WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0); *state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT; *state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT); } atomic_unlock_and_stop_thread_idle(); } return srr1; } /* HV state loss */ BUG_ON(type == PNV_THREAD_NAP); atomic_lock_thread_idle(); full_winkle = false; if (type == PNV_THREAD_WINKLE) { WARN_ON((*state & PNV_CORE_IDLE_WINKLE_COUNT_BITS) == 0); *state -= 1 << PNV_CORE_IDLE_WINKLE_COUNT_SHIFT; if (*state & (thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT)) { *state &= ~(thread << PNV_CORE_IDLE_THREAD_WINKLE_BITS_SHIFT); full_winkle = true; BUG_ON(!sprs_saved); } } WARN_ON(*state & thread); if ((*state & core_thread_mask) != 0) goto core_woken; /* Per-core SPRs */ if (full_winkle) { mtspr(SPRN_TSCR, sprs.tscr); mtspr(SPRN_WORC, sprs.worc); } if (power7_fastsleep_workaround_exit) { rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP, OPAL_CONFIG_IDLE_UNDO); BUG_ON(rc); } /* TB */ if (opal_resync_timebase() != OPAL_SUCCESS) BUG(); core_woken: if (!full_winkle) goto subcore_woken; if ((*state & local_paca->subcore_sibling_mask) != 0) goto subcore_woken; /* Per-subcore SPRs */ mtspr(SPRN_SDR1, sprs.sdr1); mtspr(SPRN_RPR, sprs.rpr); subcore_woken: /* * isync after restoring shared SPRs and before unlocking. Unlock * only contains hwsync which does not necessarily do the right * thing for SPRs. */ isync(); atomic_unlock_and_stop_thread_idle(); /* Fast sleep does not lose SPRs */ if (!full_winkle) return srr1; /* Per-thread SPRs */ mtspr(SPRN_LPCR, sprs.lpcr); if (cpu_has_feature(CPU_FTR_ARCH_207S)) { mtspr(SPRN_HFSCR, sprs.hfscr); mtspr(SPRN_FSCR, sprs.fscr); } mtspr(SPRN_PURR, sprs.purr); mtspr(SPRN_SPURR, sprs.spurr); mtspr(SPRN_DSCR, sprs.dscr); mtspr(SPRN_WORT, sprs.wort); mtspr(SPRN_SPRG3, local_paca->sprg_vdso); #ifdef CONFIG_PPC_64S_HASH_MMU /* * The SLB has to be restored here, but it sometimes still * contains entries, so the __ variant must be used to prevent * multi hits. */ __slb_restore_bolted_realmode(); #endif return srr1; } extern unsigned long idle_kvm_start_guest(unsigned long srr1); #ifdef CONFIG_HOTPLUG_CPU static unsigned long power7_offline(void) { unsigned long srr1; mtmsr(MSR_IDLE); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE /* Tell KVM we're entering idle. */ /******************************************************/ /* N O T E W E L L ! ! ! N O T E W E L L */ /* The following store to HSTATE_HWTHREAD_STATE(r13) */ /* MUST occur in real mode, i.e. with the MMU off, */ /* and the MMU must stay off until we clear this flag */ /* and test HSTATE_HWTHREAD_REQ(r13) in */ /* pnv_powersave_wakeup in this file. */ /* The reason is that another thread can switch the */ /* MMU to a guest context whenever this flag is set */ /* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on, */ /* that would potentially cause this thread to start */ /* executing instructions from guest memory in */ /* hypervisor mode, leading to a host crash or data */ /* corruption, or worse. 
*/ /******************************************************/ local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE; #endif __ppc64_runlatch_off(); srr1 = power7_idle_insn(power7_offline_type); __ppc64_runlatch_on(); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL; /* Order setting hwthread_state vs. testing hwthread_req */ smp_mb(); if (local_paca->kvm_hstate.hwthread_req) srr1 = idle_kvm_start_guest(srr1); #endif mtmsr(MSR_KERNEL); return srr1; } #endif void power7_idle_type(unsigned long type) { unsigned long srr1; if (!prep_irq_for_idle_irqsoff()) return; mtmsr(MSR_IDLE); __ppc64_runlatch_off(); srr1 = power7_idle_insn(type); __ppc64_runlatch_on(); mtmsr(MSR_KERNEL); fini_irq_for_idle_irqsoff(); irq_set_pending_from_srr1(srr1); } static void power7_idle(void) { if (!powersave_nap) return; power7_idle_type(PNV_THREAD_NAP); } struct p9_sprs { /* per core */ u64 ptcr; u64 rpr; u64 tscr; u64 ldbar; /* per thread */ u64 lpcr; u64 hfscr; u64 fscr; u64 pid; u64 purr; u64 spurr; u64 dscr; u64 ciabr; u64 mmcra; u32 mmcr0; u32 mmcr1; u64 mmcr2; /* per thread SPRs that get lost in shallow states */ u64 amr; u64 iamr; u64 amor; u64 uamor; }; static unsigned long power9_idle_stop(unsigned long psscr) { int cpu = raw_smp_processor_id(); int first = cpu_first_thread_sibling(cpu); unsigned long *state = &paca_ptrs[first]->idle_state; unsigned long core_thread_mask = (1UL << threads_per_core) - 1; unsigned long srr1; unsigned long pls; unsigned long mmcr0 = 0; unsigned long mmcra = 0; struct p9_sprs sprs = {}; /* avoid false used-uninitialised */ bool sprs_saved = false; if (!(psscr & (PSSCR_EC|PSSCR_ESL))) { /* EC=ESL=0 case */ /* * Wake synchronously. SRESET via xscom may still cause * a 0x100 powersave wakeup with SRR1 reason! */ srr1 = isa300_idle_stop_noloss(psscr); /* go idle */ if (likely(!srr1)) return 0; /* * Registers not saved, can't recover! * This would be a hardware bug */ BUG_ON((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS); goto out; } /* EC=ESL=1 case */ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE if (cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG)) { local_paca->requested_psscr = psscr; /* order setting requested_psscr vs testing dont_stop */ smp_mb(); if (atomic_read(&local_paca->dont_stop)) { local_paca->requested_psscr = 0; return 0; } } #endif if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) { /* * POWER9 DD2 can incorrectly set PMAO when waking up * after a state-loss idle. Saving and restoring MMCR0 * over idle is a workaround. 
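		 * The value saved here is written back in the state-loss
		 * wakeup path below, next to the DD2.0 ERAT flush.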
*/ mmcr0 = mfspr(SPRN_MMCR0); } if ((psscr & PSSCR_RL_MASK) >= deep_spr_loss_state) { sprs.lpcr = mfspr(SPRN_LPCR); sprs.hfscr = mfspr(SPRN_HFSCR); sprs.fscr = mfspr(SPRN_FSCR); sprs.pid = mfspr(SPRN_PID); sprs.purr = mfspr(SPRN_PURR); sprs.spurr = mfspr(SPRN_SPURR); sprs.dscr = mfspr(SPRN_DSCR); sprs.ciabr = mfspr(SPRN_CIABR); sprs.mmcra = mfspr(SPRN_MMCRA); sprs.mmcr0 = mfspr(SPRN_MMCR0); sprs.mmcr1 = mfspr(SPRN_MMCR1); sprs.mmcr2 = mfspr(SPRN_MMCR2); sprs.ptcr = mfspr(SPRN_PTCR); sprs.rpr = mfspr(SPRN_RPR); sprs.tscr = mfspr(SPRN_TSCR); if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR)) sprs.ldbar = mfspr(SPRN_LDBAR); sprs_saved = true; atomic_start_thread_idle(); } sprs.amr = mfspr(SPRN_AMR); sprs.iamr = mfspr(SPRN_IAMR); sprs.uamor = mfspr(SPRN_UAMOR); srr1 = isa300_idle_stop_mayloss(psscr); /* go idle */ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE local_paca->requested_psscr = 0; #endif psscr = mfspr(SPRN_PSSCR); WARN_ON_ONCE(!srr1); WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR)); if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) { /* * We don't need an isync after the mtsprs here because the * upcoming mtmsrd is execution synchronizing. */ mtspr(SPRN_AMR, sprs.amr); mtspr(SPRN_IAMR, sprs.iamr); mtspr(SPRN_AMOR, ~0); mtspr(SPRN_UAMOR, sprs.uamor); /* * Workaround for POWER9 DD2.0, if we lost resources, the ERAT * might have been corrupted and needs flushing. We also need * to reload MMCR0 (see mmcr0 comment above). */ if (!cpu_has_feature(CPU_FTR_POWER9_DD2_1)) { asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT); mtspr(SPRN_MMCR0, mmcr0); } /* * DD2.2 and earlier need to set then clear bit 60 in MMCRA * to ensure the PMU starts running. */ mmcra = mfspr(SPRN_MMCRA); mmcra |= PPC_BIT(60); mtspr(SPRN_MMCRA, mmcra); mmcra &= ~PPC_BIT(60); mtspr(SPRN_MMCRA, mmcra); } if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI)) hmi_exception_realmode(NULL); /* * On POWER9, SRR1 bits do not match exactly as expected. * SRR1_WS_GPRLOSS (10b) can also result in SPR loss, so * just always test PSSCR for SPR/TB state loss. */ pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT; if (likely(pls < deep_spr_loss_state)) { if (sprs_saved) atomic_stop_thread_idle(); goto out; } /* HV state loss */ BUG_ON(!sprs_saved); atomic_lock_thread_idle(); if ((*state & core_thread_mask) != 0) goto core_woken; /* Per-core SPRs */ mtspr(SPRN_PTCR, sprs.ptcr); mtspr(SPRN_RPR, sprs.rpr); mtspr(SPRN_TSCR, sprs.tscr); if (pls >= pnv_first_tb_loss_level) { /* TB loss */ if (opal_resync_timebase() != OPAL_SUCCESS) BUG(); } /* * isync after restoring shared SPRs and before unlocking. Unlock * only contains hwsync which does not necessarily do the right * thing for SPRs. */ isync(); core_woken: atomic_unlock_and_stop_thread_idle(); /* Per-thread SPRs */ mtspr(SPRN_LPCR, sprs.lpcr); mtspr(SPRN_HFSCR, sprs.hfscr); mtspr(SPRN_FSCR, sprs.fscr); mtspr(SPRN_PID, sprs.pid); mtspr(SPRN_PURR, sprs.purr); mtspr(SPRN_SPURR, sprs.spurr); mtspr(SPRN_DSCR, sprs.dscr); mtspr(SPRN_CIABR, sprs.ciabr); mtspr(SPRN_MMCRA, sprs.mmcra); mtspr(SPRN_MMCR0, sprs.mmcr0); mtspr(SPRN_MMCR1, sprs.mmcr1); mtspr(SPRN_MMCR2, sprs.mmcr2); if (!firmware_has_feature(FW_FEATURE_ULTRAVISOR)) mtspr(SPRN_LDBAR, sprs.ldbar); mtspr(SPRN_SPRG3, local_paca->sprg_vdso); if (!radix_enabled()) __slb_restore_bolted_realmode(); out: mtmsr(MSR_KERNEL); return srr1; } #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE /* * This is used in working around bugs in thread reconfiguration * on POWER9 (at least up to Nimbus DD2.2) relating to transactional * memory and the way that XER[SO] is checkpointed. 
* This function forces the core into SMT4 in order by asking * all other threads not to stop, and sending a message to any * that are in a stop state. * Must be called with preemption disabled. */ void pnv_power9_force_smt4_catch(void) { int cpu, cpu0, thr; int awake_threads = 1; /* this thread is awake */ int poke_threads = 0; int need_awake = threads_per_core; cpu = smp_processor_id(); cpu0 = cpu & ~(threads_per_core - 1); for (thr = 0; thr < threads_per_core; ++thr) { if (cpu != cpu0 + thr) atomic_inc(&paca_ptrs[cpu0+thr]->dont_stop); } /* order setting dont_stop vs testing requested_psscr */ smp_mb(); for (thr = 0; thr < threads_per_core; ++thr) { if (!paca_ptrs[cpu0+thr]->requested_psscr) ++awake_threads; else poke_threads |= (1 << thr); } /* If at least 3 threads are awake, the core is in SMT4 already */ if (awake_threads < need_awake) { /* We have to wake some threads; we'll use msgsnd */ for (thr = 0; thr < threads_per_core; ++thr) { if (poke_threads & (1 << thr)) { ppc_msgsnd_sync(); ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, paca_ptrs[cpu0+thr]->hw_cpu_id); } } /* now spin until at least 3 threads are awake */ do { for (thr = 0; thr < threads_per_core; ++thr) { if ((poke_threads & (1 << thr)) && !paca_ptrs[cpu0+thr]->requested_psscr) { ++awake_threads; poke_threads &= ~(1 << thr); } } } while (awake_threads < need_awake); } } EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_catch); void pnv_power9_force_smt4_release(void) { int cpu, cpu0, thr; cpu = smp_processor_id(); cpu0 = cpu & ~(threads_per_core - 1); /* clear all the dont_stop flags */ for (thr = 0; thr < threads_per_core; ++thr) { if (cpu != cpu0 + thr) atomic_dec(&paca_ptrs[cpu0+thr]->dont_stop); } } EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release); #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ struct p10_sprs { /* * SPRs that get lost in shallow states: * * P10 loses CR, LR, CTR, FPSCR, VSCR, XER, TAR, SPRG2, and HSPRG1 * isa300 idle routines restore CR, LR. * CTR is volatile * idle thread doesn't use FP or VEC * kernel doesn't use TAR * HSPRG1 is only live in HV interrupt entry * SPRG2 is only live in KVM guests, KVM handles it. */ }; static unsigned long power10_idle_stop(unsigned long psscr) { int cpu = raw_smp_processor_id(); int first = cpu_first_thread_sibling(cpu); unsigned long *state = &paca_ptrs[first]->idle_state; unsigned long core_thread_mask = (1UL << threads_per_core) - 1; unsigned long srr1; unsigned long pls; // struct p10_sprs sprs = {}; /* avoid false used-uninitialised */ bool sprs_saved = false; if (!(psscr & (PSSCR_EC|PSSCR_ESL))) { /* EC=ESL=0 case */ /* * Wake synchronously. SRESET via xscom may still cause * a 0x100 powersave wakeup with SRR1 reason! */ srr1 = isa300_idle_stop_noloss(psscr); /* go idle */ if (likely(!srr1)) return 0; /* * Registers not saved, can't recover! * This would be a hardware bug */ BUG_ON((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS); goto out; } /* EC=ESL=1 case */ if ((psscr & PSSCR_RL_MASK) >= deep_spr_loss_state) { /* XXX: save SPRs for deep state loss here. */ sprs_saved = true; atomic_start_thread_idle(); } srr1 = isa300_idle_stop_mayloss(psscr); /* go idle */ psscr = mfspr(SPRN_PSSCR); WARN_ON_ONCE(!srr1); WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR)); if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI)) hmi_exception_realmode(NULL); /* * On POWER10, SRR1 bits do not match exactly as expected. * SRR1_WS_GPRLOSS (10b) can also result in SPR loss, so * just always test PSSCR for SPR/TB state loss. 
*/ pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT; if (likely(pls < deep_spr_loss_state)) { if (sprs_saved) atomic_stop_thread_idle(); goto out; } /* HV state loss */ BUG_ON(!sprs_saved); atomic_lock_thread_idle(); if ((*state & core_thread_mask) != 0) goto core_woken; /* XXX: restore per-core SPRs here */ if (pls >= pnv_first_tb_loss_level) { /* TB loss */ if (opal_resync_timebase() != OPAL_SUCCESS) BUG(); } /* * isync after restoring shared SPRs and before unlocking. Unlock * only contains hwsync which does not necessarily do the right * thing for SPRs. */ isync(); core_woken: atomic_unlock_and_stop_thread_idle(); /* XXX: restore per-thread SPRs here */ if (!radix_enabled()) __slb_restore_bolted_realmode(); out: mtmsr(MSR_KERNEL); return srr1; } #ifdef CONFIG_HOTPLUG_CPU static unsigned long arch300_offline_stop(unsigned long psscr) { unsigned long srr1; if (cpu_has_feature(CPU_FTR_ARCH_31)) srr1 = power10_idle_stop(psscr); else srr1 = power9_idle_stop(psscr); return srr1; } #endif void arch300_idle_type(unsigned long stop_psscr_val, unsigned long stop_psscr_mask) { unsigned long psscr; unsigned long srr1; if (!prep_irq_for_idle_irqsoff()) return; psscr = mfspr(SPRN_PSSCR); psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val; __ppc64_runlatch_off(); if (cpu_has_feature(CPU_FTR_ARCH_31)) srr1 = power10_idle_stop(psscr); else srr1 = power9_idle_stop(psscr); __ppc64_runlatch_on(); fini_irq_for_idle_irqsoff(); irq_set_pending_from_srr1(srr1); } /* * Used for ppc_md.power_save which needs a function with no parameters */ static void arch300_idle(void) { arch300_idle_type(pnv_default_stop_val, pnv_default_stop_mask); } #ifdef CONFIG_HOTPLUG_CPU void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val) { u64 pir = get_hard_smp_processor_id(cpu); mtspr(SPRN_LPCR, lpcr_val); /* * Program the LPCR via stop-api only if the deepest stop state * can lose hypervisor context. */ if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val); } /* * pnv_cpu_offline: A function that puts the CPU into the deepest * available platform idle state on a CPU-Offline. * interrupts hard disabled and no lazy irq pending. */ unsigned long pnv_cpu_offline(unsigned int cpu) { unsigned long srr1; __ppc64_runlatch_off(); if (cpu_has_feature(CPU_FTR_ARCH_300) && deepest_stop_found) { unsigned long psscr; psscr = mfspr(SPRN_PSSCR); psscr = (psscr & ~pnv_deepest_stop_psscr_mask) | pnv_deepest_stop_psscr_val; srr1 = arch300_offline_stop(psscr); } else if (cpu_has_feature(CPU_FTR_ARCH_206) && power7_offline_type) { srr1 = power7_offline(); } else { /* This is the fallback method. We emulate snooze */ while (!generic_check_cpu_restart(cpu)) { HMT_low(); HMT_very_low(); } srr1 = 0; HMT_medium(); } __ppc64_runlatch_on(); return srr1; } #endif /* * Power ISA 3.0 idle initialization. * * POWER ISA 3.0 defines a new SPR Processor stop Status and Control * Register (PSSCR) to control idle behavior. * * PSSCR layout: * ---------------------------------------------------------- * | PLS | /// | SD | ESL | EC | PSLL | /// | TR | MTL | RL | * ---------------------------------------------------------- * 0 4 41 42 43 44 48 54 56 60 * * PSSCR key fields: * Bits 0:3 - Power-Saving Level Status (PLS). This field indicates the * lowest power-saving state the thread entered since stop instruction was * last executed. 
* * Bit 41 - Status Disable(SD) * 0 - Shows PLS entries * 1 - PLS entries are all 0 * * Bit 42 - Enable State Loss * 0 - No state is lost irrespective of other fields * 1 - Allows state loss * * Bit 43 - Exit Criterion * 0 - Exit from power-save mode on any interrupt * 1 - Exit from power-save mode controlled by LPCR's PECE bits * * Bits 44:47 - Power-Saving Level Limit * This limits the power-saving level that can be entered into. * * Bits 60:63 - Requested Level * Used to specify which power-saving level must be entered on executing * stop instruction */ int __init validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags) { int err = 0; /* * psscr_mask == 0xf indicates an older firmware. * Set remaining fields of psscr to the default values. * See NOTE above definition of PSSCR_HV_DEFAULT_VAL */ if (*psscr_mask == 0xf) { *psscr_val = *psscr_val | PSSCR_HV_DEFAULT_VAL; *psscr_mask = PSSCR_HV_DEFAULT_MASK; return err; } /* * New firmware is expected to set the psscr_val bits correctly. * Validate that the following invariants are correctly maintained by * the new firmware. * - ESL bit value matches the EC bit value. * - ESL bit is set for all the deep stop states. */ if (GET_PSSCR_ESL(*psscr_val) != GET_PSSCR_EC(*psscr_val)) { err = ERR_EC_ESL_MISMATCH; } else if ((flags & OPAL_PM_LOSE_FULL_CONTEXT) && GET_PSSCR_ESL(*psscr_val) == 0) { err = ERR_DEEP_STATE_ESL_MISMATCH; } return err; } /* * pnv_arch300_idle_init: Initializes the default idle state, first * deep idle state and deepest idle state on * ISA 3.0 CPUs. * * @np: /ibm,opal/power-mgt device node * @flags: cpu-idle-state-flags array * @dt_idle_states: Number of idle state entries * Returns 0 on success */ static void __init pnv_arch300_idle_init(void) { u64 max_residency_ns = 0; int i; /* stop is not really architected, we only have p9,p10 drivers */ if (!pvr_version_is(PVR_POWER10) && !pvr_version_is(PVR_POWER9)) return; /* * pnv_deepest_stop_{val,mask} should be set to values corresponding to * the deepest stop state. * * pnv_default_stop_{val,mask} should be set to values corresponding to * the deepest loss-less (OPAL_PM_STOP_INST_FAST) stop state. */ pnv_first_tb_loss_level = MAX_STOP_STATE + 1; deep_spr_loss_state = MAX_STOP_STATE + 1; for (i = 0; i < nr_pnv_idle_states; i++) { int err; struct pnv_idle_states_t *state = &pnv_idle_states[i]; u64 psscr_rl = state->psscr_val & PSSCR_RL_MASK; /* No deep loss driver implemented for POWER10 yet */ if (pvr_version_is(PVR_POWER10) && state->flags & (OPAL_PM_TIMEBASE_STOP|OPAL_PM_LOSE_FULL_CONTEXT)) continue; if ((state->flags & OPAL_PM_TIMEBASE_STOP) && (pnv_first_tb_loss_level > psscr_rl)) pnv_first_tb_loss_level = psscr_rl; if ((state->flags & OPAL_PM_LOSE_FULL_CONTEXT) && (deep_spr_loss_state > psscr_rl)) deep_spr_loss_state = psscr_rl; /* * The idle code does not deal with TB loss occurring * in a shallower state than SPR loss, so force it to * behave like SPRs are lost if TB is lost. POWER9 would * never encounter this, but a POWER8 core would if it * implemented the stop instruction. So this is for forward * compatibility. 
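		 * (Net effect: deep_spr_loss_state is clamped so it is
		 * never a deeper level than pnv_first_tb_loss_level.)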
*/ if ((state->flags & OPAL_PM_TIMEBASE_STOP) && (deep_spr_loss_state > psscr_rl)) deep_spr_loss_state = psscr_rl; err = validate_psscr_val_mask(&state->psscr_val, &state->psscr_mask, state->flags); if (err) { report_invalid_psscr_val(state->psscr_val, err); continue; } state->valid = true; if (max_residency_ns < state->residency_ns) { max_residency_ns = state->residency_ns; pnv_deepest_stop_psscr_val = state->psscr_val; pnv_deepest_stop_psscr_mask = state->psscr_mask; pnv_deepest_stop_flag = state->flags; deepest_stop_found = true; } if (!default_stop_found && (state->flags & OPAL_PM_STOP_INST_FAST)) { pnv_default_stop_val = state->psscr_val; pnv_default_stop_mask = state->psscr_mask; default_stop_found = true; WARN_ON(state->flags & OPAL_PM_LOSE_FULL_CONTEXT); } } if (unlikely(!default_stop_found)) { pr_warn("cpuidle-powernv: No suitable default stop state found. Disabling platform idle.\n"); } else { ppc_md.power_save = arch300_idle; pr_info("cpuidle-powernv: Default stop: psscr = 0x%016llx,mask=0x%016llx\n", pnv_default_stop_val, pnv_default_stop_mask); } if (unlikely(!deepest_stop_found)) { pr_warn("cpuidle-powernv: No suitable stop state for CPU-Hotplug. Offlined CPUs will busy wait"); } else { pr_info("cpuidle-powernv: Deepest stop: psscr = 0x%016llx,mask=0x%016llx\n", pnv_deepest_stop_psscr_val, pnv_deepest_stop_psscr_mask); } pr_info("cpuidle-powernv: First stop level that may lose SPRs = 0x%llx\n", deep_spr_loss_state); pr_info("cpuidle-powernv: First stop level that may lose timebase = 0x%llx\n", pnv_first_tb_loss_level); } static void __init pnv_disable_deep_states(void) { /* * The stop-api is unable to restore hypervisor * resources on wakeup from platform idle states which * lose full context. So disable such states. */ supported_cpuidle_states &= ~OPAL_PM_LOSE_FULL_CONTEXT; pr_warn("cpuidle-powernv: Disabling idle states that lose full context\n"); pr_warn("cpuidle-powernv: Idle power-savings, CPU-Hotplug affected\n"); if (cpu_has_feature(CPU_FTR_ARCH_300) && (pnv_deepest_stop_flag & OPAL_PM_LOSE_FULL_CONTEXT)) { /* * Use the default stop state for CPU-Hotplug * if available. */ if (default_stop_found) { pnv_deepest_stop_psscr_val = pnv_default_stop_val; pnv_deepest_stop_psscr_mask = pnv_default_stop_mask; pr_warn("cpuidle-powernv: Offlined CPUs will stop with psscr = 0x%016llx\n", pnv_deepest_stop_psscr_val); } else { /* Fallback to snooze loop for CPU-Hotplug */ deepest_stop_found = false; pr_warn("cpuidle-powernv: Offlined CPUs will busy wait\n"); } } } /* * Probe device tree for supported idle states */ static void __init pnv_probe_idle_states(void) { int i; if (nr_pnv_idle_states < 0) { pr_warn("cpuidle-powernv: no idle states found in the DT\n"); return; } if (cpu_has_feature(CPU_FTR_ARCH_300)) pnv_arch300_idle_init(); for (i = 0; i < nr_pnv_idle_states; i++) supported_cpuidle_states |= pnv_idle_states[i].flags; } /* * This function parses device-tree and populates all the information * into pnv_idle_states structure. It also sets up nr_pnv_idle_states * which is the number of cpuidle states discovered through device-tree. 
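 * All properties are read from the /ibm,opal/power-mgt node.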
*/ static int __init pnv_parse_cpuidle_dt(void) { struct device_node *np; int nr_idle_states, i; int rc = 0; u32 *temp_u32; u64 *temp_u64; const char **temp_string; np = of_find_node_by_path("/ibm,opal/power-mgt"); if (!np) { pr_warn("opal: PowerMgmt Node not found\n"); return -ENODEV; } nr_idle_states = of_property_count_u32_elems(np, "ibm,cpu-idle-state-flags"); pnv_idle_states = kcalloc(nr_idle_states, sizeof(*pnv_idle_states), GFP_KERNEL); temp_u32 = kcalloc(nr_idle_states, sizeof(u32), GFP_KERNEL); temp_u64 = kcalloc(nr_idle_states, sizeof(u64), GFP_KERNEL); temp_string = kcalloc(nr_idle_states, sizeof(char *), GFP_KERNEL); if (!(pnv_idle_states && temp_u32 && temp_u64 && temp_string)) { pr_err("Could not allocate memory for dt parsing\n"); rc = -ENOMEM; goto out; } /* Read flags */ if (of_property_read_u32_array(np, "ibm,cpu-idle-state-flags", temp_u32, nr_idle_states)) { pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n"); rc = -EINVAL; goto out; } for (i = 0; i < nr_idle_states; i++) pnv_idle_states[i].flags = temp_u32[i]; /* Read latencies */ if (of_property_read_u32_array(np, "ibm,cpu-idle-state-latencies-ns", temp_u32, nr_idle_states)) { pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n"); rc = -EINVAL; goto out; } for (i = 0; i < nr_idle_states; i++) pnv_idle_states[i].latency_ns = temp_u32[i]; /* Read residencies */ if (of_property_read_u32_array(np, "ibm,cpu-idle-state-residency-ns", temp_u32, nr_idle_states)) { pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-residency-ns in DT\n"); rc = -EINVAL; goto out; } for (i = 0; i < nr_idle_states; i++) pnv_idle_states[i].residency_ns = temp_u32[i]; /* For power9 and later */ if (cpu_has_feature(CPU_FTR_ARCH_300)) { /* Read pm_crtl_val */ if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr", temp_u64, nr_idle_states)) { pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr in DT\n"); rc = -EINVAL; goto out; } for (i = 0; i < nr_idle_states; i++) pnv_idle_states[i].psscr_val = temp_u64[i]; /* Read pm_crtl_mask */ if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr-mask", temp_u64, nr_idle_states)) { pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-psscr-mask in DT\n"); rc = -EINVAL; goto out; } for (i = 0; i < nr_idle_states; i++) pnv_idle_states[i].psscr_mask = temp_u64[i]; } /* * power8 specific properties ibm,cpu-idle-state-pmicr-mask and * ibm,cpu-idle-state-pmicr-val were never used and there is no * plan to use it in near future. 
Hence, not parsing these properties */ if (of_property_read_string_array(np, "ibm,cpu-idle-state-names", temp_string, nr_idle_states) < 0) { pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-names in DT\n"); rc = -EINVAL; goto out; } for (i = 0; i < nr_idle_states; i++) strscpy(pnv_idle_states[i].name, temp_string[i], PNV_IDLE_NAME_LEN); nr_pnv_idle_states = nr_idle_states; rc = 0; out: kfree(temp_u32); kfree(temp_u64); kfree(temp_string); of_node_put(np); return rc; } static int __init pnv_init_idle_states(void) { int cpu; int rc = 0; /* Set up PACA fields */ for_each_present_cpu(cpu) { struct paca_struct *p = paca_ptrs[cpu]; p->idle_state = 0; if (cpu == cpu_first_thread_sibling(cpu)) p->idle_state = (1 << threads_per_core) - 1; if (!cpu_has_feature(CPU_FTR_ARCH_300)) { /* P7/P8 nap */ p->thread_idle_state = PNV_THREAD_RUNNING; } else if (pvr_version_is(PVR_POWER9)) { /* P9 stop workarounds */ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE p->requested_psscr = 0; atomic_set(&p->dont_stop, 0); #endif } } /* In case we error out nr_pnv_idle_states will be zero */ nr_pnv_idle_states = 0; supported_cpuidle_states = 0; if (cpuidle_disable != IDLE_NO_OVERRIDE) goto out; rc = pnv_parse_cpuidle_dt(); if (rc) return rc; pnv_probe_idle_states(); if (!cpu_has_feature(CPU_FTR_ARCH_300)) { if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) { power7_fastsleep_workaround_entry = false; power7_fastsleep_workaround_exit = false; } else { struct device *dev_root; /* * OPAL_PM_SLEEP_ENABLED_ER1 is set. It indicates that * workaround is needed to use fastsleep. Provide sysfs * control to choose how this workaround has to be * applied. */ dev_root = bus_get_dev_root(&cpu_subsys); if (dev_root) { device_create_file(dev_root, &dev_attr_fastsleep_workaround_applyonce); put_device(dev_root); } } update_subcore_sibling_mask(); if (supported_cpuidle_states & OPAL_PM_NAP_ENABLED) { ppc_md.power_save = power7_idle; power7_offline_type = PNV_THREAD_NAP; } if ((supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED) && (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT)) power7_offline_type = PNV_THREAD_WINKLE; else if ((supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED) || (supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) power7_offline_type = PNV_THREAD_SLEEP; } if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) { if (pnv_save_sprs_for_deep_states()) pnv_disable_deep_states(); } out: return 0; } machine_subsys_initcall(powernv, pnv_init_idle_states);
linux-master
arch/powerpc/platforms/powernv/idle.c
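/*
 * Illustrative sketch, not part of the kernel file above: it shows how a
 * stop request value can be composed and sanity checked using the PSSCR
 * layout documented in idle.c (PLS in bits 0:3, ESL bit 42, EC bit 43,
 * RL in bits 60:63). The X_PSSCR_* constants and the helper below are
 * local stand-ins, not the kernel's asm/reg.h definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define X_PSSCR_EC		(1ULL << (63 - 43))	/* Exit Criterion */
#define X_PSSCR_ESL		(1ULL << (63 - 42))	/* Enable State Loss */
#define X_PSSCR_RL_MASK		0xFULL			/* Requested Level */
#define X_PSSCR_PLS_SHIFT	60			/* Power-saving Level Status */

/* Mirrors the EC == ESL invariant checked by validate_psscr_val_mask(). */
static int x_psscr_valid(uint64_t val)
{
	return !!(val & X_PSSCR_EC) == !!(val & X_PSSCR_ESL);
}

int main(void)
{
	/* Request stop level 3 with state loss permitted (EC = ESL = 1). */
	uint64_t val = X_PSSCR_EC | X_PSSCR_ESL | (3 & X_PSSCR_RL_MASK);
	/* Pretend the hardware reports PLS = 3 after wakeup. */
	uint64_t after = val | (3ULL << X_PSSCR_PLS_SHIFT);

	printf("psscr request = 0x%016llx, valid = %d\n",
	       (unsigned long long)val, x_psscr_valid(val));
	printf("PLS after wakeup = %llu\n",
	       (unsigned long long)((after >> X_PSSCR_PLS_SHIFT) & 0xF));
	return 0;
}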
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2014-2016 IBM Corp. */ #include <linux/module.h> #include <misc/cxl-base.h> #include <asm/pnv-pci.h> #include <asm/opal.h> #include "pci.h" int pnv_phb_to_cxl_mode(struct pci_dev *dev, uint64_t mode) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct pnv_phb *phb = hose->private_data; struct pnv_ioda_pe *pe; int rc; pe = pnv_ioda_get_pe(dev); if (!pe) return -ENODEV; pe_info(pe, "Switching PHB to CXL\n"); rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number); if (rc == OPAL_UNSUPPORTED) dev_err(&dev->dev, "Required cxl mode not supported by firmware - update skiboot\n"); else if (rc) dev_err(&dev->dev, "opal_pci_set_phb_cxl_mode failed: %i\n", rc); return rc; } EXPORT_SYMBOL(pnv_phb_to_cxl_mode); /* Find PHB for cxl dev and allocate MSI hwirqs? * Returns the absolute hardware IRQ number */ int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct pnv_phb *phb = hose->private_data; int hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, num); if (hwirq < 0) { dev_warn(&dev->dev, "Failed to find a free MSI\n"); return -ENOSPC; } return phb->msi_base + hwirq; } EXPORT_SYMBOL(pnv_cxl_alloc_hwirqs); void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct pnv_phb *phb = hose->private_data; msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, num); } EXPORT_SYMBOL(pnv_cxl_release_hwirqs); void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs, struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct pnv_phb *phb = hose->private_data; int i, hwirq; for (i = 1; i < CXL_IRQ_RANGES; i++) { if (!irqs->range[i]) continue; pr_devel("cxl release irq range 0x%x: offset: 0x%lx limit: %ld\n", i, irqs->offset[i], irqs->range[i]); hwirq = irqs->offset[i] - phb->msi_base; msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, irqs->range[i]); } } EXPORT_SYMBOL(pnv_cxl_release_hwirq_ranges); int pnv_cxl_alloc_hwirq_ranges(struct cxl_irq_ranges *irqs, struct pci_dev *dev, int num) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct pnv_phb *phb = hose->private_data; int i, hwirq, try; memset(irqs, 0, sizeof(struct cxl_irq_ranges)); /* 0 is reserved for the multiplexed PSL DSI interrupt */ for (i = 1; i < CXL_IRQ_RANGES && num; i++) { try = num; while (try) { hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, try); if (hwirq >= 0) break; try /= 2; } if (!try) goto fail; irqs->offset[i] = phb->msi_base + hwirq; irqs->range[i] = try; pr_devel("cxl alloc irq range 0x%x: offset: 0x%lx limit: %li\n", i, irqs->offset[i], irqs->range[i]); num -= try; } if (num) goto fail; return 0; fail: pnv_cxl_release_hwirq_ranges(irqs, dev); return -ENOSPC; } EXPORT_SYMBOL(pnv_cxl_alloc_hwirq_ranges); int pnv_cxl_get_irq_count(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct pnv_phb *phb = hose->private_data; return phb->msi_bmp.irq_count; } EXPORT_SYMBOL(pnv_cxl_get_irq_count); int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq, unsigned int virq) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct pnv_phb *phb = hose->private_data; unsigned int xive_num = hwirq - phb->msi_base; struct pnv_ioda_pe *pe; int rc; if (!(pe = pnv_ioda_get_pe(dev))) return -ENODEV; /* Assign XIVE to PE */ rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num); if (rc) { pe_warn(pe, "%s: OPAL error %d setting 
msi_base 0x%x " "hwirq 0x%x XIVE 0x%x PE\n", pci_name(dev), rc, phb->msi_base, hwirq, xive_num); return -EIO; } pnv_set_msi_irq_chip(phb, virq); return 0; } EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup);
linux-master
arch/powerpc/platforms/powernv/pci-cxl.c
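/*
 * Illustrative sketch, not part of the kernel file above: it mirrors the
 * "try a large contiguous block, halve on failure" loop used by
 * pnv_cxl_alloc_hwirq_ranges(), with a toy first-fit allocator standing
 * in for msi_bitmap_alloc_hwirqs(). The real code additionally reserves
 * range 0 for the multiplexed PSL DSI interrupt and stores offsets
 * relative to phb->msi_base.
 */
#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE	64
#define MAX_RANGES	4

static bool used[POOL_SIZE];

/* Toy stand-in for msi_bitmap_alloc_hwirqs(): first-fit contiguous alloc. */
static int alloc_block(int count)
{
	for (int start = 0; start + count <= POOL_SIZE; start++) {
		int i;

		for (i = 0; i < count; i++)
			if (used[start + i])
				break;
		if (i == count) {
			for (i = 0; i < count; i++)
				used[start + i] = true;
			return start;
		}
	}
	return -1;
}

/* Fill up to MAX_RANGES ranges, halving the request size on failure. */
static int alloc_ranges(int num)
{
	for (int r = 0; r < MAX_RANGES && num; r++) {
		int try = num, start = -1;

		while (try) {
			start = alloc_block(try);
			if (start >= 0)
				break;
			try /= 2;
		}
		if (!try)
			return -1;	/* too fragmented: caller must release */
		printf("range %d: start %d, len %d\n", r, start, try);
		num -= try;
	}
	return num ? -1 : 0;
}

int main(void)
{
	/* Fragment the pool a little, then ask for 20 interrupts. */
	used[8] = used[30] = used[31] = true;
	return alloc_ranges(20) ? 1 : 0;
}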
// SPDX-License-Identifier: GPL-2.0+ /* * VAS Fault handling. * Copyright 2019, IBM Corporation */ #define pr_fmt(fmt) "vas: " fmt #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/kthread.h> #include <linux/sched/signal.h> #include <linux/mmu_context.h> #include <asm/icswx.h> #include "vas.h" /* * The maximum FIFO size for fault window can be 8MB * (VAS_RX_FIFO_SIZE_MAX). Using 4MB FIFO since each VAS * instance will be having fault window. * 8MB FIFO can be used if expects more faults for each VAS * instance. */ #define VAS_FAULT_WIN_FIFO_SIZE (4 << 20) static void dump_fifo(struct vas_instance *vinst, void *entry) { unsigned long *end = vinst->fault_fifo + vinst->fault_fifo_size; unsigned long *fifo = entry; int i; pr_err("Fault fifo size %d, Max crbs %d\n", vinst->fault_fifo_size, vinst->fault_fifo_size / CRB_SIZE); /* Dump 10 CRB entries or until end of FIFO */ pr_err("Fault FIFO Dump:\n"); for (i = 0; i < 10*(CRB_SIZE/8) && fifo < end; i += 4, fifo += 4) { pr_err("[%.3d, %p]: 0x%.16lx 0x%.16lx 0x%.16lx 0x%.16lx\n", i, fifo, *fifo, *(fifo+1), *(fifo+2), *(fifo+3)); } } /* * Process valid CRBs in fault FIFO. * NX process user space requests, return credit and update the status * in CRB. If it encounters transalation error when accessing CRB or * request buffers, raises interrupt on the CPU to handle the fault. * It takes credit on fault window, updates nx_fault_stamp in CRB with * the following information and pastes CRB in fault FIFO. * * pswid - window ID of the window on which the request is sent. * fault_storage_addr - fault address * * It can raise a single interrupt for multiple faults. Expects OS to * process all valid faults and return credit for each fault on user * space and fault windows. This fault FIFO control will be done with * credit mechanism. NX can continuously paste CRBs until credits are not * available on fault window. Otherwise, returns with RMA_reject. * * Total credits available on fault window: FIFO_SIZE(4MB)/CRBS_SIZE(128) * */ irqreturn_t vas_fault_thread_fn(int irq, void *data) { struct vas_instance *vinst = data; struct coprocessor_request_block *crb, *entry; struct coprocessor_request_block buf; struct pnv_vas_window *window; unsigned long flags; void *fifo; crb = &buf; /* * VAS can interrupt with multiple page faults. So process all * valid CRBs within fault FIFO until reaches invalid CRB. * We use CCW[0] and pswid to validate CRBs: * * CCW[0] Reserved bit. When NX pastes CRB, CCW[0]=0 * OS sets this bit to 1 after reading CRB. * pswid NX assigns window ID. Set pswid to -1 after * reading CRB from fault FIFO. * * We exit this function if no valid CRBs are available to process. * So acquire fault_lock and reset fifo_in_progress to 0 before * exit. * In case kernel receives another interrupt with different page * fault, interrupt handler returns with IRQ_HANDLED if * fifo_in_progress is set. Means these new faults will be * handled by the current thread. Otherwise set fifo_in_progress * and return IRQ_WAKE_THREAD to wake up thread. */ while (true) { spin_lock_irqsave(&vinst->fault_lock, flags); /* * Advance the fault fifo pointer to next CRB. * Use CRB_SIZE rather than sizeof(*crb) since the latter is * aligned to CRB_ALIGN (256) but the CRB written to by VAS is * only CRB_SIZE in len. 
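		 * fault_crbs wraps back to 0 once it reaches
		 * fault_fifo_size / CRB_SIZE, so the FIFO is used circularly.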
*/ fifo = vinst->fault_fifo + (vinst->fault_crbs * CRB_SIZE); entry = fifo; if ((entry->stamp.nx.pswid == cpu_to_be32(FIFO_INVALID_ENTRY)) || (entry->ccw & cpu_to_be32(CCW0_INVALID))) { vinst->fifo_in_progress = 0; spin_unlock_irqrestore(&vinst->fault_lock, flags); return IRQ_HANDLED; } spin_unlock_irqrestore(&vinst->fault_lock, flags); vinst->fault_crbs++; if (vinst->fault_crbs == (vinst->fault_fifo_size / CRB_SIZE)) vinst->fault_crbs = 0; memcpy(crb, fifo, CRB_SIZE); entry->stamp.nx.pswid = cpu_to_be32(FIFO_INVALID_ENTRY); entry->ccw |= cpu_to_be32(CCW0_INVALID); /* * Return credit for the fault window. */ vas_return_credit(vinst->fault_win, false); pr_devel("VAS[%d] fault_fifo %p, fifo %p, fault_crbs %d\n", vinst->vas_id, vinst->fault_fifo, fifo, vinst->fault_crbs); vas_dump_crb(crb); window = vas_pswid_to_window(vinst, be32_to_cpu(crb->stamp.nx.pswid)); if (IS_ERR(window)) { /* * We got an interrupt about a specific send * window but we can't find that window and we can't * even clean it up (return credit on user space * window). * But we should not get here. * TODO: Disable IRQ. */ dump_fifo(vinst, (void *)entry); pr_err("VAS[%d] fault_fifo %p, fifo %p, pswid 0x%x, fault_crbs %d bad CRB?\n", vinst->vas_id, vinst->fault_fifo, fifo, be32_to_cpu(crb->stamp.nx.pswid), vinst->fault_crbs); WARN_ON_ONCE(1); } else { /* * NX sees faults only with user space windows. */ if (window->user_win) vas_update_csb(crb, &window->vas_win.task_ref); else WARN_ON_ONCE(!window->user_win); /* * Return credit for send window after processing * fault CRB. */ vas_return_credit(window, true); } } } irqreturn_t vas_fault_handler(int irq, void *dev_id) { struct vas_instance *vinst = dev_id; irqreturn_t ret = IRQ_WAKE_THREAD; unsigned long flags; /* * NX can generate an interrupt for multiple faults. So the * fault handler thread process all CRBs until finds invalid * entry. In case if NX sees continuous faults, it is possible * that the thread function entered with the first interrupt * can execute and process all valid CRBs. * So wake up thread only if the fault thread is not in progress. */ spin_lock_irqsave(&vinst->fault_lock, flags); if (vinst->fifo_in_progress) ret = IRQ_HANDLED; else vinst->fifo_in_progress = 1; spin_unlock_irqrestore(&vinst->fault_lock, flags); return ret; } /* * Fault window is opened per VAS instance. NX pastes fault CRB in fault * FIFO upon page faults. */ int vas_setup_fault_window(struct vas_instance *vinst) { struct vas_rx_win_attr attr; struct vas_window *win; vinst->fault_fifo_size = VAS_FAULT_WIN_FIFO_SIZE; vinst->fault_fifo = kzalloc(vinst->fault_fifo_size, GFP_KERNEL); if (!vinst->fault_fifo) { pr_err("Unable to alloc %d bytes for fault_fifo\n", vinst->fault_fifo_size); return -ENOMEM; } /* * Invalidate all CRB entries. NX pastes valid entry for each fault. */ memset(vinst->fault_fifo, FIFO_INVALID_ENTRY, vinst->fault_fifo_size); vas_init_rx_win_attr(&attr, VAS_COP_TYPE_FAULT); attr.rx_fifo_size = vinst->fault_fifo_size; attr.rx_fifo = __pa(vinst->fault_fifo); /* * Max creds is based on number of CRBs can fit in the FIFO. * (fault_fifo_size/CRB_SIZE). If 8MB FIFO is used, max creds * will be 0xffff since the receive creds field is 16bits wide. 
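	 * With the 4MB FIFO used here, wcreds_max works out to
	 * (4 << 20) / 128 = 32768, which fits in 16 bits.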
*/ attr.wcreds_max = vinst->fault_fifo_size / CRB_SIZE; attr.lnotify_lpid = 0; attr.lnotify_pid = mfspr(SPRN_PID); attr.lnotify_tid = mfspr(SPRN_PID); win = vas_rx_win_open(vinst->vas_id, VAS_COP_TYPE_FAULT, &attr); if (IS_ERR(win)) { pr_err("VAS: Error %ld opening FaultWin\n", PTR_ERR(win)); kfree(vinst->fault_fifo); return PTR_ERR(win); } vinst->fault_win = container_of(win, struct pnv_vas_window, vas_win); pr_devel("VAS: Created FaultWin %d, LPID/PID/TID [%d/%d/%d]\n", vinst->fault_win->vas_win.winid, attr.lnotify_lpid, attr.lnotify_pid, attr.lnotify_tid); return 0; }
linux-master
arch/powerpc/platforms/powernv/vas-fault.c
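/*
 * Illustrative sketch, not part of the kernel file above: the credit and
 * wrap-around arithmetic used for the VAS fault FIFO, with sizes mirroring
 * vas-fault.c (4MB FIFO, 128 byte CRBs). next_crb() follows how
 * vas_fault_thread_fn() advances fault_crbs.
 */
#include <stdio.h>

#define FIFO_SIZE	(4 << 20)	/* VAS_FAULT_WIN_FIFO_SIZE */
#define CRB_SIZE	128

static unsigned int fault_crbs;

/* Advance the CRB index, wrapping when the end of the FIFO is reached. */
static unsigned int next_crb(void)
{
	unsigned int cur = fault_crbs;

	fault_crbs++;
	if (fault_crbs == FIFO_SIZE / CRB_SIZE)
		fault_crbs = 0;
	return cur;
}

int main(void)
{
	printf("max credits = %d\n", FIFO_SIZE / CRB_SIZE);	/* 32768 */

	/* Walk past the end of the FIFO to show the wrap-around. */
	fault_crbs = FIFO_SIZE / CRB_SIZE - 2;
	for (int i = 0; i < 4; i++)
		printf("entry offset = %u\n", next_crb() * CRB_SIZE);
	return 0;
}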
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2016-17 IBM Corp. */ #define pr_fmt(fmt) "vas: " fmt #include <linux/types.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/log2.h> #include <linux/rcupdate.h> #include <linux/cred.h> #include <linux/sched/mm.h> #include <linux/mmu_context.h> #include <asm/switch_to.h> #include <asm/ppc-opcode.h> #include <asm/vas.h> #include "vas.h" #include "copy-paste.h" #define CREATE_TRACE_POINTS #include "vas-trace.h" /* * Compute the paste address region for the window @window using the * ->paste_base_addr and ->paste_win_id_shift we got from device tree. */ void vas_win_paste_addr(struct pnv_vas_window *window, u64 *addr, int *len) { int winid; u64 base, shift; base = window->vinst->paste_base_addr; shift = window->vinst->paste_win_id_shift; winid = window->vas_win.winid; *addr = base + (winid << shift); if (len) *len = PAGE_SIZE; pr_debug("Txwin #%d: Paste addr 0x%llx\n", winid, *addr); } static inline void get_hvwc_mmio_bar(struct pnv_vas_window *window, u64 *start, int *len) { u64 pbaddr; pbaddr = window->vinst->hvwc_bar_start; *start = pbaddr + window->vas_win.winid * VAS_HVWC_SIZE; *len = VAS_HVWC_SIZE; } static inline void get_uwc_mmio_bar(struct pnv_vas_window *window, u64 *start, int *len) { u64 pbaddr; pbaddr = window->vinst->uwc_bar_start; *start = pbaddr + window->vas_win.winid * VAS_UWC_SIZE; *len = VAS_UWC_SIZE; } /* * Map the paste bus address of the given send window into kernel address * space. Unlike MMIO regions (map_mmio_region() below), paste region must * be mapped cache-able and is only applicable to send windows. */ static void *map_paste_region(struct pnv_vas_window *txwin) { int len; void *map; char *name; u64 start; name = kasprintf(GFP_KERNEL, "window-v%d-w%d", txwin->vinst->vas_id, txwin->vas_win.winid); if (!name) goto free_name; txwin->paste_addr_name = name; vas_win_paste_addr(txwin, &start, &len); if (!request_mem_region(start, len, name)) { pr_devel("%s(): request_mem_region(0x%llx, %d) failed\n", __func__, start, len); goto free_name; } map = ioremap_cache(start, len); if (!map) { pr_devel("%s(): ioremap_cache(0x%llx, %d) failed\n", __func__, start, len); goto free_name; } pr_devel("Mapped paste addr 0x%llx to kaddr 0x%p\n", start, map); return map; free_name: kfree(name); return ERR_PTR(-ENOMEM); } static void *map_mmio_region(char *name, u64 start, int len) { void *map; if (!request_mem_region(start, len, name)) { pr_devel("%s(): request_mem_region(0x%llx, %d) failed\n", __func__, start, len); return NULL; } map = ioremap(start, len); if (!map) { pr_devel("%s(): ioremap(0x%llx, %d) failed\n", __func__, start, len); return NULL; } return map; } static void unmap_region(void *addr, u64 start, int len) { iounmap(addr); release_mem_region((phys_addr_t)start, len); } /* * Unmap the paste address region for a window. */ static void unmap_paste_region(struct pnv_vas_window *window) { int len; u64 busaddr_start; if (window->paste_kaddr) { vas_win_paste_addr(window, &busaddr_start, &len); unmap_region(window->paste_kaddr, busaddr_start, len); window->paste_kaddr = NULL; kfree(window->paste_addr_name); window->paste_addr_name = NULL; } } /* * Unmap the MMIO regions for a window. Hold the vas_mutex so we don't * unmap when the window's debugfs dir is in use. This serializes close * of a window even on another VAS instance but since its not a critical * path, just minimize the time we hold the mutex for now. We can add * a per-instance mutex later if necessary. 
*/ static void unmap_winctx_mmio_bars(struct pnv_vas_window *window) { int len; void *uwc_map; void *hvwc_map; u64 busaddr_start; mutex_lock(&vas_mutex); hvwc_map = window->hvwc_map; window->hvwc_map = NULL; uwc_map = window->uwc_map; window->uwc_map = NULL; mutex_unlock(&vas_mutex); if (hvwc_map) { get_hvwc_mmio_bar(window, &busaddr_start, &len); unmap_region(hvwc_map, busaddr_start, len); } if (uwc_map) { get_uwc_mmio_bar(window, &busaddr_start, &len); unmap_region(uwc_map, busaddr_start, len); } } /* * Find the Hypervisor Window Context (HVWC) MMIO Base Address Region and the * OS/User Window Context (UWC) MMIO Base Address Region for the given window. * Map these bus addresses and save the mapped kernel addresses in @window. */ static int map_winctx_mmio_bars(struct pnv_vas_window *window) { int len; u64 start; get_hvwc_mmio_bar(window, &start, &len); window->hvwc_map = map_mmio_region("HVWCM_Window", start, len); get_uwc_mmio_bar(window, &start, &len); window->uwc_map = map_mmio_region("UWCM_Window", start, len); if (!window->hvwc_map || !window->uwc_map) { unmap_winctx_mmio_bars(window); return -1; } return 0; } /* * Reset all valid registers in the HV and OS/User Window Contexts for * the window identified by @window. * * NOTE: We cannot really use a for loop to reset window context. Not all * offsets in a window context are valid registers and the valid * registers are not sequential. And, we can only write to offsets * with valid registers. */ static void reset_window_regs(struct pnv_vas_window *window) { write_hvwc_reg(window, VREG(LPID), 0ULL); write_hvwc_reg(window, VREG(PID), 0ULL); write_hvwc_reg(window, VREG(XLATE_MSR), 0ULL); write_hvwc_reg(window, VREG(XLATE_LPCR), 0ULL); write_hvwc_reg(window, VREG(XLATE_CTL), 0ULL); write_hvwc_reg(window, VREG(AMR), 0ULL); write_hvwc_reg(window, VREG(SEIDR), 0ULL); write_hvwc_reg(window, VREG(FAULT_TX_WIN), 0ULL); write_hvwc_reg(window, VREG(OSU_INTR_SRC_RA), 0ULL); write_hvwc_reg(window, VREG(HV_INTR_SRC_RA), 0ULL); write_hvwc_reg(window, VREG(PSWID), 0ULL); write_hvwc_reg(window, VREG(LFIFO_BAR), 0ULL); write_hvwc_reg(window, VREG(LDATA_STAMP_CTL), 0ULL); write_hvwc_reg(window, VREG(LDMA_CACHE_CTL), 0ULL); write_hvwc_reg(window, VREG(LRFIFO_PUSH), 0ULL); write_hvwc_reg(window, VREG(CURR_MSG_COUNT), 0ULL); write_hvwc_reg(window, VREG(LNOTIFY_AFTER_COUNT), 0ULL); write_hvwc_reg(window, VREG(LRX_WCRED), 0ULL); write_hvwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL); write_hvwc_reg(window, VREG(TX_WCRED), 0ULL); write_hvwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL); write_hvwc_reg(window, VREG(LFIFO_SIZE), 0ULL); write_hvwc_reg(window, VREG(WINCTL), 0ULL); write_hvwc_reg(window, VREG(WIN_STATUS), 0ULL); write_hvwc_reg(window, VREG(WIN_CTX_CACHING_CTL), 0ULL); write_hvwc_reg(window, VREG(TX_RSVD_BUF_COUNT), 0ULL); write_hvwc_reg(window, VREG(LRFIFO_WIN_PTR), 0ULL); write_hvwc_reg(window, VREG(LNOTIFY_CTL), 0ULL); write_hvwc_reg(window, VREG(LNOTIFY_PID), 0ULL); write_hvwc_reg(window, VREG(LNOTIFY_LPID), 0ULL); write_hvwc_reg(window, VREG(LNOTIFY_TID), 0ULL); write_hvwc_reg(window, VREG(LNOTIFY_SCOPE), 0ULL); write_hvwc_reg(window, VREG(NX_UTIL_ADDER), 0ULL); /* Skip read-only registers: NX_UTIL and NX_UTIL_SE */ /* * The send and receive window credit adder registers are also * accessible from HVWC and have been initialized above. 
We don't * need to initialize from the OS/User Window Context, so skip * following calls: * * write_uwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL); * write_uwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL); */ } /* * Initialize window context registers related to Address Translation. * These registers are common to send/receive windows although they * differ for user/kernel windows. As we resolve the TODOs we may * want to add fields to vas_winctx and move the initialization to * init_vas_winctx_regs(). */ static void init_xlate_regs(struct pnv_vas_window *window, bool user_win) { u64 lpcr, val; /* * MSR_TA, MSR_US are false for both kernel and user. * MSR_DR and MSR_PR are false for kernel. */ val = 0ULL; val = SET_FIELD(VAS_XLATE_MSR_HV, val, 1); val = SET_FIELD(VAS_XLATE_MSR_SF, val, 1); if (user_win) { val = SET_FIELD(VAS_XLATE_MSR_DR, val, 1); val = SET_FIELD(VAS_XLATE_MSR_PR, val, 1); } write_hvwc_reg(window, VREG(XLATE_MSR), val); lpcr = mfspr(SPRN_LPCR); val = 0ULL; /* * NOTE: From Section 5.7.8.1 Segment Lookaside Buffer of the * Power ISA, v3.0B, Page size encoding is 0 = 4KB, 5 = 64KB. * * NOTE: From Section 1.3.1, Address Translation Context of the * Nest MMU Workbook, LPCR_SC should be 0 for Power9. */ val = SET_FIELD(VAS_XLATE_LPCR_PAGE_SIZE, val, 5); val = SET_FIELD(VAS_XLATE_LPCR_ISL, val, lpcr & LPCR_ISL); val = SET_FIELD(VAS_XLATE_LPCR_TC, val, lpcr & LPCR_TC); val = SET_FIELD(VAS_XLATE_LPCR_SC, val, 0); write_hvwc_reg(window, VREG(XLATE_LPCR), val); /* * Section 1.3.1 (Address translation Context) of NMMU workbook. * 0b00 Hashed Page Table mode * 0b01 Reserved * 0b10 Radix on HPT * 0b11 Radix on Radix */ val = 0ULL; val = SET_FIELD(VAS_XLATE_MODE, val, radix_enabled() ? 3 : 2); write_hvwc_reg(window, VREG(XLATE_CTL), val); /* * TODO: Can we mfspr(AMR) even for user windows? */ val = 0ULL; val = SET_FIELD(VAS_AMR, val, mfspr(SPRN_AMR)); write_hvwc_reg(window, VREG(AMR), val); val = 0ULL; val = SET_FIELD(VAS_SEIDR, val, 0); write_hvwc_reg(window, VREG(SEIDR), val); } /* * Initialize Reserved Send Buffer Count for the send window. It involves * writing to the register, reading it back to confirm that the hardware * has enough buffers to reserve. See section 1.3.1.2.1 of VAS workbook. * * Since we can only make a best-effort attempt to fulfill the request, * we don't return any errors if we cannot. * * TODO: Reserved (aka dedicated) send buffers are not supported yet. */ static void init_rsvd_tx_buf_count(struct pnv_vas_window *txwin, struct vas_winctx *winctx) { write_hvwc_reg(txwin, VREG(TX_RSVD_BUF_COUNT), 0ULL); } /* * init_winctx_regs() * Initialize window context registers for a receive window. * Except for caching control and marking window open, the registers * are initialized in the order listed in Section 3.1.4 (Window Context * Cache Register Details) of the VAS workbook although they don't need * to be. * * Design note: For NX receive windows, NX allocates the FIFO buffer in OPAL * (so that it can get a large contiguous area) and passes that buffer * to kernel via device tree. We now write that buffer address to the * FIFO BAR. Would it make sense to do this all in OPAL? i.e have OPAL * write the per-chip RX FIFO addresses to the windows during boot-up * as a one-time task? That could work for NX but what about other * receivers? Let the receivers tell us the rx-fifo buffers for now. 
*/ static void init_winctx_regs(struct pnv_vas_window *window, struct vas_winctx *winctx) { u64 val; int fifo_size; reset_window_regs(window); val = 0ULL; val = SET_FIELD(VAS_LPID, val, winctx->lpid); write_hvwc_reg(window, VREG(LPID), val); val = 0ULL; val = SET_FIELD(VAS_PID_ID, val, winctx->pidr); write_hvwc_reg(window, VREG(PID), val); init_xlate_regs(window, winctx->user_win); val = 0ULL; val = SET_FIELD(VAS_FAULT_TX_WIN, val, winctx->fault_win_id); write_hvwc_reg(window, VREG(FAULT_TX_WIN), val); /* In PowerNV, interrupts go to HV. */ write_hvwc_reg(window, VREG(OSU_INTR_SRC_RA), 0ULL); val = 0ULL; val = SET_FIELD(VAS_HV_INTR_SRC_RA, val, winctx->irq_port); write_hvwc_reg(window, VREG(HV_INTR_SRC_RA), val); val = 0ULL; val = SET_FIELD(VAS_PSWID_EA_HANDLE, val, winctx->pswid); write_hvwc_reg(window, VREG(PSWID), val); write_hvwc_reg(window, VREG(SPARE1), 0ULL); write_hvwc_reg(window, VREG(SPARE2), 0ULL); write_hvwc_reg(window, VREG(SPARE3), 0ULL); /* * NOTE: VAS expects the FIFO address to be copied into the LFIFO_BAR * register as is - do NOT shift the address into VAS_LFIFO_BAR * bit fields! Ok to set the page migration select fields - * VAS ignores the lower 10+ bits in the address anyway, because * the minimum FIFO size is 1K? * * See also: Design note in function header. */ val = winctx->rx_fifo; val = SET_FIELD(VAS_PAGE_MIGRATION_SELECT, val, 0); write_hvwc_reg(window, VREG(LFIFO_BAR), val); val = 0ULL; val = SET_FIELD(VAS_LDATA_STAMP, val, winctx->data_stamp); write_hvwc_reg(window, VREG(LDATA_STAMP_CTL), val); val = 0ULL; val = SET_FIELD(VAS_LDMA_TYPE, val, winctx->dma_type); val = SET_FIELD(VAS_LDMA_FIFO_DISABLE, val, winctx->fifo_disable); write_hvwc_reg(window, VREG(LDMA_CACHE_CTL), val); write_hvwc_reg(window, VREG(LRFIFO_PUSH), 0ULL); write_hvwc_reg(window, VREG(CURR_MSG_COUNT), 0ULL); write_hvwc_reg(window, VREG(LNOTIFY_AFTER_COUNT), 0ULL); val = 0ULL; val = SET_FIELD(VAS_LRX_WCRED, val, winctx->wcreds_max); write_hvwc_reg(window, VREG(LRX_WCRED), val); val = 0ULL; val = SET_FIELD(VAS_TX_WCRED, val, winctx->wcreds_max); write_hvwc_reg(window, VREG(TX_WCRED), val); write_hvwc_reg(window, VREG(LRX_WCRED_ADDER), 0ULL); write_hvwc_reg(window, VREG(TX_WCRED_ADDER), 0ULL); fifo_size = winctx->rx_fifo_size / 1024; val = 0ULL; val = SET_FIELD(VAS_LFIFO_SIZE, val, ilog2(fifo_size)); write_hvwc_reg(window, VREG(LFIFO_SIZE), val); /* Update window control and caching control registers last so * we mark the window open only after fully initializing it and * pushing context to cache. 
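	 * (The WIN_CTX_CACHING_CTL push-to-memory and the WINCTL open bit
	 * are therefore the last two writes in this function.)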
*/ write_hvwc_reg(window, VREG(WIN_STATUS), 0ULL); init_rsvd_tx_buf_count(window, winctx); /* for a send window, point to the matching receive window */ val = 0ULL; val = SET_FIELD(VAS_LRX_WIN_ID, val, winctx->rx_win_id); write_hvwc_reg(window, VREG(LRFIFO_WIN_PTR), val); write_hvwc_reg(window, VREG(SPARE4), 0ULL); val = 0ULL; val = SET_FIELD(VAS_NOTIFY_DISABLE, val, winctx->notify_disable); val = SET_FIELD(VAS_INTR_DISABLE, val, winctx->intr_disable); val = SET_FIELD(VAS_NOTIFY_EARLY, val, winctx->notify_early); val = SET_FIELD(VAS_NOTIFY_OSU_INTR, val, winctx->notify_os_intr_reg); write_hvwc_reg(window, VREG(LNOTIFY_CTL), val); val = 0ULL; val = SET_FIELD(VAS_LNOTIFY_PID, val, winctx->lnotify_pid); write_hvwc_reg(window, VREG(LNOTIFY_PID), val); val = 0ULL; val = SET_FIELD(VAS_LNOTIFY_LPID, val, winctx->lnotify_lpid); write_hvwc_reg(window, VREG(LNOTIFY_LPID), val); val = 0ULL; val = SET_FIELD(VAS_LNOTIFY_TID, val, winctx->lnotify_tid); write_hvwc_reg(window, VREG(LNOTIFY_TID), val); val = 0ULL; val = SET_FIELD(VAS_LNOTIFY_MIN_SCOPE, val, winctx->min_scope); val = SET_FIELD(VAS_LNOTIFY_MAX_SCOPE, val, winctx->max_scope); write_hvwc_reg(window, VREG(LNOTIFY_SCOPE), val); /* Skip read-only registers NX_UTIL and NX_UTIL_SE */ write_hvwc_reg(window, VREG(SPARE5), 0ULL); write_hvwc_reg(window, VREG(NX_UTIL_ADDER), 0ULL); write_hvwc_reg(window, VREG(SPARE6), 0ULL); /* Finally, push window context to memory and... */ val = 0ULL; val = SET_FIELD(VAS_PUSH_TO_MEM, val, 1); write_hvwc_reg(window, VREG(WIN_CTX_CACHING_CTL), val); /* ... mark the window open for business */ val = 0ULL; val = SET_FIELD(VAS_WINCTL_REJ_NO_CREDIT, val, winctx->rej_no_credit); val = SET_FIELD(VAS_WINCTL_PIN, val, winctx->pin_win); val = SET_FIELD(VAS_WINCTL_TX_WCRED_MODE, val, winctx->tx_wcred_mode); val = SET_FIELD(VAS_WINCTL_RX_WCRED_MODE, val, winctx->rx_wcred_mode); val = SET_FIELD(VAS_WINCTL_TX_WORD_MODE, val, winctx->tx_word_mode); val = SET_FIELD(VAS_WINCTL_RX_WORD_MODE, val, winctx->rx_word_mode); val = SET_FIELD(VAS_WINCTL_FAULT_WIN, val, winctx->fault_win); val = SET_FIELD(VAS_WINCTL_NX_WIN, val, winctx->nx_win); val = SET_FIELD(VAS_WINCTL_OPEN, val, 1); write_hvwc_reg(window, VREG(WINCTL), val); } static void vas_release_window_id(struct ida *ida, int winid) { ida_free(ida, winid); } static int vas_assign_window_id(struct ida *ida) { int winid = ida_alloc_max(ida, VAS_WINDOWS_PER_CHIP - 1, GFP_KERNEL); if (winid == -ENOSPC) { pr_err("Too many (%d) open windows\n", VAS_WINDOWS_PER_CHIP); return -EAGAIN; } return winid; } static void vas_window_free(struct pnv_vas_window *window) { struct vas_instance *vinst = window->vinst; int winid = window->vas_win.winid; unmap_winctx_mmio_bars(window); vas_window_free_dbgdir(window); kfree(window); vas_release_window_id(&vinst->ida, winid); } static struct pnv_vas_window *vas_window_alloc(struct vas_instance *vinst) { int winid; struct pnv_vas_window *window; winid = vas_assign_window_id(&vinst->ida); if (winid < 0) return ERR_PTR(winid); window = kzalloc(sizeof(*window), GFP_KERNEL); if (!window) goto out_free; window->vinst = vinst; window->vas_win.winid = winid; if (map_winctx_mmio_bars(window)) goto out_free; vas_window_init_dbgdir(window); return window; out_free: kfree(window); vas_release_window_id(&vinst->ida, winid); return ERR_PTR(-ENOMEM); } static void put_rx_win(struct pnv_vas_window *rxwin) { /* Better not be a send window! */ WARN_ON_ONCE(rxwin->tx_win); atomic_dec(&rxwin->num_txwins); } /* * Find the user space receive window given the @pswid. 
* - We must have a valid vasid and it must belong to this instance. * (so both send and receive windows are on the same VAS instance) * - The window must refer to an OPEN, FTW, RECEIVE window. * * NOTE: We access ->windows[] table and assume that vinst->mutex is held. */ static struct pnv_vas_window *get_user_rxwin(struct vas_instance *vinst, u32 pswid) { int vasid, winid; struct pnv_vas_window *rxwin; decode_pswid(pswid, &vasid, &winid); if (vinst->vas_id != vasid) return ERR_PTR(-EINVAL); rxwin = vinst->windows[winid]; if (!rxwin || rxwin->tx_win || rxwin->vas_win.cop != VAS_COP_TYPE_FTW) return ERR_PTR(-EINVAL); return rxwin; } /* * Get the VAS receive window associated with NX engine identified * by @cop and if applicable, @pswid. * * See also function header of set_vinst_win(). */ static struct pnv_vas_window *get_vinst_rxwin(struct vas_instance *vinst, enum vas_cop_type cop, u32 pswid) { struct pnv_vas_window *rxwin; mutex_lock(&vinst->mutex); if (cop == VAS_COP_TYPE_FTW) rxwin = get_user_rxwin(vinst, pswid); else rxwin = vinst->rxwin[cop] ?: ERR_PTR(-EINVAL); if (!IS_ERR(rxwin)) atomic_inc(&rxwin->num_txwins); mutex_unlock(&vinst->mutex); return rxwin; } /* * We have two tables of windows in a VAS instance. The first one, * ->windows[], contains all the windows in the instance and allows * looking up a window by its id. It is used to look up send windows * during fault handling and receive windows when pairing user space * send/receive windows. * * The second table, ->rxwin[], contains receive windows that are * associated with NX engines. This table has VAS_COP_TYPE_MAX * entries and is used to look up a receive window by its * coprocessor type. * * Here, we save @window in the ->windows[] table. If it is a receive * window, we also save the window in the ->rxwin[] table. */ static void set_vinst_win(struct vas_instance *vinst, struct pnv_vas_window *window) { int id = window->vas_win.winid; mutex_lock(&vinst->mutex); /* * There should only be one receive window for a coprocessor type * unless its a user (FTW) window. */ if (!window->user_win && !window->tx_win) { WARN_ON_ONCE(vinst->rxwin[window->vas_win.cop]); vinst->rxwin[window->vas_win.cop] = window; } WARN_ON_ONCE(vinst->windows[id] != NULL); vinst->windows[id] = window; mutex_unlock(&vinst->mutex); } /* * Clear this window from the table(s) of windows for this VAS instance. * See also function header of set_vinst_win(). */ static void clear_vinst_win(struct pnv_vas_window *window) { int id = window->vas_win.winid; struct vas_instance *vinst = window->vinst; mutex_lock(&vinst->mutex); if (!window->user_win && !window->tx_win) { WARN_ON_ONCE(!vinst->rxwin[window->vas_win.cop]); vinst->rxwin[window->vas_win.cop] = NULL; } WARN_ON_ONCE(vinst->windows[id] != window); vinst->windows[id] = NULL; mutex_unlock(&vinst->mutex); } static void init_winctx_for_rxwin(struct pnv_vas_window *rxwin, struct vas_rx_win_attr *rxattr, struct vas_winctx *winctx) { /* * We first zero (memset()) all fields and only set non-zero fields. 
* Following fields are 0/false but maybe deserve a comment: * * ->notify_os_intr_reg In powerNV, send intrs to HV * ->notify_disable False for NX windows * ->intr_disable False for Fault Windows * ->xtra_write False for NX windows * ->notify_early NA for NX windows * ->rsvd_txbuf_count NA for Rx windows * ->lpid, ->pid, ->tid NA for Rx windows */ memset(winctx, 0, sizeof(struct vas_winctx)); winctx->rx_fifo = rxattr->rx_fifo; winctx->rx_fifo_size = rxattr->rx_fifo_size; winctx->wcreds_max = rxwin->vas_win.wcreds_max; winctx->pin_win = rxattr->pin_win; winctx->nx_win = rxattr->nx_win; winctx->fault_win = rxattr->fault_win; winctx->user_win = rxattr->user_win; winctx->rej_no_credit = rxattr->rej_no_credit; winctx->rx_word_mode = rxattr->rx_win_ord_mode; winctx->tx_word_mode = rxattr->tx_win_ord_mode; winctx->rx_wcred_mode = rxattr->rx_wcred_mode; winctx->tx_wcred_mode = rxattr->tx_wcred_mode; winctx->notify_early = rxattr->notify_early; if (winctx->nx_win) { winctx->data_stamp = true; winctx->intr_disable = true; winctx->pin_win = true; WARN_ON_ONCE(winctx->fault_win); WARN_ON_ONCE(!winctx->rx_word_mode); WARN_ON_ONCE(!winctx->tx_word_mode); WARN_ON_ONCE(winctx->notify_after_count); } else if (winctx->fault_win) { winctx->notify_disable = true; } else if (winctx->user_win) { /* * Section 1.8.1 Low Latency Core-Core Wake up of * the VAS workbook: * * - disable credit checks ([tr]x_wcred_mode = false) * - disable FIFO writes * - enable ASB_Notify, disable interrupt */ winctx->fifo_disable = true; winctx->intr_disable = true; winctx->rx_fifo = 0; } winctx->lnotify_lpid = rxattr->lnotify_lpid; winctx->lnotify_pid = rxattr->lnotify_pid; winctx->lnotify_tid = rxattr->lnotify_tid; winctx->pswid = rxattr->pswid; winctx->dma_type = VAS_DMA_TYPE_INJECT; winctx->tc_mode = rxattr->tc_mode; winctx->min_scope = VAS_SCOPE_LOCAL; winctx->max_scope = VAS_SCOPE_VECTORED_GROUP; if (rxwin->vinst->virq) winctx->irq_port = rxwin->vinst->irq_port; } static bool rx_win_args_valid(enum vas_cop_type cop, struct vas_rx_win_attr *attr) { pr_debug("Rxattr: fault %d, notify %d, intr %d, early %d, fifo %d\n", attr->fault_win, attr->notify_disable, attr->intr_disable, attr->notify_early, attr->rx_fifo_size); if (cop >= VAS_COP_TYPE_MAX) return false; if (cop != VAS_COP_TYPE_FTW && attr->rx_fifo_size < VAS_RX_FIFO_SIZE_MIN) return false; if (attr->rx_fifo_size > VAS_RX_FIFO_SIZE_MAX) return false; if (!attr->wcreds_max) return false; if (attr->nx_win) { /* cannot be fault or user window if it is nx */ if (attr->fault_win || attr->user_win) return false; /* * Section 3.1.4.32: NX Windows must not disable notification, * and must not enable interrupts or early notification. */ if (attr->notify_disable || !attr->intr_disable || attr->notify_early) return false; } else if (attr->fault_win) { /* cannot be both fault and user window */ if (attr->user_win) return false; /* * Section 3.1.4.32: Fault windows must disable notification * but not interrupts. */ if (!attr->notify_disable || attr->intr_disable) return false; } else if (attr->user_win) { /* * User receive windows are only for fast-thread-wakeup * (FTW). They don't need a FIFO and must disable interrupts */ if (attr->rx_fifo || attr->rx_fifo_size || !attr->intr_disable) return false; } else { /* Rx window must be one of NX or Fault or User window. 
*/ return false; } return true; } void vas_init_rx_win_attr(struct vas_rx_win_attr *rxattr, enum vas_cop_type cop) { memset(rxattr, 0, sizeof(*rxattr)); if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI || cop == VAS_COP_TYPE_GZIP || cop == VAS_COP_TYPE_GZIP_HIPRI) { rxattr->pin_win = true; rxattr->nx_win = true; rxattr->fault_win = false; rxattr->intr_disable = true; rxattr->rx_wcred_mode = true; rxattr->tx_wcred_mode = true; rxattr->rx_win_ord_mode = true; rxattr->tx_win_ord_mode = true; } else if (cop == VAS_COP_TYPE_FAULT) { rxattr->pin_win = true; rxattr->fault_win = true; rxattr->notify_disable = true; rxattr->rx_wcred_mode = true; rxattr->rx_win_ord_mode = true; rxattr->rej_no_credit = true; rxattr->tc_mode = VAS_THRESH_DISABLED; } else if (cop == VAS_COP_TYPE_FTW) { rxattr->user_win = true; rxattr->intr_disable = true; /* * As noted in the VAS Workbook we disable credit checks. * If we enable credit checks in the future, we must also * implement a mechanism to return the user credits or new * paste operations will fail. */ } } EXPORT_SYMBOL_GPL(vas_init_rx_win_attr); struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop, struct vas_rx_win_attr *rxattr) { struct pnv_vas_window *rxwin; struct vas_winctx winctx; struct vas_instance *vinst; trace_vas_rx_win_open(current, vasid, cop, rxattr); if (!rx_win_args_valid(cop, rxattr)) return ERR_PTR(-EINVAL); vinst = find_vas_instance(vasid); if (!vinst) { pr_devel("vasid %d not found!\n", vasid); return ERR_PTR(-EINVAL); } pr_devel("Found instance %d\n", vasid); rxwin = vas_window_alloc(vinst); if (IS_ERR(rxwin)) { pr_devel("Unable to allocate memory for Rx window\n"); return (struct vas_window *)rxwin; } rxwin->tx_win = false; rxwin->nx_win = rxattr->nx_win; rxwin->user_win = rxattr->user_win; rxwin->vas_win.cop = cop; rxwin->vas_win.wcreds_max = rxattr->wcreds_max; init_winctx_for_rxwin(rxwin, rxattr, &winctx); init_winctx_regs(rxwin, &winctx); set_vinst_win(vinst, rxwin); return &rxwin->vas_win; } EXPORT_SYMBOL_GPL(vas_rx_win_open); void vas_init_tx_win_attr(struct vas_tx_win_attr *txattr, enum vas_cop_type cop) { memset(txattr, 0, sizeof(*txattr)); if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI || cop == VAS_COP_TYPE_GZIP || cop == VAS_COP_TYPE_GZIP_HIPRI) { txattr->rej_no_credit = false; txattr->rx_wcred_mode = true; txattr->tx_wcred_mode = true; txattr->rx_win_ord_mode = true; txattr->tx_win_ord_mode = true; } else if (cop == VAS_COP_TYPE_FTW) { txattr->user_win = true; } } EXPORT_SYMBOL_GPL(vas_init_tx_win_attr); static void init_winctx_for_txwin(struct pnv_vas_window *txwin, struct vas_tx_win_attr *txattr, struct vas_winctx *winctx) { /* * We first zero all fields and only set non-zero ones. Following * are some fields set to 0/false for the stated reason: * * ->notify_os_intr_reg In powernv, send intrs to HV * ->rsvd_txbuf_count Not supported yet. 
* ->notify_disable False for NX windows * ->xtra_write False for NX windows * ->notify_early NA for NX windows * ->lnotify_lpid NA for Tx windows * ->lnotify_pid NA for Tx windows * ->lnotify_tid NA for Tx windows * ->tx_win_cred_mode Ignore for now for NX windows * ->rx_win_cred_mode Ignore for now for NX windows */ memset(winctx, 0, sizeof(struct vas_winctx)); winctx->wcreds_max = txwin->vas_win.wcreds_max; winctx->user_win = txattr->user_win; winctx->nx_win = txwin->rxwin->nx_win; winctx->pin_win = txattr->pin_win; winctx->rej_no_credit = txattr->rej_no_credit; winctx->rsvd_txbuf_enable = txattr->rsvd_txbuf_enable; winctx->rx_wcred_mode = txattr->rx_wcred_mode; winctx->tx_wcred_mode = txattr->tx_wcred_mode; winctx->rx_word_mode = txattr->rx_win_ord_mode; winctx->tx_word_mode = txattr->tx_win_ord_mode; winctx->rsvd_txbuf_count = txattr->rsvd_txbuf_count; winctx->intr_disable = true; if (winctx->nx_win) winctx->data_stamp = true; winctx->lpid = txattr->lpid; winctx->pidr = txattr->pidr; winctx->rx_win_id = txwin->rxwin->vas_win.winid; /* * IRQ and fault window setup is successful. Set fault window * for the send window so that ready to handle faults. */ if (txwin->vinst->virq) winctx->fault_win_id = txwin->vinst->fault_win->vas_win.winid; winctx->dma_type = VAS_DMA_TYPE_INJECT; winctx->tc_mode = txattr->tc_mode; winctx->min_scope = VAS_SCOPE_LOCAL; winctx->max_scope = VAS_SCOPE_VECTORED_GROUP; if (txwin->vinst->virq) winctx->irq_port = txwin->vinst->irq_port; winctx->pswid = txattr->pswid ? txattr->pswid : encode_pswid(txwin->vinst->vas_id, txwin->vas_win.winid); } static bool tx_win_args_valid(enum vas_cop_type cop, struct vas_tx_win_attr *attr) { if (attr->tc_mode != VAS_THRESH_DISABLED) return false; if (cop > VAS_COP_TYPE_MAX) return false; if (attr->wcreds_max > VAS_TX_WCREDS_MAX) return false; if (attr->user_win) { if (attr->rsvd_txbuf_count) return false; if (cop != VAS_COP_TYPE_FTW && cop != VAS_COP_TYPE_GZIP && cop != VAS_COP_TYPE_GZIP_HIPRI) return false; } return true; } struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop, struct vas_tx_win_attr *attr) { int rc; struct pnv_vas_window *txwin; struct pnv_vas_window *rxwin; struct vas_winctx winctx; struct vas_instance *vinst; trace_vas_tx_win_open(current, vasid, cop, attr); if (!tx_win_args_valid(cop, attr)) return ERR_PTR(-EINVAL); /* * If caller did not specify a vasid but specified the PSWID of a * receive window (applicable only to FTW windows), use the vasid * from that receive window. */ if (vasid == -1 && attr->pswid) decode_pswid(attr->pswid, &vasid, NULL); vinst = find_vas_instance(vasid); if (!vinst) { pr_devel("vasid %d not found!\n", vasid); return ERR_PTR(-EINVAL); } rxwin = get_vinst_rxwin(vinst, cop, attr->pswid); if (IS_ERR(rxwin)) { pr_devel("No RxWin for vasid %d, cop %d\n", vasid, cop); return (struct vas_window *)rxwin; } txwin = vas_window_alloc(vinst); if (IS_ERR(txwin)) { rc = PTR_ERR(txwin); goto put_rxwin; } txwin->vas_win.cop = cop; txwin->tx_win = 1; txwin->rxwin = rxwin; txwin->nx_win = txwin->rxwin->nx_win; txwin->user_win = attr->user_win; txwin->vas_win.wcreds_max = attr->wcreds_max ?: VAS_WCREDS_DEFAULT; init_winctx_for_txwin(txwin, attr, &winctx); init_winctx_regs(txwin, &winctx); /* * If its a kernel send window, map the window address into the * kernel's address space. For user windows, user must issue an * mmap() to map the window into their address space. * * NOTE: If kernel ever resubmits a user CRB after handling a page * fault, we will need to map this into kernel as well. 
*/ if (!txwin->user_win) { txwin->paste_kaddr = map_paste_region(txwin); if (IS_ERR(txwin->paste_kaddr)) { rc = PTR_ERR(txwin->paste_kaddr); goto free_window; } } else { /* * Interrupt hanlder or fault window setup failed. Means * NX can not generate fault for page fault. So not * opening for user space tx window. */ if (!vinst->virq) { rc = -ENODEV; goto free_window; } rc = get_vas_user_win_ref(&txwin->vas_win.task_ref); if (rc) goto free_window; vas_user_win_add_mm_context(&txwin->vas_win.task_ref); } set_vinst_win(vinst, txwin); return &txwin->vas_win; free_window: vas_window_free(txwin); put_rxwin: put_rx_win(rxwin); return ERR_PTR(rc); } EXPORT_SYMBOL_GPL(vas_tx_win_open); int vas_copy_crb(void *crb, int offset) { return vas_copy(crb, offset); } EXPORT_SYMBOL_GPL(vas_copy_crb); #define RMA_LSMP_REPORT_ENABLE PPC_BIT(53) int vas_paste_crb(struct vas_window *vwin, int offset, bool re) { struct pnv_vas_window *txwin; int rc; void *addr; uint64_t val; txwin = container_of(vwin, struct pnv_vas_window, vas_win); trace_vas_paste_crb(current, txwin); /* * Only NX windows are supported for now and hardware assumes * report-enable flag is set for NX windows. Ensure software * complies too. */ WARN_ON_ONCE(txwin->nx_win && !re); addr = txwin->paste_kaddr; if (re) { /* * Set the REPORT_ENABLE bit (equivalent to writing * to 1K offset of the paste address) */ val = SET_FIELD(RMA_LSMP_REPORT_ENABLE, 0ULL, 1); addr += val; } /* * Map the raw CR value from vas_paste() to an error code (there * is just pass or fail for now though). */ rc = vas_paste(addr, offset); if (rc == 2) rc = 0; else rc = -EINVAL; pr_debug("Txwin #%d: Msg count %llu\n", txwin->vas_win.winid, read_hvwc_reg(txwin, VREG(LRFIFO_PUSH))); return rc; } EXPORT_SYMBOL_GPL(vas_paste_crb); /* * If credit checking is enabled for this window, poll for the return * of window credits (i.e for NX engines to process any outstanding CRBs). * Since NX-842 waits for the CRBs to be processed before closing the * window, we should not have to wait for too long. * * TODO: We retry in 10ms intervals now. We could/should probably peek at * the VAS_LRFIFO_PUSH_OFFSET register to get an estimate of pending * CRBs on the FIFO and compute the delay dynamically on each retry. * But that is not really needed until we support NX-GZIP access from * user space. (NX-842 driver waits for CSB and Fast thread-wakeup * doesn't use credit checking). */ static void poll_window_credits(struct pnv_vas_window *window) { u64 val; int creds, mode; int count = 0; val = read_hvwc_reg(window, VREG(WINCTL)); if (window->tx_win) mode = GET_FIELD(VAS_WINCTL_TX_WCRED_MODE, val); else mode = GET_FIELD(VAS_WINCTL_RX_WCRED_MODE, val); if (!mode) return; retry: if (window->tx_win) { val = read_hvwc_reg(window, VREG(TX_WCRED)); creds = GET_FIELD(VAS_TX_WCRED, val); } else { val = read_hvwc_reg(window, VREG(LRX_WCRED)); creds = GET_FIELD(VAS_LRX_WCRED, val); } /* * Takes around few milliseconds to complete all pending requests * and return credits. * TODO: Scan fault FIFO and invalidate CRBs points to this window * and issue CRB Kill to stop all pending requests. Need only * if there is a bug in NX or fault handling in kernel. */ if (creds < window->vas_win.wcreds_max) { val = 0; set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(10)); count++; /* * Process can not close send window until all credits are * returned. */ if (!(count % 1000)) pr_warn_ratelimited("VAS: pid %d stuck. Waiting for credits returned for Window(%d). 
creds %d, Retries %d\n", vas_window_pid(&window->vas_win), window->vas_win.winid, creds, count); goto retry; } } /* * Wait for the window to go to "not-busy" state. It should only take a * short time to queue a CRB, so window should not be busy for too long. * Trying 5ms intervals. */ static void poll_window_busy_state(struct pnv_vas_window *window) { int busy; u64 val; int count = 0; retry: val = read_hvwc_reg(window, VREG(WIN_STATUS)); busy = GET_FIELD(VAS_WIN_BUSY, val); if (busy) { val = 0; set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(10)); count++; /* * Takes around few milliseconds to process all pending * requests. */ if (!(count % 1000)) pr_warn_ratelimited("VAS: pid %d stuck. Window (ID=%d) is in busy state. Retries %d\n", vas_window_pid(&window->vas_win), window->vas_win.winid, count); goto retry; } } /* * Have the hardware cast a window out of cache and wait for it to * be completed. * * NOTE: It can take a relatively long time to cast the window context * out of the cache. It is not strictly necessary to cast out if: * * - we clear the "Pin Window" bit (so hardware is free to evict) * * - we re-initialize the window context when it is reassigned. * * We do the former in vas_win_close() and latter in vas_win_open(). * So, ignoring the cast-out for now. We can add it as needed. If * casting out becomes necessary we should consider offloading the * job to a worker thread, so the window close can proceed quickly. */ static void poll_window_castout(struct pnv_vas_window *window) { /* stub for now */ } /* * Unpin and close a window so no new requests are accepted and the * hardware can evict this window from cache if necessary. */ static void unpin_close_window(struct pnv_vas_window *window) { u64 val; val = read_hvwc_reg(window, VREG(WINCTL)); val = SET_FIELD(VAS_WINCTL_PIN, val, 0); val = SET_FIELD(VAS_WINCTL_OPEN, val, 0); write_hvwc_reg(window, VREG(WINCTL), val); } /* * Close a window. * * See Section 1.12.1 of VAS workbook v1.05 for details on closing window: * - Disable new paste operations (unmap paste address) * - Poll for the "Window Busy" bit to be cleared * - Clear the Open/Enable bit for the Window. * - Poll for return of window Credits (implies FIFO empty for Rx win?) * - Unpin and cast window context out of cache * * Besides the hardware, kernel has some bookkeeping of course. */ int vas_win_close(struct vas_window *vwin) { struct pnv_vas_window *window; if (!vwin) return 0; window = container_of(vwin, struct pnv_vas_window, vas_win); if (!window->tx_win && atomic_read(&window->num_txwins) != 0) { pr_devel("Attempting to close an active Rx window!\n"); WARN_ON_ONCE(1); return -EBUSY; } unmap_paste_region(window); poll_window_busy_state(window); unpin_close_window(window); poll_window_credits(window); clear_vinst_win(window); poll_window_castout(window); /* if send window, drop reference to matching receive window */ if (window->tx_win) { if (window->user_win) { mm_context_remove_vas_window(vwin->task_ref.mm); put_vas_user_win_ref(&vwin->task_ref); } put_rx_win(window->rxwin); } vas_window_free(window); return 0; } EXPORT_SYMBOL_GPL(vas_win_close); /* * Return credit for the given window. * Send windows and fault window uses credit mechanism as follows: * * Send windows: * - The default number of credits available for each send window is * 1024. It means 1024 requests can be issued asynchronously at the * same time. If the credit is not available, that request will be * returned with RMA_Busy. 
* - One credit is taken when NX request is issued. * - This credit is returned after NX processed that request. * - If NX encounters translation error, kernel will return the * credit on the specific send window after processing the fault CRB. * * Fault window: * - The total number credits available is FIFO_SIZE/CRB_SIZE. * Means 4MB/128 in the current implementation. If credit is not * available, RMA_Reject is returned. * - A credit is taken when NX pastes CRB in fault FIFO. * - The kernel with return credit on fault window after reading entry * from fault FIFO. */ void vas_return_credit(struct pnv_vas_window *window, bool tx) { uint64_t val; val = 0ULL; if (tx) { /* send window */ val = SET_FIELD(VAS_TX_WCRED, val, 1); write_hvwc_reg(window, VREG(TX_WCRED_ADDER), val); } else { val = SET_FIELD(VAS_LRX_WCRED, val, 1); write_hvwc_reg(window, VREG(LRX_WCRED_ADDER), val); } } struct pnv_vas_window *vas_pswid_to_window(struct vas_instance *vinst, uint32_t pswid) { struct pnv_vas_window *window; int winid; if (!pswid) { pr_devel("%s: called for pswid 0!\n", __func__); return ERR_PTR(-ESRCH); } decode_pswid(pswid, NULL, &winid); if (winid >= VAS_WINDOWS_PER_CHIP) return ERR_PTR(-ESRCH); /* * If application closes the window before the hardware * returns the fault CRB, we should wait in vas_win_close() * for the pending requests. so the window must be active * and the process alive. * * If its a kernel process, we should not get any faults and * should not get here. */ window = vinst->windows[winid]; if (!window) { pr_err("PSWID decode: Could not find window for winid %d pswid %d vinst 0x%p\n", winid, pswid, vinst); return NULL; } /* * Do some sanity checks on the decoded window. Window should be * NX GZIP user send window. FTW windows should not incur faults * since their CRBs are ignored (not queued on FIFO or processed * by NX). */ if (!window->tx_win || !window->user_win || !window->nx_win || window->vas_win.cop == VAS_COP_TYPE_FAULT || window->vas_win.cop == VAS_COP_TYPE_FTW) { pr_err("PSWID decode: id %d, tx %d, user %d, nx %d, cop %d\n", winid, window->tx_win, window->user_win, window->nx_win, window->vas_win.cop); WARN_ON(1); } return window; } static struct vas_window *vas_user_win_open(int vas_id, u64 flags, enum vas_cop_type cop_type) { struct vas_tx_win_attr txattr = {}; vas_init_tx_win_attr(&txattr, cop_type); txattr.lpid = mfspr(SPRN_LPID); txattr.pidr = mfspr(SPRN_PID); txattr.user_win = true; txattr.rsvd_txbuf_count = false; txattr.pswid = false; pr_devel("Pid %d: Opening txwin, PIDR %ld\n", txattr.pidr, mfspr(SPRN_PID)); return vas_tx_win_open(vas_id, cop_type, &txattr); } static u64 vas_user_win_paste_addr(struct vas_window *txwin) { struct pnv_vas_window *win; u64 paste_addr; win = container_of(txwin, struct pnv_vas_window, vas_win); vas_win_paste_addr(win, &paste_addr, NULL); return paste_addr; } static int vas_user_win_close(struct vas_window *txwin) { vas_win_close(txwin); return 0; } static const struct vas_user_win_ops vops = { .open_win = vas_user_win_open, .paste_addr = vas_user_win_paste_addr, .close_win = vas_user_win_close, }; /* * Supporting only nx-gzip coprocessor type now, but this API code * extended to other coprocessor types later. */ int vas_register_api_powernv(struct module *mod, enum vas_cop_type cop_type, const char *name) { return vas_register_coproc_api(mod, cop_type, name, &vops); } EXPORT_SYMBOL_GPL(vas_register_api_powernv); void vas_unregister_api_powernv(void) { vas_unregister_coproc_api(); } EXPORT_SYMBOL_GPL(vas_unregister_api_powernv);
linux-master
arch/powerpc/platforms/powernv/vas-window.c
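The send-window API exported by vas-window.c above is easiest to follow with a short usage sketch. The fragment below is illustrative only and is not part of the file: it assumes the NX driver has already registered the matching 842 receive window on the target VAS instance, and it pastes a zeroed CRB, whereas a real caller would fill the CRB according to the coprocessor's request format. The vasid value and the name example_submit_crb are placeholders.

#include <linux/kernel.h>
#include <linux/err.h>
#include <asm/vas.h>

/* Illustrative only: open a kernel send window, paste one CRB, close it. */
static int example_submit_crb(int vasid)
{
        struct vas_tx_win_attr txattr;
        struct vas_window *txwin;
        /* CRBs are 128 bytes and must be cache-line aligned */
        u8 crb[128] __aligned(128) = { 0 };
        int rc;

        /* Start from the defaults vas_init_tx_win_attr() picks for NX-842 */
        vas_init_tx_win_attr(&txattr, VAS_COP_TYPE_842);

        txwin = vas_tx_win_open(vasid, VAS_COP_TYPE_842, &txattr);
        if (IS_ERR(txwin))
                return PTR_ERR(txwin);

        /* Stage the CRB through the copy buffer, then paste it to the window */
        vas_copy_crb(crb, 0);
        rc = vas_paste_crb(txwin, 0, true);

        vas_win_close(txwin);
        return rc;
}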
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerNV OPAL asynchronous completion interfaces * * Copyright 2013-2017 IBM Corp. */ #undef DEBUG #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/semaphore.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/gfp.h> #include <linux/of.h> #include <asm/machdep.h> #include <asm/opal.h> enum opal_async_token_state { ASYNC_TOKEN_UNALLOCATED = 0, ASYNC_TOKEN_ALLOCATED, ASYNC_TOKEN_DISPATCHED, ASYNC_TOKEN_ABANDONED, ASYNC_TOKEN_COMPLETED }; struct opal_async_token { enum opal_async_token_state state; struct opal_msg response; }; static DECLARE_WAIT_QUEUE_HEAD(opal_async_wait); static DEFINE_SPINLOCK(opal_async_comp_lock); static struct semaphore opal_async_sem; static unsigned int opal_max_async_tokens; static struct opal_async_token *opal_async_tokens; static int __opal_async_get_token(void) { unsigned long flags; int i, token = -EBUSY; spin_lock_irqsave(&opal_async_comp_lock, flags); for (i = 0; i < opal_max_async_tokens; i++) { if (opal_async_tokens[i].state == ASYNC_TOKEN_UNALLOCATED) { opal_async_tokens[i].state = ASYNC_TOKEN_ALLOCATED; token = i; break; } } spin_unlock_irqrestore(&opal_async_comp_lock, flags); return token; } /* * Note: If the returned token is used in an opal call and opal returns * OPAL_ASYNC_COMPLETION you MUST call one of opal_async_wait_response() or * opal_async_wait_response_interruptible() at least once before calling another * opal_async_* function */ int opal_async_get_token_interruptible(void) { int token; /* Wait until a token is available */ if (down_interruptible(&opal_async_sem)) return -ERESTARTSYS; token = __opal_async_get_token(); if (token < 0) up(&opal_async_sem); return token; } EXPORT_SYMBOL_GPL(opal_async_get_token_interruptible); static int __opal_async_release_token(int token) { unsigned long flags; int rc; if (token < 0 || token >= opal_max_async_tokens) { pr_err("%s: Passed token is out of range, token %d\n", __func__, token); return -EINVAL; } spin_lock_irqsave(&opal_async_comp_lock, flags); switch (opal_async_tokens[token].state) { case ASYNC_TOKEN_COMPLETED: case ASYNC_TOKEN_ALLOCATED: opal_async_tokens[token].state = ASYNC_TOKEN_UNALLOCATED; rc = 0; break; /* * DISPATCHED and ABANDONED tokens must wait for OPAL to respond. * Mark a DISPATCHED token as ABANDONED so that the response handling * code knows no one cares and that it can free it then. */ case ASYNC_TOKEN_DISPATCHED: opal_async_tokens[token].state = ASYNC_TOKEN_ABANDONED; fallthrough; default: rc = 1; } spin_unlock_irqrestore(&opal_async_comp_lock, flags); return rc; } int opal_async_release_token(int token) { int ret; ret = __opal_async_release_token(token); if (!ret) up(&opal_async_sem); return ret; } EXPORT_SYMBOL_GPL(opal_async_release_token); int opal_async_wait_response(uint64_t token, struct opal_msg *msg) { if (token >= opal_max_async_tokens) { pr_err("%s: Invalid token passed\n", __func__); return -EINVAL; } if (!msg) { pr_err("%s: Invalid message pointer passed\n", __func__); return -EINVAL; } /* * There is no need to mark the token as dispatched, wait_event() * will block until the token completes. * * Wakeup the poller before we wait for events to speed things * up on platforms or simulators where the interrupts aren't * functional. 
*/ opal_wake_poller(); wait_event(opal_async_wait, opal_async_tokens[token].state == ASYNC_TOKEN_COMPLETED); memcpy(msg, &opal_async_tokens[token].response, sizeof(*msg)); return 0; } EXPORT_SYMBOL_GPL(opal_async_wait_response); int opal_async_wait_response_interruptible(uint64_t token, struct opal_msg *msg) { unsigned long flags; int ret; if (token >= opal_max_async_tokens) { pr_err("%s: Invalid token passed\n", __func__); return -EINVAL; } if (!msg) { pr_err("%s: Invalid message pointer passed\n", __func__); return -EINVAL; } /* * The first time this gets called we mark the token as DISPATCHED * so that if wait_event_interruptible() returns not zero and the * caller frees the token, we know not to actually free the token * until the response comes. * * Only change if the token is ALLOCATED - it may have been * completed even before the caller gets around to calling this * the first time. * * There is also a dirty great comment at the token allocation * function that if the opal call returns OPAL_ASYNC_COMPLETION to * the caller then the caller *must* call this or the not * interruptible version before doing anything else with the * token. */ if (opal_async_tokens[token].state == ASYNC_TOKEN_ALLOCATED) { spin_lock_irqsave(&opal_async_comp_lock, flags); if (opal_async_tokens[token].state == ASYNC_TOKEN_ALLOCATED) opal_async_tokens[token].state = ASYNC_TOKEN_DISPATCHED; spin_unlock_irqrestore(&opal_async_comp_lock, flags); } /* * Wakeup the poller before we wait for events to speed things * up on platforms or simulators where the interrupts aren't * functional. */ opal_wake_poller(); ret = wait_event_interruptible(opal_async_wait, opal_async_tokens[token].state == ASYNC_TOKEN_COMPLETED); if (!ret) memcpy(msg, &opal_async_tokens[token].response, sizeof(*msg)); return ret; } EXPORT_SYMBOL_GPL(opal_async_wait_response_interruptible); /* Called from interrupt context */ static int opal_async_comp_event(struct notifier_block *nb, unsigned long msg_type, void *msg) { struct opal_msg *comp_msg = msg; enum opal_async_token_state state; unsigned long flags; uint64_t token; if (msg_type != OPAL_MSG_ASYNC_COMP) return 0; token = be64_to_cpu(comp_msg->params[0]); spin_lock_irqsave(&opal_async_comp_lock, flags); state = opal_async_tokens[token].state; opal_async_tokens[token].state = ASYNC_TOKEN_COMPLETED; spin_unlock_irqrestore(&opal_async_comp_lock, flags); if (state == ASYNC_TOKEN_ABANDONED) { /* Free the token, no one else will */ opal_async_release_token(token); return 0; } memcpy(&opal_async_tokens[token].response, comp_msg, sizeof(*comp_msg)); wake_up(&opal_async_wait); return 0; } static struct notifier_block opal_async_comp_nb = { .notifier_call = opal_async_comp_event, .next = NULL, .priority = 0, }; int __init opal_async_comp_init(void) { struct device_node *opal_node; const __be32 *async; int err; opal_node = of_find_node_by_path("/ibm,opal"); if (!opal_node) { pr_err("%s: Opal node not found\n", __func__); err = -ENOENT; goto out; } async = of_get_property(opal_node, "opal-msg-async-num", NULL); if (!async) { pr_err("%s: %pOF has no opal-msg-async-num\n", __func__, opal_node); err = -ENOENT; goto out_opal_node; } opal_max_async_tokens = be32_to_cpup(async); opal_async_tokens = kcalloc(opal_max_async_tokens, sizeof(*opal_async_tokens), GFP_KERNEL); if (!opal_async_tokens) { err = -ENOMEM; goto out_opal_node; } err = opal_message_notifier_register(OPAL_MSG_ASYNC_COMP, &opal_async_comp_nb); if (err) { pr_err("%s: Can't register OPAL event notifier (%d)\n", __func__, err); 
kfree(opal_async_tokens); goto out_opal_node; } sema_init(&opal_async_sem, opal_max_async_tokens); out_opal_node: of_node_put(opal_node); out: return err; }
linux-master
arch/powerpc/platforms/powernv/opal-async.c
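As a companion to the comments in opal-async.c above, here is a hedged sketch of the intended token life cycle, modelled on the sensor-read path used elsewhere in this platform code. It assumes opal_sensor_read() as the async-capable OPAL call and a sensor handle taken from the device tree; example_read_sensor is an invented name.

#include <linux/kernel.h>
#include <asm/opal.h>

/* Illustrative only: allocate a token, make one async-capable OPAL call,
 * wait for the completion message, then release the token. */
static int example_read_sensor(u32 sensor_hndl, u32 *val)
{
        struct opal_msg msg;
        __be32 data;
        int token, rc;

        token = opal_async_get_token_interruptible();
        if (token < 0)
                return token;

        rc = opal_sensor_read(sensor_hndl, token, &data);
        switch (rc) {
        case OPAL_ASYNC_COMPLETION:
                /* Must wait before releasing or reusing the token */
                rc = opal_async_wait_response(token, &msg);
                if (rc)
                        break;
                rc = opal_error_code(opal_get_async_rc(msg));
                if (!rc)
                        *val = be32_to_cpu(data);
                break;
        case OPAL_SUCCESS:
                *val = be32_to_cpu(data);
                rc = 0;
                break;
        default:
                rc = opal_error_code(rc);
        }

        opal_async_release_token(token);
        return rc;
}

If the caller gave up after a signal instead of waiting, the interruptible wait path above would already have marked the token DISPATCHED, so releasing it only flags it ABANDONED and the completion handler frees it when the response finally arrives.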
// SPDX-License-Identifier: GPL-2.0-or-later /* * PowerNV OPAL Powercap interface * * Copyright 2017 IBM Corp. */ #define pr_fmt(fmt) "opal-powercap: " fmt #include <linux/of.h> #include <linux/kobject.h> #include <linux/slab.h> #include <asm/opal.h> static DEFINE_MUTEX(powercap_mutex); static struct kobject *powercap_kobj; struct powercap_attr { u32 handle; struct kobj_attribute attr; }; static struct pcap { struct attribute_group pg; struct powercap_attr *pattrs; } *pcaps; static ssize_t powercap_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct powercap_attr *pcap_attr = container_of(attr, struct powercap_attr, attr); struct opal_msg msg; u32 pcap; int ret, token; token = opal_async_get_token_interruptible(); if (token < 0) { pr_devel("Failed to get token\n"); return token; } ret = mutex_lock_interruptible(&powercap_mutex); if (ret) goto out_token; ret = opal_get_powercap(pcap_attr->handle, token, (u32 *)__pa(&pcap)); switch (ret) { case OPAL_ASYNC_COMPLETION: ret = opal_async_wait_response(token, &msg); if (ret) { pr_devel("Failed to wait for the async response\n"); ret = -EIO; goto out; } ret = opal_error_code(opal_get_async_rc(msg)); if (!ret) { ret = sprintf(buf, "%u\n", be32_to_cpu(pcap)); if (ret < 0) ret = -EIO; } break; case OPAL_SUCCESS: ret = sprintf(buf, "%u\n", be32_to_cpu(pcap)); if (ret < 0) ret = -EIO; break; default: ret = opal_error_code(ret); } out: mutex_unlock(&powercap_mutex); out_token: opal_async_release_token(token); return ret; } static ssize_t powercap_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { struct powercap_attr *pcap_attr = container_of(attr, struct powercap_attr, attr); struct opal_msg msg; u32 pcap; int ret, token; ret = kstrtoint(buf, 0, &pcap); if (ret) return ret; token = opal_async_get_token_interruptible(); if (token < 0) { pr_devel("Failed to get token\n"); return token; } ret = mutex_lock_interruptible(&powercap_mutex); if (ret) goto out_token; ret = opal_set_powercap(pcap_attr->handle, token, pcap); switch (ret) { case OPAL_ASYNC_COMPLETION: ret = opal_async_wait_response(token, &msg); if (ret) { pr_devel("Failed to wait for the async response\n"); ret = -EIO; goto out; } ret = opal_error_code(opal_get_async_rc(msg)); if (!ret) ret = count; break; case OPAL_SUCCESS: ret = count; break; default: ret = opal_error_code(ret); } out: mutex_unlock(&powercap_mutex); out_token: opal_async_release_token(token); return ret; } static void __init powercap_add_attr(int handle, const char *name, struct powercap_attr *attr) { attr->handle = handle; sysfs_attr_init(&attr->attr.attr); attr->attr.attr.name = name; attr->attr.attr.mode = 0444; attr->attr.show = powercap_show; } void __init opal_powercap_init(void) { struct device_node *powercap, *node; int i = 0; powercap = of_find_compatible_node(NULL, NULL, "ibm,opal-powercap"); if (!powercap) { pr_devel("Powercap node not found\n"); return; } pcaps = kcalloc(of_get_child_count(powercap), sizeof(*pcaps), GFP_KERNEL); if (!pcaps) goto out_put_powercap; powercap_kobj = kobject_create_and_add("powercap", opal_kobj); if (!powercap_kobj) { pr_warn("Failed to create powercap kobject\n"); goto out_pcaps; } i = 0; for_each_child_of_node(powercap, node) { u32 cur, min, max; int j = 0; bool has_cur = false, has_min = false, has_max = false; if (!of_property_read_u32(node, "powercap-min", &min)) { j++; has_min = true; } if (!of_property_read_u32(node, "powercap-max", &max)) { j++; has_max = true; } if (!of_property_read_u32(node, "powercap-current", 
&cur)) { j++; has_cur = true; } pcaps[i].pattrs = kcalloc(j, sizeof(struct powercap_attr), GFP_KERNEL); if (!pcaps[i].pattrs) goto out_pcaps_pattrs; pcaps[i].pg.attrs = kcalloc(j + 1, sizeof(struct attribute *), GFP_KERNEL); if (!pcaps[i].pg.attrs) { kfree(pcaps[i].pattrs); goto out_pcaps_pattrs; } j = 0; pcaps[i].pg.name = kasprintf(GFP_KERNEL, "%pOFn", node); if (has_min) { powercap_add_attr(min, "powercap-min", &pcaps[i].pattrs[j]); pcaps[i].pg.attrs[j] = &pcaps[i].pattrs[j].attr.attr; j++; } if (has_max) { powercap_add_attr(max, "powercap-max", &pcaps[i].pattrs[j]); pcaps[i].pg.attrs[j] = &pcaps[i].pattrs[j].attr.attr; j++; } if (has_cur) { powercap_add_attr(cur, "powercap-current", &pcaps[i].pattrs[j]); pcaps[i].pattrs[j].attr.attr.mode |= 0220; pcaps[i].pattrs[j].attr.store = powercap_store; pcaps[i].pg.attrs[j] = &pcaps[i].pattrs[j].attr.attr; j++; } if (sysfs_create_group(powercap_kobj, &pcaps[i].pg)) { pr_warn("Failed to create powercap attribute group %s\n", pcaps[i].pg.name); goto out_pcaps_pattrs; } i++; } of_node_put(powercap); return; out_pcaps_pattrs: while (--i >= 0) { kfree(pcaps[i].pattrs); kfree(pcaps[i].pg.attrs); kfree(pcaps[i].pg.name); } kobject_put(powercap_kobj); of_node_put(node); out_pcaps: kfree(pcaps); out_put_powercap: of_node_put(powercap); }
linux-master
arch/powerpc/platforms/powernv/opal-powercap.c
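For context on how the attributes created by opal-powercap.c above look from user space, here is a small, purely illustrative user-space reader. The sysfs path is an assumption: the group directory is named after the device-tree node (commonly system-powercap), and powercap-current is only present when firmware exposes a current cap.

#include <stdio.h>

int main(void)
{
        /* Path is an assumption; the group name comes from the DT node */
        const char *path =
                "/sys/firmware/opal/powercap/system-powercap/powercap-current";
        unsigned int val;
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }
        if (fscanf(f, "%u", &val) == 1)
                printf("powercap-current: %u\n", val);
        fclose(f);
        return 0;
}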
// SPDX-License-Identifier: GPL-2.0-or-later /* * SMP support for PowerNV machines. * * Copyright 2011 IBM Corp. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/sched/hotplug.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/cpu.h> #include <asm/irq.h> #include <asm/smp.h> #include <asm/paca.h> #include <asm/machdep.h> #include <asm/cputable.h> #include <asm/firmware.h> #include <asm/vdso_datapage.h> #include <asm/cputhreads.h> #include <asm/xics.h> #include <asm/xive.h> #include <asm/opal.h> #include <asm/runlatch.h> #include <asm/code-patching.h> #include <asm/dbell.h> #include <asm/kvm_ppc.h> #include <asm/ppc-opcode.h> #include <asm/cpuidle.h> #include <asm/kexec.h> #include <asm/reg.h> #include <asm/powernv.h> #include "powernv.h" #ifdef DEBUG #include <asm/udbg.h> #define DBG(fmt...) udbg_printf(fmt) #else #define DBG(fmt...) do { } while (0) #endif static void pnv_smp_setup_cpu(int cpu) { /* * P9 workaround for CI vector load (see traps.c), * enable the corresponding HMI interrupt */ if (pvr_version_is(PVR_POWER9)) mtspr(SPRN_HMEER, mfspr(SPRN_HMEER) | PPC_BIT(17)); if (xive_enabled()) xive_smp_setup_cpu(); else if (cpu != boot_cpuid) xics_setup_cpu(); } static int pnv_smp_kick_cpu(int nr) { unsigned int pcpu; unsigned long start_here = __pa(ppc_function_entry(generic_secondary_smp_init)); long rc; uint8_t status; if (nr < 0 || nr >= nr_cpu_ids) return -EINVAL; pcpu = get_hard_smp_processor_id(nr); /* * If we already started or OPAL is not supported, we just * kick the CPU via the PACA */ if (paca_ptrs[nr]->cpu_start || !firmware_has_feature(FW_FEATURE_OPAL)) goto kick; /* * At this point, the CPU can either be spinning on the way in * from kexec or be inside OPAL waiting to be started for the * first time. OPAL v3 allows us to query OPAL to know if it * has the CPUs, so we do that */ rc = opal_query_cpu_status(pcpu, &status); if (rc != OPAL_SUCCESS) { pr_warn("OPAL Error %ld querying CPU %d state\n", rc, nr); return -ENODEV; } /* * Already started, just kick it, probably coming from * kexec and spinning */ if (status == OPAL_THREAD_STARTED) goto kick; /* * Available/inactive, let's kick it */ if (status == OPAL_THREAD_INACTIVE) { pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu); rc = opal_start_cpu(pcpu, start_here); if (rc != OPAL_SUCCESS) { pr_warn("OPAL Error %ld starting CPU %d\n", rc, nr); return -ENODEV; } } else { /* * An unavailable CPU (or any other unknown status) * shouldn't be started. It should also * not be in the possible map but currently it can * happen */ pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable" " (status %d)...\n", nr, pcpu, status); return -ENODEV; } kick: return smp_generic_kick_cpu(nr); } #ifdef CONFIG_HOTPLUG_CPU static int pnv_smp_cpu_disable(void) { int cpu = smp_processor_id(); /* This is identical to pSeries... might consolidate by * moving migrate_irqs_away to a ppc_md with default to * the generic fixup_irqs. --BenH. 
*/ set_cpu_online(cpu, false); vdso_data->processorCount--; if (cpu == boot_cpuid) boot_cpuid = cpumask_any(cpu_online_mask); if (xive_enabled()) xive_smp_disable_cpu(); else xics_migrate_irqs_away(); cleanup_cpu_mmu_context(); return 0; } static void pnv_flush_interrupts(void) { if (cpu_has_feature(CPU_FTR_ARCH_300)) { if (xive_enabled()) xive_flush_interrupt(); else icp_opal_flush_interrupt(); } else { icp_native_flush_interrupt(); } } static void pnv_cpu_offline_self(void) { unsigned long srr1, unexpected_mask, wmask; unsigned int cpu; u64 lpcr_val; /* Standard hot unplug procedure */ idle_task_exit(); cpu = smp_processor_id(); DBG("CPU%d offline\n", cpu); generic_set_cpu_dead(cpu); smp_wmb(); wmask = SRR1_WAKEMASK; if (cpu_has_feature(CPU_FTR_ARCH_207S)) wmask = SRR1_WAKEMASK_P8; /* * This turns the irq soft-disabled state we're called with, into a * hard-disabled state with pending irq_happened interrupts cleared. * * PACA_IRQ_DEC - Decrementer should be ignored. * PACA_IRQ_HMI - Can be ignored, processing is done in real mode. * PACA_IRQ_DBELL, EE, PMI - Unexpected. */ hard_irq_disable(); if (generic_check_cpu_restart(cpu)) goto out; unexpected_mask = ~(PACA_IRQ_DEC | PACA_IRQ_HMI | PACA_IRQ_HARD_DIS); if (local_paca->irq_happened & unexpected_mask) { if (local_paca->irq_happened & PACA_IRQ_EE) pnv_flush_interrupts(); DBG("CPU%d Unexpected exit while offline irq_happened=%lx!\n", cpu, local_paca->irq_happened); } local_paca->irq_happened = PACA_IRQ_HARD_DIS; /* * We don't want to take decrementer interrupts while we are * offline, so clear LPCR:PECE1. We keep PECE2 (and * LPCR_PECE_HVEE on P9) enabled so as to let IPIs in. * * If the CPU gets woken up by a special wakeup, ensure that * the SLW engine sets LPCR with decrementer bit cleared, else * the CPU will come back to the kernel due to a spurious * wakeup. */ lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1; pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val); while (!generic_check_cpu_restart(cpu)) { /* * Clear IPI flag, since we don't handle IPIs while * offline, except for those when changing micro-threading * mode, which are handled explicitly below, and those * for coming online, which are handled via * generic_check_cpu_restart() calls. */ kvmppc_clear_host_ipi(cpu); srr1 = pnv_cpu_offline(cpu); WARN_ON_ONCE(!irqs_disabled()); WARN_ON(lazy_irq_pending()); /* * If the SRR1 value indicates that we woke up due to * an external interrupt, then clear the interrupt. * We clear the interrupt before checking for the * reason, so as to avoid a race where we wake up for * some other reason, find nothing and clear the interrupt * just as some other cpu is sending us an interrupt. * If we returned from power7_nap as a result of * having finished executing in a KVM guest, then srr1 * contains 0. */ if (((srr1 & wmask) == SRR1_WAKEEE) || ((srr1 & wmask) == SRR1_WAKEHVI)) { pnv_flush_interrupts(); } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) { unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); asm volatile(PPC_MSGCLR(%0) : : "r" (msg)); } else if ((srr1 & wmask) == SRR1_WAKERESET) { irq_set_pending_from_srr1(srr1); /* Does not return */ } smp_mb(); /* * For kdump kernels, we process the ipi and jump to * crash_ipi_callback */ if (kdump_in_progress()) { /* * If we got to this point, we've not used * NMI's, otherwise we would have gone * via the SRR1_WAKERESET path. We are * using regular IPI's for waking up offline * threads. 
*/ struct pt_regs regs; ppc_save_regs(&regs); crash_ipi_callback(&regs); /* Does not return */ } if (cpu_core_split_required()) continue; if (srr1 && !generic_check_cpu_restart(cpu)) DBG("CPU%d Unexpected exit while offline srr1=%lx!\n", cpu, srr1); } /* * Re-enable decrementer interrupts in LPCR. * * Further, we want stop states to be woken up by decrementer * for non-hotplug cases. So program the LPCR via stop api as * well. */ lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1; pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val); out: DBG("CPU%d coming online...\n", cpu); } #endif /* CONFIG_HOTPLUG_CPU */ static int pnv_cpu_bootable(unsigned int nr) { /* * Starting with POWER8, the subcore logic relies on all threads of a * core being booted so that they can participate in split mode * switches. So on those machines we ignore the smt_enabled_at_boot * setting (smt-enabled on the kernel command line). */ if (cpu_has_feature(CPU_FTR_ARCH_207S)) return 1; return smp_generic_cpu_bootable(nr); } static int pnv_smp_prepare_cpu(int cpu) { if (xive_enabled()) return xive_smp_prepare_cpu(cpu); return 0; } /* Cause IPI as setup by the interrupt controller (xics or xive) */ static void (*ic_cause_ipi)(int cpu); static void pnv_cause_ipi(int cpu) { if (doorbell_try_core_ipi(cpu)) return; ic_cause_ipi(cpu); } static void __init pnv_smp_probe(void) { if (xive_enabled()) xive_smp_probe(); else xics_smp_probe(); if (cpu_has_feature(CPU_FTR_DBELL)) { ic_cause_ipi = smp_ops->cause_ipi; WARN_ON(!ic_cause_ipi); if (cpu_has_feature(CPU_FTR_ARCH_300)) smp_ops->cause_ipi = doorbell_global_ipi; else smp_ops->cause_ipi = pnv_cause_ipi; } } noinstr static int pnv_system_reset_exception(struct pt_regs *regs) { if (smp_handle_nmi_ipi(regs)) return 1; return 0; } static int pnv_cause_nmi_ipi(int cpu) { int64_t rc; if (cpu >= 0) { int h = get_hard_smp_processor_id(cpu); if (opal_check_token(OPAL_QUIESCE)) opal_quiesce(QUIESCE_HOLD, h); rc = opal_signal_system_reset(h); if (opal_check_token(OPAL_QUIESCE)) opal_quiesce(QUIESCE_RESUME, h); if (rc != OPAL_SUCCESS) return 0; return 1; } else if (cpu == NMI_IPI_ALL_OTHERS) { bool success = true; int c; if (opal_check_token(OPAL_QUIESCE)) opal_quiesce(QUIESCE_HOLD, -1); /* * We do not use broadcasts (yet), because it's not clear * exactly what semantics Linux wants or the firmware should * provide. */ for_each_online_cpu(c) { if (c == smp_processor_id()) continue; rc = opal_signal_system_reset( get_hard_smp_processor_id(c)); if (rc != OPAL_SUCCESS) success = false; } if (opal_check_token(OPAL_QUIESCE)) opal_quiesce(QUIESCE_RESUME, -1); if (success) return 1; /* * Caller will fall back to doorbells, which may pick * up the remainders. 
*/ } return 0; } static struct smp_ops_t pnv_smp_ops = { .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */ .cause_ipi = NULL, /* Filled at runtime by pnv_smp_probe() */ .cause_nmi_ipi = NULL, .probe = pnv_smp_probe, .prepare_cpu = pnv_smp_prepare_cpu, .kick_cpu = pnv_smp_kick_cpu, .setup_cpu = pnv_smp_setup_cpu, .cpu_bootable = pnv_cpu_bootable, #ifdef CONFIG_HOTPLUG_CPU .cpu_disable = pnv_smp_cpu_disable, .cpu_die = generic_cpu_die, .cpu_offline_self = pnv_cpu_offline_self, #endif /* CONFIG_HOTPLUG_CPU */ }; /* This is called very early during platform setup_arch */ void __init pnv_smp_init(void) { if (opal_check_token(OPAL_SIGNAL_SYSTEM_RESET)) { ppc_md.system_reset_exception = pnv_system_reset_exception; pnv_smp_ops.cause_nmi_ipi = pnv_cause_nmi_ipi; } smp_ops = &pnv_smp_ops; #ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_KEXEC_CORE crash_wake_offline = 1; #endif #endif }
linux-master
arch/powerpc/platforms/powernv/smp.c
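The offline loop in pnv_cpu_offline_self() above keys its cleanup on the SRR1 wake reason. The helper below is an illustrative restatement of that decision, not code from the file; example_classify_wake and the enum are invented names, while the SRR1_* masks and CPU_FTR_ARCH_207S come from the usual asm headers.

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cpu_has_feature.h>

enum example_wake_reason { EX_WAKE_IRQ, EX_WAKE_DBELL, EX_WAKE_RESET, EX_WAKE_OTHER };

/* Illustrative only: mirror how pnv_cpu_offline_self() interprets SRR1 */
static enum example_wake_reason example_classify_wake(unsigned long srr1)
{
        unsigned long wmask = cpu_has_feature(CPU_FTR_ARCH_207S) ?
                              SRR1_WAKEMASK_P8 : SRR1_WAKEMASK;

        if ((srr1 & wmask) == SRR1_WAKEEE || (srr1 & wmask) == SRR1_WAKEHVI)
                return EX_WAKE_IRQ;     /* flush via XIVE/XICS before napping again */
        if ((srr1 & wmask) == SRR1_WAKEHDBELL)
                return EX_WAKE_DBELL;   /* clear the doorbell with msgclr */
        if ((srr1 & wmask) == SRR1_WAKERESET)
                return EX_WAKE_RESET;   /* NMI IPI, handled via system reset */
        return EX_WAKE_OTHER;
}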
// SPDX-License-Identifier: GPL-2.0-or-later #include <linux/kernel.h> #include <linux/ioport.h> #include <linux/bitmap.h> #include <linux/pci.h> #include <asm/opal.h> #include "pci.h" /* * The majority of the complexity in supporting SR-IOV on PowerNV comes from * the need to put the MMIO space for each VF into a separate PE. Internally * the PHB maps MMIO addresses to a specific PE using the "Memory BAR Table". * The MBT historically only applied to the 64bit MMIO window of the PHB * so it's common to see it referred to as the "M64BT". * * An MBT entry stores the mapped range as an <base>,<mask> pair. This forces * the address range that we want to map to be power-of-two sized and aligned. * For conventional PCI devices this isn't really an issue since PCI device BARs * have the same requirement. * * For a SR-IOV BAR things are a little more awkward since size and alignment * are not coupled. The alignment is set based on the per-VF BAR size, but * the total BAR area is: number-of-vfs * per-vf-size. The number of VFs * isn't necessarily a power of two, so neither is the total size. To fix that * we need to finesse (read: hack) the Linux BAR allocator so that it will * allocate the SR-IOV BARs in a way that lets us map them using the MBT. * * The changes to size and alignment that we need to do depend on the "mode" * of MBT entry that we use. We only support SR-IOV on PHB3 (IODA2) and above, * so as a baseline we can assume that we have the following BAR modes * available: * * NB: $PE_COUNT is the number of PEs that the PHB supports. * * a) A segmented BAR that splits the mapped range into $PE_COUNT equally sized * segments. The n'th segment is mapped to the n'th PE. * b) An un-segmented BAR that maps the whole address range to a specific PE. * * * We prefer to use mode a) since it only requires one MBT entry per SR-IOV BAR * For comparison b) requires one entry per-VF per-BAR, or: * (num-vfs * num-sriov-bars) in total. To use a) we need the size of each segment * to equal the size of the per-VF BAR area. So: * * new_size = per-vf-size * number-of-PEs * * The alignment for the SR-IOV BAR also needs to be changed from per-vf-size * to "new_size", calculated above. Implementing this is a convoluted process * which requires several hooks in the PCI core: * * 1. In pcibios_device_add() we call pnv_pci_ioda_fixup_iov(). * * At this point the device has been probed and the device's BARs are sized, * but no resource allocations have been done. The SR-IOV BARs are sized * based on the maximum number of VFs supported by the device and we need * to increase that to new_size. * * 2. Later, when Linux actually assigns resources it tries to make the resource * allocations for each PCI bus as compact as possible. As a part of that it * sorts the BARs on a bus by their required alignment, which is calculated * using pci_resource_alignment(). * * For IOV resources this goes: * pci_resource_alignment() * pci_sriov_resource_alignment() * pcibios_sriov_resource_alignment() * pnv_pci_iov_resource_alignment() * * Our hook overrides the default alignment, equal to the per-vf-size, with * new_size computed above. * * 3. When userspace enables VFs for a device: * * sriov_enable() * pcibios_sriov_enable() * pnv_pcibios_sriov_enable() * * This is where we actually allocate PE numbers for each VF and setup the * MBT mapping for each SR-IOV BAR. In steps 1) and 2) we setup an "arena" * where each MBT segment is equal in size to the VF BAR so we can shift * around the actual SR-IOV BAR location within this arena. 
We need this * ability because the PE space is shared by all devices on the same PHB. * When using mode a) described above segment 0 in maps to PE#0 which might * be already being used by another device on the PHB. * * As a result we need allocate a contigious range of PE numbers, then shift * the address programmed into the SR-IOV BAR of the PF so that the address * of VF0 matches up with the segment corresponding to the first allocated * PE number. This is handled in pnv_pci_vf_resource_shift(). * * Once all that is done we return to the PCI core which then enables VFs, * scans them and creates pci_devs for each. The init process for a VF is * largely the same as a normal device, but the VF is inserted into the IODA * PE that we allocated for it rather than the PE associated with the bus. * * 4. When userspace disables VFs we unwind the above in * pnv_pcibios_sriov_disable(). Fortunately this is relatively simple since * we don't need to validate anything, just tear down the mappings and * move SR-IOV resource back to its "proper" location. * * That's how mode a) works. In theory mode b) (single PE mapping) is less work * since we can map each individual VF with a separate BAR. However, there's a * few limitations: * * 1) For IODA2 mode b) has a minimum alignment requirement of 32MB. This makes * it only usable for devices with very large per-VF BARs. Such devices are * similar to Big Foot. They definitely exist, but I've never seen one. * * 2) The number of MBT entries that we have is limited. PHB3 and PHB4 only * 16 total and some are needed for. Most SR-IOV capable network cards can support * more than 16 VFs on each port. * * We use b) when using a) would use more than 1/4 of the entire 64 bit MMIO * window of the PHB. * * * * PHB4 (IODA3) added a few new features that would be useful for SR-IOV. It * allowed the MBT to map 32bit MMIO space in addition to 64bit which allows * us to support SR-IOV BARs in the 32bit MMIO window. This is useful since * the Linux BAR allocation will place any BAR marked as non-prefetchable into * the non-prefetchable bridge window, which is 32bit only. It also added two * new modes: * * c) A segmented BAR similar to a), but each segment can be individually * mapped to any PE. This is matches how the 32bit MMIO window worked on * IODA1&2. * * d) A segmented BAR with 8, 64, or 128 segments. This works similarly to a), * but with fewer segments and configurable base PE. * * i.e. The n'th segment maps to the (n + base)'th PE. * * The base PE is also required to be a multiple of the window size. * * Unfortunately, the OPAL API doesn't currently (as of skiboot v6.6) allow us * to exploit any of the IODA3 features. */ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev) { struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); struct resource *res; int i; resource_size_t vf_bar_sz; struct pnv_iov_data *iov; int mul; iov = kzalloc(sizeof(*iov), GFP_KERNEL); if (!iov) goto disable_iov; pdev->dev.archdata.iov_data = iov; mul = phb->ioda.total_pe_num; for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { res = &pdev->resource[i + PCI_IOV_RESOURCES]; if (!res->flags || res->parent) continue; if (!pnv_pci_is_m64_flags(res->flags)) { dev_warn(&pdev->dev, "Don't support SR-IOV with non M64 VF BAR%d: %pR. \n", i, res); goto disable_iov; } vf_bar_sz = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES); /* * Generally, one segmented M64 BAR maps one IOV BAR. However, * if a VF BAR is too large we end up wasting a lot of space. 
* If each VF needs more than 1/4 of the default m64 segment * then each VF BAR should be mapped in single-PE mode to reduce * the amount of space required. This does however limit the * number of VFs we can support. * * The 1/4 limit is arbitrary and can be tweaked. */ if (vf_bar_sz > (phb->ioda.m64_segsize >> 2)) { /* * On PHB3, the minimum size alignment of M64 BAR in * single mode is 32MB. If this VF BAR is smaller than * 32MB, but still too large for a segmented window * then we can't map it and need to disable SR-IOV for * this device. */ if (vf_bar_sz < SZ_32M) { pci_err(pdev, "VF BAR%d: %pR can't be mapped in single PE mode\n", i, res); goto disable_iov; } iov->m64_single_mode[i] = true; continue; } /* * This BAR can be mapped with one segmented window, so adjust * te resource size to accommodate. */ pci_dbg(pdev, " Fixing VF BAR%d: %pR to\n", i, res); res->end = res->start + vf_bar_sz * mul - 1; pci_dbg(pdev, " %pR\n", res); pci_info(pdev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)", i, res, mul); iov->need_shift = true; } return; disable_iov: /* Save ourselves some MMIO space by disabling the unusable BARs */ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { res = &pdev->resource[i + PCI_IOV_RESOURCES]; res->flags = 0; res->end = res->start - 1; } pdev->dev.archdata.iov_data = NULL; kfree(iov); } void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev) { if (pdev->is_virtfn) { struct pnv_ioda_pe *pe = pnv_ioda_get_pe(pdev); /* * VF PEs are single-device PEs so their pdev pointer needs to * be set. The pdev doesn't exist when the PE is allocated (in * (pcibios_sriov_enable()) so we fix it up here. */ pe->pdev = pdev; WARN_ON(!(pe->flags & PNV_IODA_PE_VF)); } else if (pdev->is_physfn) { /* * For PFs adjust their allocated IOV resources to match what * the PHB can support using it's M64 BAR table. */ pnv_pci_ioda_fixup_iov_resources(pdev); } } resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev, int resno) { resource_size_t align = pci_iov_resource_size(pdev, resno); struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus); struct pnv_iov_data *iov = pnv_iov_get(pdev); /* * iov can be null if we have an SR-IOV device with IOV BAR that can't * be placed in the m64 space (i.e. The BAR is 32bit or non-prefetch). * In that case we don't allow VFs to be enabled since one of their * BARs would not be placed in the correct PE. */ if (!iov) return align; /* * If we're using single mode then we can just use the native VF BAR * alignment. We validated that it's possible to use a single PE * window above when we did the fixup. */ if (iov->m64_single_mode[resno - PCI_IOV_RESOURCES]) return align; /* * On PowerNV platform, IOV BAR is mapped by M64 BAR to enable the * SR-IOV. While from hardware perspective, the range mapped by M64 * BAR should be size aligned. * * This function returns the total IOV BAR size if M64 BAR is in * Shared PE mode or just VF BAR size if not. * If the M64 BAR is in Single PE mode, return the VF BAR size or * M64 segment size if IOV BAR size is less. */ return phb->ioda.total_pe_num * align; } static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs) { struct pnv_iov_data *iov; struct pnv_phb *phb; int window_id; phb = pci_bus_to_pnvhb(pdev->bus); iov = pnv_iov_get(pdev); for_each_set_bit(window_id, iov->used_m64_bar_mask, MAX_M64_BARS) { opal_pci_phb_mmio_enable(phb->opal_id, OPAL_M64_WINDOW_TYPE, window_id, 0); clear_bit(window_id, &phb->ioda.m64_bar_alloc); } return 0; } /* * PHB3 and beyond support segmented windows. 
The window's address range * is subdivided into phb->ioda.total_pe_num segments and there's a 1-1 * mapping between PEs and segments. */ static int64_t pnv_ioda_map_m64_segmented(struct pnv_phb *phb, int window_id, resource_size_t start, resource_size_t size) { int64_t rc; rc = opal_pci_set_phb_mem_window(phb->opal_id, OPAL_M64_WINDOW_TYPE, window_id, start, 0, /* unused */ size); if (rc) goto out; rc = opal_pci_phb_mmio_enable(phb->opal_id, OPAL_M64_WINDOW_TYPE, window_id, OPAL_ENABLE_M64_SPLIT); out: if (rc) pr_err("Failed to map M64 window #%d: %lld\n", window_id, rc); return rc; } static int64_t pnv_ioda_map_m64_single(struct pnv_phb *phb, int pe_num, int window_id, resource_size_t start, resource_size_t size) { int64_t rc; /* * The API for setting up m64 mmio windows seems to have been designed * with P7-IOC in mind. For that chip each M64 BAR (window) had a fixed * split of 8 equally sized segments each of which could individually * assigned to a PE. * * The problem with this is that the API doesn't have any way to * communicate the number of segments we want on a BAR. This wasn't * a problem for p7-ioc since you didn't have a choice, but the * single PE windows added in PHB3 don't map cleanly to this API. * * As a result we've got this slightly awkward process where we * call opal_pci_map_pe_mmio_window() to put the single in single * PE mode, and set the PE for the window before setting the address * bounds. We need to do it this way because the single PE windows * for PHB3 have different alignment requirements on PHB3. */ rc = opal_pci_map_pe_mmio_window(phb->opal_id, pe_num, OPAL_M64_WINDOW_TYPE, window_id, 0); if (rc) goto out; /* * NB: In single PE mode the window needs to be aligned to 32MB */ rc = opal_pci_set_phb_mem_window(phb->opal_id, OPAL_M64_WINDOW_TYPE, window_id, start, 0, /* ignored by FW, m64 is 1-1 */ size); if (rc) goto out; /* * Now actually enable it. We specified the BAR should be in "non-split" * mode so FW will validate that the BAR is in single PE mode. */ rc = opal_pci_phb_mmio_enable(phb->opal_id, OPAL_M64_WINDOW_TYPE, window_id, OPAL_ENABLE_M64_NON_SPLIT); out: if (rc) pr_err("Error mapping single PE BAR\n"); return rc; } static int pnv_pci_alloc_m64_bar(struct pnv_phb *phb, struct pnv_iov_data *iov) { int win; do { win = find_next_zero_bit(&phb->ioda.m64_bar_alloc, phb->ioda.m64_bar_idx + 1, 0); if (win >= phb->ioda.m64_bar_idx + 1) return -1; } while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc)); set_bit(win, iov->used_m64_bar_mask); return win; } static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs) { struct pnv_iov_data *iov; struct pnv_phb *phb; int win; struct resource *res; int i, j; int64_t rc; resource_size_t size, start; int base_pe_num; phb = pci_bus_to_pnvhb(pdev->bus); iov = pnv_iov_get(pdev); for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { res = &pdev->resource[i + PCI_IOV_RESOURCES]; if (!res->flags || !res->parent) continue; /* don't need single mode? map everything in one go! 
*/ if (!iov->m64_single_mode[i]) { win = pnv_pci_alloc_m64_bar(phb, iov); if (win < 0) goto m64_failed; size = resource_size(res); start = res->start; rc = pnv_ioda_map_m64_segmented(phb, win, start, size); if (rc) goto m64_failed; continue; } /* otherwise map each VF with single PE BARs */ size = pci_iov_resource_size(pdev, PCI_IOV_RESOURCES + i); base_pe_num = iov->vf_pe_arr[0].pe_number; for (j = 0; j < num_vfs; j++) { win = pnv_pci_alloc_m64_bar(phb, iov); if (win < 0) goto m64_failed; start = res->start + size * j; rc = pnv_ioda_map_m64_single(phb, win, base_pe_num + j, start, size); if (rc) goto m64_failed; } } return 0; m64_failed: pnv_pci_vf_release_m64(pdev, num_vfs); return -EBUSY; } static void pnv_ioda_release_vf_PE(struct pci_dev *pdev) { struct pnv_phb *phb; struct pnv_ioda_pe *pe, *pe_n; phb = pci_bus_to_pnvhb(pdev->bus); if (!pdev->is_physfn) return; /* FIXME: Use pnv_ioda_release_pe()? */ list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) { if (pe->parent_dev != pdev) continue; pnv_pci_ioda2_release_pe_dma(pe); /* Remove from list */ mutex_lock(&phb->ioda.pe_list_mutex); list_del(&pe->list); mutex_unlock(&phb->ioda.pe_list_mutex); pnv_ioda_deconfigure_pe(phb, pe); pnv_ioda_free_pe(pe); } } static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset) { struct resource *res, res2; struct pnv_iov_data *iov; resource_size_t size; u16 num_vfs; int i; if (!dev->is_physfn) return -EINVAL; iov = pnv_iov_get(dev); /* * "offset" is in VFs. The M64 windows are sized so that when they * are segmented, each segment is the same size as the IOV BAR. * Each segment is in a separate PE, and the high order bits of the * address are the PE number. Therefore, each VF's BAR is in a * separate PE, and changing the IOV BAR start address changes the * range of PEs the VFs are in. */ num_vfs = iov->num_vfs; for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { res = &dev->resource[i + PCI_IOV_RESOURCES]; if (!res->flags || !res->parent) continue; if (iov->m64_single_mode[i]) continue; /* * The actual IOV BAR range is determined by the start address * and the actual size for num_vfs VFs BAR. This check is to * make sure that after shifting, the range will not overlap * with another device. */ size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES); res2.flags = res->flags; res2.start = res->start + (size * offset); res2.end = res2.start + (size * num_vfs) - 1; if (res2.end > res->end) { dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n", i, &res2, res, num_vfs, offset); return -EBUSY; } } /* * Since M64 BAR shares segments among all possible 256 PEs, * we have to shift the beginning of PF IOV BAR to make it start from * the segment which belongs to the PE number assigned to the first VF. * This creates a "hole" in the /proc/iomem which could be used for * allocating other resources so we reserve this area below and * release when IOV is released. */ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { res = &dev->resource[i + PCI_IOV_RESOURCES]; if (!res->flags || !res->parent) continue; if (iov->m64_single_mode[i]) continue; size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES); res2 = *res; res->start += size * offset; dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\n", i, &res2, res, (offset > 0) ? 
"En" : "Dis", num_vfs, offset); if (offset < 0) { devm_release_resource(&dev->dev, &iov->holes[i]); memset(&iov->holes[i], 0, sizeof(iov->holes[i])); } pci_update_resource(dev, i + PCI_IOV_RESOURCES); if (offset > 0) { iov->holes[i].start = res2.start; iov->holes[i].end = res2.start + size * offset - 1; iov->holes[i].flags = IORESOURCE_BUS; iov->holes[i].name = "pnv_iov_reserved"; devm_request_resource(&dev->dev, res->parent, &iov->holes[i]); } } return 0; } static void pnv_pci_sriov_disable(struct pci_dev *pdev) { u16 num_vfs, base_pe; struct pnv_iov_data *iov; iov = pnv_iov_get(pdev); if (WARN_ON(!iov)) return; num_vfs = iov->num_vfs; base_pe = iov->vf_pe_arr[0].pe_number; /* Release VF PEs */ pnv_ioda_release_vf_PE(pdev); /* Un-shift the IOV BARs if we need to */ if (iov->need_shift) pnv_pci_vf_resource_shift(pdev, -base_pe); /* Release M64 windows */ pnv_pci_vf_release_m64(pdev, num_vfs); } static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) { struct pnv_phb *phb; struct pnv_ioda_pe *pe; int pe_num; u16 vf_index; struct pnv_iov_data *iov; struct pci_dn *pdn; if (!pdev->is_physfn) return; phb = pci_bus_to_pnvhb(pdev->bus); pdn = pci_get_pdn(pdev); iov = pnv_iov_get(pdev); /* Reserve PE for each VF */ for (vf_index = 0; vf_index < num_vfs; vf_index++) { int vf_devfn = pci_iov_virtfn_devfn(pdev, vf_index); int vf_bus = pci_iov_virtfn_bus(pdev, vf_index); struct pci_dn *vf_pdn; pe = &iov->vf_pe_arr[vf_index]; pe->phb = phb; pe->flags = PNV_IODA_PE_VF; pe->pbus = NULL; pe->parent_dev = pdev; pe->mve_number = -1; pe->rid = (vf_bus << 8) | vf_devfn; pe_num = pe->pe_number; pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n", pci_domain_nr(pdev->bus), pdev->bus->number, PCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num); if (pnv_ioda_configure_pe(phb, pe)) { /* XXX What do we do here ? */ pnv_ioda_free_pe(pe); pe->pdev = NULL; continue; } /* Put PE to the list */ mutex_lock(&phb->ioda.pe_list_mutex); list_add_tail(&pe->list, &phb->ioda.pe_list); mutex_unlock(&phb->ioda.pe_list_mutex); /* associate this pe to it's pdn */ list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) { if (vf_pdn->busno == vf_bus && vf_pdn->devfn == vf_devfn) { vf_pdn->pe_number = pe_num; break; } } pnv_pci_ioda2_setup_dma_pe(phb, pe); } } static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs) { struct pnv_ioda_pe *base_pe; struct pnv_iov_data *iov; struct pnv_phb *phb; int ret; u16 i; phb = pci_bus_to_pnvhb(pdev->bus); iov = pnv_iov_get(pdev); /* * There's a calls to IODA2 PE setup code littered throughout. We could * probably fix that, but we'd still have problems due to the * restriction inherent on IODA1 PHBs. * * NB: We class IODA3 as IODA2 since they're very similar. */ if (phb->type != PNV_PHB_IODA2) { pci_err(pdev, "SR-IOV is not supported on this PHB\n"); return -ENXIO; } if (!iov) { dev_info(&pdev->dev, "don't support this SRIOV device with non 64bit-prefetchable IOV BAR\n"); return -ENOSPC; } /* allocate a contiguous block of PEs for our VFs */ base_pe = pnv_ioda_alloc_pe(phb, num_vfs); if (!base_pe) { pci_err(pdev, "Unable to allocate PEs for %d VFs\n", num_vfs); return -EBUSY; } iov->vf_pe_arr = base_pe; iov->num_vfs = num_vfs; /* Assign M64 window accordingly */ ret = pnv_pci_vf_assign_m64(pdev, num_vfs); if (ret) { dev_info(&pdev->dev, "Not enough M64 window resources\n"); goto m64_failed; } /* * When using one M64 BAR to map one IOV BAR, we need to shift * the IOV BAR according to the PE# allocated to the VFs. * Otherwise, the PE# for the VF will conflict with others. 
 */
	if (iov->need_shift) {
		ret = pnv_pci_vf_resource_shift(pdev, base_pe->pe_number);
		if (ret)
			goto shift_failed;
	}

	/* Setup VF PEs */
	pnv_ioda_setup_vf_PE(pdev, num_vfs);

	return 0;

shift_failed:
	pnv_pci_vf_release_m64(pdev, num_vfs);
m64_failed:
	for (i = 0; i < num_vfs; i++)
		pnv_ioda_free_pe(&iov->vf_pe_arr[i]);

	return ret;
}

int pnv_pcibios_sriov_disable(struct pci_dev *pdev)
{
	pnv_pci_sriov_disable(pdev);

	/* Release PCI data */
	remove_sriov_vf_pdns(pdev);

	return 0;
}

int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
	/* Allocate PCI data */
	add_sriov_vf_pdns(pdev);

	return pnv_pci_sriov_enable(pdev, num_vfs);
}
linux-master
arch/powerpc/platforms/powernv/pci-sriov.c
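The shift performed by pnv_pci_vf_resource_shift() above only makes sense against a segmented M64 window, where each equally sized segment is owned by the PE with the same index. The short userspace sketch below models just that address arithmetic under that assumption; the window base, segment size, VF count and PE numbers are invented example values, not anything read from real hardware or firmware.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* All numbers here are hypothetical, chosen only to show the arithmetic. */
	uint64_t m64_base = 0x3fe000000000ull;	/* base of a segmented M64 window          */
	uint64_t seg_size = 0x2000000ull;	/* one segment == one per-VF BAR (32 MB)   */
	unsigned int base_pe = 32;		/* first PE of the block handed to the VFs */
	unsigned int num_vfs = 4;

	/*
	 * Assume segment n of the window belongs to PE n.  Without a shift,
	 * VF i would land in segment i; adding base_pe segments to the IOV
	 * BAR start puts VF i in segment (base_pe + i), the PE assigned to
	 * that VF, so the segments no longer conflict with other PEs.
	 */
	uint64_t shifted = m64_base + seg_size * base_pe;

	for (unsigned int i = 0; i < num_vfs; i++)
		printf("VF%u -> PE#%u, BAR at 0x%llx (segment %llu)\n",
		       i, base_pe + i,
		       (unsigned long long)(shifted + seg_size * i),
		       (unsigned long long)((shifted + seg_size * i - m64_base) / seg_size));
	return 0;
}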
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2013, Michael Ellerman, IBM Corporation. */ #define pr_fmt(fmt) "powernv-rng: " fmt #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/slab.h> #include <linux/smp.h> #include <asm/archrandom.h> #include <asm/cputable.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/smp.h> #include "powernv.h" #define DARN_ERR 0xFFFFFFFFFFFFFFFFul struct pnv_rng { void __iomem *regs; void __iomem *regs_real; unsigned long mask; }; static DEFINE_PER_CPU(struct pnv_rng *, pnv_rng); static unsigned long rng_whiten(struct pnv_rng *rng, unsigned long val) { unsigned long parity; /* Calculate the parity of the value */ asm (".machine push; \ .machine power7; \ popcntd %0,%1; \ .machine pop;" : "=r" (parity) : "r" (val)); /* xor our value with the previous mask */ val ^= rng->mask; /* update the mask based on the parity of this value */ rng->mask = (rng->mask << 1) | (parity & 1); return val; } static int pnv_get_random_darn(unsigned long *v) { unsigned long val; /* Using DARN with L=1 - 64-bit conditioned random number */ asm volatile(PPC_DARN(%0, 1) : "=r"(val)); if (val == DARN_ERR) return 0; *v = val; return 1; } static int __init initialise_darn(void) { unsigned long val; int i; if (!cpu_has_feature(CPU_FTR_ARCH_300)) return -ENODEV; for (i = 0; i < 10; i++) { if (pnv_get_random_darn(&val)) { ppc_md.get_random_seed = pnv_get_random_darn; return 0; } } return -EIO; } int pnv_get_random_long(unsigned long *v) { struct pnv_rng *rng; if (mfmsr() & MSR_DR) { rng = get_cpu_var(pnv_rng); *v = rng_whiten(rng, in_be64(rng->regs)); put_cpu_var(rng); } else { rng = raw_cpu_read(pnv_rng); *v = rng_whiten(rng, __raw_rm_readq(rng->regs_real)); } return 1; } EXPORT_SYMBOL_GPL(pnv_get_random_long); static __init void rng_init_per_cpu(struct pnv_rng *rng, struct device_node *dn) { int chip_id, cpu; chip_id = of_get_ibm_chip_id(dn); if (chip_id == -1) pr_warn("No ibm,chip-id found for %pOF.\n", dn); for_each_possible_cpu(cpu) { if (per_cpu(pnv_rng, cpu) == NULL || cpu_to_chip_id(cpu) == chip_id) { per_cpu(pnv_rng, cpu) = rng; } } } static __init int rng_create(struct device_node *dn) { struct pnv_rng *rng; struct resource res; unsigned long val; rng = kzalloc(sizeof(*rng), GFP_KERNEL); if (!rng) return -ENOMEM; if (of_address_to_resource(dn, 0, &res)) { kfree(rng); return -ENXIO; } rng->regs_real = (void __iomem *)res.start; rng->regs = of_iomap(dn, 0); if (!rng->regs) { kfree(rng); return -ENXIO; } val = in_be64(rng->regs); rng->mask = val; rng_init_per_cpu(rng, dn); ppc_md.get_random_seed = pnv_get_random_long; return 0; } static int __init pnv_get_random_long_early(unsigned long *v) { struct device_node *dn; if (!slab_is_available()) return 0; if (cmpxchg(&ppc_md.get_random_seed, pnv_get_random_long_early, NULL) != pnv_get_random_long_early) return 0; for_each_compatible_node(dn, NULL, "ibm,power-rng") rng_create(dn); if (!ppc_md.get_random_seed) return 0; return ppc_md.get_random_seed(v); } void __init pnv_rng_init(void) { struct device_node *dn; /* Prefer darn over the rest. */ if (!initialise_darn()) return; dn = of_find_compatible_node(NULL, NULL, "ibm,power-rng"); if (dn) ppc_md.get_random_seed = pnv_get_random_long_early; of_node_put(dn); } static int __init pnv_rng_late_init(void) { struct device_node *dn; unsigned long v; /* In case it wasn't called during init for some other reason. 
 */
	if (ppc_md.get_random_seed == pnv_get_random_long_early)
		pnv_get_random_long_early(&v);

	if (ppc_md.get_random_seed == pnv_get_random_long) {
		for_each_compatible_node(dn, NULL, "ibm,power-rng")
			of_platform_device_create(dn, NULL, NULL);
	}

	return 0;
}
machine_subsys_initcall(powernv, pnv_rng_late_init);
linux-master
arch/powerpc/platforms/powernv/rng.c
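rng_whiten() above mixes each hardware sample with a running mask whose low bit is refreshed from the parity of the raw value. The sketch below is a plain userspace model of only that mixing step: __builtin_popcountll() stands in for the popcntd instruction and rand() stands in for the MMIO read, so it says nothing about the quality of the real hardware source.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct model_rng {
	uint64_t mask;
};

/* XOR the sample with the running mask, then shift the parity of the
 * raw sample into the mask for next time. */
static uint64_t model_whiten(struct model_rng *rng, uint64_t val)
{
	uint64_t parity = __builtin_popcountll(val) & 1;
	uint64_t out = val ^ rng->mask;

	rng->mask = (rng->mask << 1) | parity;
	return out;
}

int main(void)
{
	struct model_rng rng = { .mask = 0x1234 };	/* arbitrary seed */

	for (int i = 0; i < 4; i++) {
		uint64_t raw = ((uint64_t)rand() << 32) ^ (uint64_t)rand();
		uint64_t out = model_whiten(&rng, raw);

		printf("raw=%016llx whitened=%016llx next mask=%016llx\n",
		       (unsigned long long)raw,
		       (unsigned long long)out,
		       (unsigned long long)rng.mask);
	}
	return 0;
}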
// SPDX-License-Identifier: GPL-2.0-or-later /* * Error log support on PowerNV. * * Copyright 2013,2014 IBM Corp. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/fs.h> #include <linux/vmalloc.h> #include <linux/fcntl.h> #include <linux/kobject.h> #include <linux/uaccess.h> #include <asm/opal.h> struct elog_obj { struct kobject kobj; struct bin_attribute raw_attr; uint64_t id; uint64_t type; size_t size; char *buffer; }; #define to_elog_obj(x) container_of(x, struct elog_obj, kobj) struct elog_attribute { struct attribute attr; ssize_t (*show)(struct elog_obj *elog, struct elog_attribute *attr, char *buf); ssize_t (*store)(struct elog_obj *elog, struct elog_attribute *attr, const char *buf, size_t count); }; #define to_elog_attr(x) container_of(x, struct elog_attribute, attr) static ssize_t elog_id_show(struct elog_obj *elog_obj, struct elog_attribute *attr, char *buf) { return sprintf(buf, "0x%llx\n", elog_obj->id); } static const char *elog_type_to_string(uint64_t type) { switch (type) { case 0: return "PEL"; default: return "unknown"; } } static ssize_t elog_type_show(struct elog_obj *elog_obj, struct elog_attribute *attr, char *buf) { return sprintf(buf, "0x%llx %s\n", elog_obj->type, elog_type_to_string(elog_obj->type)); } static ssize_t elog_ack_show(struct elog_obj *elog_obj, struct elog_attribute *attr, char *buf) { return sprintf(buf, "ack - acknowledge log message\n"); } static ssize_t elog_ack_store(struct elog_obj *elog_obj, struct elog_attribute *attr, const char *buf, size_t count) { /* * Try to self remove this attribute. If we are successful, * delete the kobject itself. */ if (sysfs_remove_file_self(&elog_obj->kobj, &attr->attr)) { opal_send_ack_elog(elog_obj->id); kobject_put(&elog_obj->kobj); } return count; } static struct elog_attribute id_attribute = __ATTR(id, 0444, elog_id_show, NULL); static struct elog_attribute type_attribute = __ATTR(type, 0444, elog_type_show, NULL); static struct elog_attribute ack_attribute = __ATTR(acknowledge, 0660, elog_ack_show, elog_ack_store); static struct kset *elog_kset; static ssize_t elog_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct elog_attribute *attribute; struct elog_obj *elog; attribute = to_elog_attr(attr); elog = to_elog_obj(kobj); if (!attribute->show) return -EIO; return attribute->show(elog, attribute, buf); } static ssize_t elog_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len) { struct elog_attribute *attribute; struct elog_obj *elog; attribute = to_elog_attr(attr); elog = to_elog_obj(kobj); if (!attribute->store) return -EIO; return attribute->store(elog, attribute, buf, len); } static const struct sysfs_ops elog_sysfs_ops = { .show = elog_attr_show, .store = elog_attr_store, }; static void elog_release(struct kobject *kobj) { struct elog_obj *elog; elog = to_elog_obj(kobj); kfree(elog->buffer); kfree(elog); } static struct attribute *elog_default_attrs[] = { &id_attribute.attr, &type_attribute.attr, &ack_attribute.attr, NULL, }; ATTRIBUTE_GROUPS(elog_default); static struct kobj_type elog_ktype = { .sysfs_ops = &elog_sysfs_ops, .release = &elog_release, .default_groups = elog_default_groups, }; /* Maximum size of a single log on FSP is 16KB */ #define OPAL_MAX_ERRLOG_SIZE 16384 static ssize_t raw_attr_read(struct file *filep, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { int opal_rc; 
struct elog_obj *elog = to_elog_obj(kobj); /* We may have had an error reading before, so let's retry */ if (!elog->buffer) { elog->buffer = kzalloc(elog->size, GFP_KERNEL); if (!elog->buffer) return -EIO; opal_rc = opal_read_elog(__pa(elog->buffer), elog->size, elog->id); if (opal_rc != OPAL_SUCCESS) { pr_err_ratelimited("ELOG: log read failed for log-id=%llx\n", elog->id); kfree(elog->buffer); elog->buffer = NULL; return -EIO; } } memcpy(buffer, elog->buffer + pos, count); return count; } static void create_elog_obj(uint64_t id, size_t size, uint64_t type) { struct elog_obj *elog; int rc; elog = kzalloc(sizeof(*elog), GFP_KERNEL); if (!elog) return; elog->kobj.kset = elog_kset; kobject_init(&elog->kobj, &elog_ktype); sysfs_bin_attr_init(&elog->raw_attr); elog->raw_attr.attr.name = "raw"; elog->raw_attr.attr.mode = 0400; elog->raw_attr.size = size; elog->raw_attr.read = raw_attr_read; elog->id = id; elog->size = size; elog->type = type; elog->buffer = kzalloc(elog->size, GFP_KERNEL); if (elog->buffer) { rc = opal_read_elog(__pa(elog->buffer), elog->size, elog->id); if (rc != OPAL_SUCCESS) { pr_err("ELOG: log read failed for log-id=%llx\n", elog->id); kfree(elog->buffer); elog->buffer = NULL; } } rc = kobject_add(&elog->kobj, NULL, "0x%llx", id); if (rc) { kobject_put(&elog->kobj); return; } /* * As soon as the sysfs file for this elog is created/activated there is * a chance the opal_errd daemon (or any userspace) might read and * acknowledge the elog before kobject_uevent() is called. If that * happens then there is a potential race between * elog_ack_store->kobject_put() and kobject_uevent() which leads to a * use-after-free of a kernfs object resulting in a kernel crash. * * To avoid that, we need to take a reference on behalf of the bin file, * so that our reference remains valid while we call kobject_uevent(). * We then drop our reference before exiting the function, leaving the * bin file to drop the last reference (if it hasn't already). */ /* Take a reference for the bin file */ kobject_get(&elog->kobj); rc = sysfs_create_bin_file(&elog->kobj, &elog->raw_attr); if (rc == 0) { kobject_uevent(&elog->kobj, KOBJ_ADD); } else { /* Drop the reference taken for the bin file */ kobject_put(&elog->kobj); } /* Drop our reference */ kobject_put(&elog->kobj); return; } static irqreturn_t elog_event(int irq, void *data) { __be64 size; __be64 id; __be64 type; uint64_t elog_size; uint64_t log_id; uint64_t elog_type; int rc; char name[2+16+1]; struct kobject *kobj; rc = opal_get_elog_size(&id, &size, &type); if (rc != OPAL_SUCCESS) { pr_err("ELOG: OPAL log info read failed\n"); return IRQ_HANDLED; } elog_size = be64_to_cpu(size); log_id = be64_to_cpu(id); elog_type = be64_to_cpu(type); WARN_ON(elog_size > OPAL_MAX_ERRLOG_SIZE); if (elog_size >= OPAL_MAX_ERRLOG_SIZE) elog_size = OPAL_MAX_ERRLOG_SIZE; sprintf(name, "0x%llx", log_id); /* we may get notified twice, let's handle * that gracefully and not create two conflicting * entries. 
*/ kobj = kset_find_obj(elog_kset, name); if (kobj) { /* Drop reference added by kset_find_obj() */ kobject_put(kobj); return IRQ_HANDLED; } create_elog_obj(log_id, elog_size, elog_type); return IRQ_HANDLED; } int __init opal_elog_init(void) { int rc = 0, irq; /* ELOG not supported by firmware */ if (!opal_check_token(OPAL_ELOG_READ)) return -1; elog_kset = kset_create_and_add("elog", NULL, opal_kobj); if (!elog_kset) { pr_warn("%s: failed to create elog kset\n", __func__); return -1; } irq = opal_event_request(ilog2(OPAL_EVENT_ERROR_LOG_AVAIL)); if (!irq) { pr_err("%s: Can't register OPAL event irq (%d)\n", __func__, irq); return irq; } rc = request_threaded_irq(irq, NULL, elog_event, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "opal-elog", NULL); if (rc) { pr_err("%s: Can't request OPAL event irq (%d)\n", __func__, rc); return rc; } /* We are now ready to pull error logs from opal. */ if (opal_check_token(OPAL_ELOG_RESEND)) opal_resend_pending_logs(); return 0; }
linux-master
arch/powerpc/platforms/powernv/opal-elog.c
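The comment block in create_elog_obj() above explains why an extra kobject reference is taken before the raw attribute is published: userspace may acknowledge and release the log before kobject_uevent() has finished. The following is a deliberately simplified, single-threaded userspace model of that pin-before-publish pattern; it is not the kobject API and omits the locking a genuinely concurrent version would need.

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refs;
};

static struct obj *obj_get(struct obj *o)
{
	o->refs++;
	return o;
}

static void obj_put(struct obj *o)
{
	if (--o->refs == 0) {
		printf("last reference dropped, freeing object\n");
		free(o);
	}
}

int main(void)
{
	struct obj *elog = calloc(1, sizeof(*elog));

	if (!elog)
		return 1;
	elog->refs = 1;		/* reference held by the creator                 */

	obj_get(elog);		/* extra reference taken before publishing       */
	/* ...object published; another context may release it early... */
	obj_put(elog);		/* simulated early release by that other context */

	/* Thanks to the extra reference the creator can still use it here. */
	printf("object still alive with %d reference(s)\n", elog->refs);
	obj_put(elog);		/* creator drops its reference last */
	return 0;
}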
// SPDX-License-Identifier: GPL-2.0-or-later /* * IO workarounds for PCI on Celleb/Cell platform * * (C) Copyright 2006-2007 TOSHIBA CORPORATION */ #undef DEBUG #include <linux/kernel.h> #include <linux/of_address.h> #include <linux/slab.h> #include <linux/io.h> #include <asm/ppc-pci.h> #include <asm/pci-bridge.h> #include <asm/io-workarounds.h> #define SPIDER_PCI_DISABLE_PREFETCH struct spiderpci_iowa_private { void __iomem *regs; }; static void spiderpci_io_flush(struct iowa_bus *bus) { struct spiderpci_iowa_private *priv; priv = bus->private; in_be32(priv->regs + SPIDER_PCI_DUMMY_READ); iosync(); } #define SPIDER_PCI_MMIO_READ(name, ret) \ static ret spiderpci_##name(const PCI_IO_ADDR addr) \ { \ ret val = __do_##name(addr); \ spiderpci_io_flush(iowa_mem_find_bus(addr)); \ return val; \ } #define SPIDER_PCI_MMIO_READ_STR(name) \ static void spiderpci_##name(const PCI_IO_ADDR addr, void *buf, \ unsigned long count) \ { \ __do_##name(addr, buf, count); \ spiderpci_io_flush(iowa_mem_find_bus(addr)); \ } SPIDER_PCI_MMIO_READ(readb, u8) SPIDER_PCI_MMIO_READ(readw, u16) SPIDER_PCI_MMIO_READ(readl, u32) SPIDER_PCI_MMIO_READ(readq, u64) SPIDER_PCI_MMIO_READ(readw_be, u16) SPIDER_PCI_MMIO_READ(readl_be, u32) SPIDER_PCI_MMIO_READ(readq_be, u64) SPIDER_PCI_MMIO_READ_STR(readsb) SPIDER_PCI_MMIO_READ_STR(readsw) SPIDER_PCI_MMIO_READ_STR(readsl) static void spiderpci_memcpy_fromio(void *dest, const PCI_IO_ADDR src, unsigned long n) { __do_memcpy_fromio(dest, src, n); spiderpci_io_flush(iowa_mem_find_bus(src)); } static int __init spiderpci_pci_setup_chip(struct pci_controller *phb, void __iomem *regs) { void *dummy_page_va; dma_addr_t dummy_page_da; #ifdef SPIDER_PCI_DISABLE_PREFETCH u32 val = in_be32(regs + SPIDER_PCI_VCI_CNTL_STAT); pr_debug("SPIDER_IOWA:PVCI_Control_Status was 0x%08x\n", val); out_be32(regs + SPIDER_PCI_VCI_CNTL_STAT, val | 0x8); #endif /* SPIDER_PCI_DISABLE_PREFETCH */ /* setup dummy read */ /* * On CellBlade, we can't know that which XDR memory is used by * kmalloc() to allocate dummy_page_va. * In order to improve the performance, the XDR which is used to * allocate dummy_page_va is the nearest the spider-pci. * We have to select the CBE which is the nearest the spider-pci * to allocate memory from the best XDR, but I don't know that * how to do. * * Celleb does not have this problem, because it has only one XDR. 
*/ dummy_page_va = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!dummy_page_va) { pr_err("SPIDERPCI-IOWA:Alloc dummy_page_va failed.\n"); return -1; } dummy_page_da = dma_map_single(phb->parent, dummy_page_va, PAGE_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(phb->parent, dummy_page_da)) { pr_err("SPIDER-IOWA:Map dummy page filed.\n"); kfree(dummy_page_va); return -1; } out_be32(regs + SPIDER_PCI_DUMMY_READ_BASE, dummy_page_da); return 0; } int __init spiderpci_iowa_init(struct iowa_bus *bus, void *data) { void __iomem *regs = NULL; struct spiderpci_iowa_private *priv; struct device_node *np = bus->phb->dn; struct resource r; unsigned long offset = (unsigned long)data; pr_debug("SPIDERPCI-IOWA:Bus initialize for spider(%pOF)\n", np); priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { pr_err("SPIDERPCI-IOWA:" "Can't allocate struct spiderpci_iowa_private"); return -1; } if (of_address_to_resource(np, 0, &r)) { pr_err("SPIDERPCI-IOWA:Can't get resource.\n"); goto error; } regs = ioremap(r.start + offset, SPIDER_PCI_REG_SIZE); if (!regs) { pr_err("SPIDERPCI-IOWA:ioremap failed.\n"); goto error; } priv->regs = regs; bus->private = priv; if (spiderpci_pci_setup_chip(bus->phb, regs)) goto error; return 0; error: kfree(priv); bus->private = NULL; if (regs) iounmap(regs); return -1; } struct ppc_pci_io spiderpci_ops = { .readb = spiderpci_readb, .readw = spiderpci_readw, .readl = spiderpci_readl, .readq = spiderpci_readq, .readw_be = spiderpci_readw_be, .readl_be = spiderpci_readl_be, .readq_be = spiderpci_readq_be, .readsb = spiderpci_readsb, .readsw = spiderpci_readsw, .readsl = spiderpci_readsl, .memcpy_fromio = spiderpci_memcpy_fromio, };
linux-master
arch/powerpc/platforms/cell/spider-pci.c
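The SPIDER_PCI_MMIO_READ() wrappers above append a dummy register read to every MMIO read so the bridge finishes its outstanding work before the value is used. The sketch below reproduces only the shape of that wrapper in ordinary userspace C; the volatile variables are stand-ins for device registers and the macro mirrors how a family of accessors can share one flush helper.

#include <stdio.h>
#include <stdint.h>

static volatile uint32_t fake_device_reg = 0xcafef00d;	/* stands in for a device register   */
static volatile uint32_t fake_dummy_reg;		/* stands in for the dummy-read slot */
static unsigned long flush_count;

static void io_flush(void)
{
	(void)fake_dummy_reg;	/* the extra read that lets the bridge settle */
	flush_count++;
}

/* Generate a read accessor that tacks the flush onto every access,
 * mirroring the SPIDER_PCI_MMIO_READ() macro shape above. */
#define MODEL_MMIO_READ(name, type, reg)	\
static type name(void)				\
{						\
	type val = (type)(reg);			\
	io_flush();				\
	return val;				\
}

MODEL_MMIO_READ(model_readl, uint32_t, fake_device_reg)

int main(void)
{
	unsigned int val = (unsigned int)model_readl();

	printf("read 0x%x, flushes issued: %lu\n", val, flush_count);
	return 0;
}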
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2007, Michael Ellerman, IBM Corporation. */ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/msi.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/debugfs.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/platform_device.h> #include <asm/dcr.h> #include <asm/machdep.h> #include "cell.h" /* * MSIC registers, specified as offsets from dcr_base */ #define MSIC_CTRL_REG 0x0 /* Base Address registers specify FIFO location in BE memory */ #define MSIC_BASE_ADDR_HI_REG 0x3 #define MSIC_BASE_ADDR_LO_REG 0x4 /* Hold the read/write offsets into the FIFO */ #define MSIC_READ_OFFSET_REG 0x5 #define MSIC_WRITE_OFFSET_REG 0x6 /* MSIC control register flags */ #define MSIC_CTRL_ENABLE 0x0001 #define MSIC_CTRL_FIFO_FULL_ENABLE 0x0002 #define MSIC_CTRL_IRQ_ENABLE 0x0008 #define MSIC_CTRL_FULL_STOP_ENABLE 0x0010 /* * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB. * Currently we're using a 64KB FIFO size. */ #define MSIC_FIFO_SIZE_SHIFT 16 #define MSIC_FIFO_SIZE_BYTES (1 << MSIC_FIFO_SIZE_SHIFT) /* * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits * 8-9 of the MSIC control reg. */ #define MSIC_CTRL_FIFO_SIZE (((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300) /* * We need to mask the read/write offsets to make sure they stay within * the bounds of the FIFO. Also they should always be 16-byte aligned. */ #define MSIC_FIFO_SIZE_MASK ((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu) /* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */ #define MSIC_FIFO_ENTRY_SIZE 0x10 struct axon_msic { struct irq_domain *irq_domain; __le32 *fifo_virt; dma_addr_t fifo_phys; dcr_host_t dcr_host; u32 read_offset; #ifdef DEBUG u32 __iomem *trigger; #endif }; #ifdef DEBUG void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic); #else static inline void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic) { } #endif static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val) { pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n); dcr_write(msic->dcr_host, dcr_n, val); } static void axon_msi_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct axon_msic *msic = irq_desc_get_handler_data(desc); u32 write_offset, msi; int idx; int retry = 0; write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG); pr_devel("axon_msi: original write_offset 0x%x\n", write_offset); /* write_offset doesn't wrap properly, so we have to mask it */ write_offset &= MSIC_FIFO_SIZE_MASK; while (msic->read_offset != write_offset && retry < 100) { idx = msic->read_offset / sizeof(__le32); msi = le32_to_cpu(msic->fifo_virt[idx]); msi &= 0xFFFF; pr_devel("axon_msi: woff %x roff %x msi %x\n", write_offset, msic->read_offset, msi); if (msi < nr_irqs && irq_get_chip_data(msi) == msic) { generic_handle_irq(msi); msic->fifo_virt[idx] = cpu_to_le32(0xffffffff); } else { /* * Reading the MSIC_WRITE_OFFSET_REG does not * reliably flush the outstanding DMA to the * FIFO buffer. Here we were reading stale * data, so we need to retry. 
*/ udelay(1); retry++; pr_devel("axon_msi: invalid irq 0x%x!\n", msi); continue; } if (retry) { pr_devel("axon_msi: late irq 0x%x, retry %d\n", msi, retry); retry = 0; } msic->read_offset += MSIC_FIFO_ENTRY_SIZE; msic->read_offset &= MSIC_FIFO_SIZE_MASK; } if (retry) { printk(KERN_WARNING "axon_msi: irq timed out\n"); msic->read_offset += MSIC_FIFO_ENTRY_SIZE; msic->read_offset &= MSIC_FIFO_SIZE_MASK; } chip->irq_eoi(&desc->irq_data); } static struct axon_msic *find_msi_translator(struct pci_dev *dev) { struct irq_domain *irq_domain; struct device_node *dn, *tmp; const phandle *ph; struct axon_msic *msic = NULL; dn = of_node_get(pci_device_to_OF_node(dev)); if (!dn) { dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n"); return NULL; } for (; dn; dn = of_get_next_parent(dn)) { ph = of_get_property(dn, "msi-translator", NULL); if (ph) break; } if (!ph) { dev_dbg(&dev->dev, "axon_msi: no msi-translator property found\n"); goto out_error; } tmp = dn; dn = of_find_node_by_phandle(*ph); of_node_put(tmp); if (!dn) { dev_dbg(&dev->dev, "axon_msi: msi-translator doesn't point to a node\n"); goto out_error; } irq_domain = irq_find_host(dn); if (!irq_domain) { dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %pOF\n", dn); goto out_error; } msic = irq_domain->host_data; out_error: of_node_put(dn); return msic; } static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg) { struct device_node *dn; int len; const u32 *prop; dn = of_node_get(pci_device_to_OF_node(dev)); if (!dn) { dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n"); return -ENODEV; } for (; dn; dn = of_get_next_parent(dn)) { if (!dev->no_64bit_msi) { prop = of_get_property(dn, "msi-address-64", &len); if (prop) break; } prop = of_get_property(dn, "msi-address-32", &len); if (prop) break; } if (!prop) { dev_dbg(&dev->dev, "axon_msi: no msi-address-(32|64) properties found\n"); of_node_put(dn); return -ENOENT; } switch (len) { case 8: msg->address_hi = prop[0]; msg->address_lo = prop[1]; break; case 4: msg->address_hi = 0; msg->address_lo = prop[0]; break; default: dev_dbg(&dev->dev, "axon_msi: malformed msi-address-(32|64) property\n"); of_node_put(dn); return -EINVAL; } of_node_put(dn); return 0; } static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { unsigned int virq, rc; struct msi_desc *entry; struct msi_msg msg; struct axon_msic *msic; msic = find_msi_translator(dev); if (!msic) return -ENODEV; rc = setup_msi_msg_address(dev, &msg); if (rc) return rc; msi_for_each_desc(entry, &dev->dev, MSI_DESC_NOTASSOCIATED) { virq = irq_create_direct_mapping(msic->irq_domain); if (!virq) { dev_warn(&dev->dev, "axon_msi: virq allocation failed!\n"); return -1; } dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq); irq_set_msi_desc(virq, entry); msg.data = virq; pci_write_msi_msg(virq, &msg); } return 0; } static void axon_msi_teardown_msi_irqs(struct pci_dev *dev) { struct msi_desc *entry; dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n"); msi_for_each_desc(entry, &dev->dev, MSI_DESC_ASSOCIATED) { irq_set_msi_desc(entry->irq, NULL); irq_dispose_mapping(entry->irq); entry->irq = 0; } } static struct irq_chip msic_irq_chip = { .irq_mask = pci_msi_mask_irq, .irq_unmask = pci_msi_unmask_irq, .irq_shutdown = pci_msi_mask_irq, .name = "AXON-MSI", }; static int msic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { irq_set_chip_data(virq, h->host_data); irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq); return 0; } static const struct irq_domain_ops 
msic_host_ops = { .map = msic_host_map, }; static void axon_msi_shutdown(struct platform_device *device) { struct axon_msic *msic = dev_get_drvdata(&device->dev); u32 tmp; pr_devel("axon_msi: disabling %pOF\n", irq_domain_get_of_node(msic->irq_domain)); tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; msic_dcr_write(msic, MSIC_CTRL_REG, tmp); } static int axon_msi_probe(struct platform_device *device) { struct device_node *dn = device->dev.of_node; struct axon_msic *msic; unsigned int virq; int dcr_base, dcr_len; pr_devel("axon_msi: setting up dn %pOF\n", dn); msic = kzalloc(sizeof(*msic), GFP_KERNEL); if (!msic) { printk(KERN_ERR "axon_msi: couldn't allocate msic for %pOF\n", dn); goto out; } dcr_base = dcr_resource_start(dn, 0); dcr_len = dcr_resource_len(dn, 0); if (dcr_base == 0 || dcr_len == 0) { printk(KERN_ERR "axon_msi: couldn't parse dcr properties on %pOF\n", dn); goto out_free_msic; } msic->dcr_host = dcr_map(dn, dcr_base, dcr_len); if (!DCR_MAP_OK(msic->dcr_host)) { printk(KERN_ERR "axon_msi: dcr_map failed for %pOF\n", dn); goto out_free_msic; } msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, &msic->fifo_phys, GFP_KERNEL); if (!msic->fifo_virt) { printk(KERN_ERR "axon_msi: couldn't allocate fifo for %pOF\n", dn); goto out_free_msic; } virq = irq_of_parse_and_map(dn, 0); if (!virq) { printk(KERN_ERR "axon_msi: irq parse and map failed for %pOF\n", dn); goto out_free_fifo; } memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES); /* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */ msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic); if (!msic->irq_domain) { printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %pOF\n", dn); goto out_free_fifo; } irq_set_handler_data(virq, msic); irq_set_chained_handler(virq, axon_msi_cascade); pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq); /* Enable the MSIC hardware */ msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32); msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG, msic->fifo_phys & 0xFFFFFFFF); msic_dcr_write(msic, MSIC_CTRL_REG, MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE | MSIC_CTRL_FIFO_SIZE); msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG) & MSIC_FIFO_SIZE_MASK; dev_set_drvdata(&device->dev, msic); cell_pci_controller_ops.setup_msi_irqs = axon_msi_setup_msi_irqs; cell_pci_controller_ops.teardown_msi_irqs = axon_msi_teardown_msi_irqs; axon_msi_debug_setup(dn, msic); printk(KERN_DEBUG "axon_msi: setup MSIC on %pOF\n", dn); return 0; out_free_fifo: dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt, msic->fifo_phys); out_free_msic: kfree(msic); out: return -1; } static const struct of_device_id axon_msi_device_id[] = { { .compatible = "ibm,axon-msic" }, {} }; static struct platform_driver axon_msi_driver = { .probe = axon_msi_probe, .shutdown = axon_msi_shutdown, .driver = { .name = "axon-msi", .of_match_table = axon_msi_device_id, }, }; static int __init axon_msi_init(void) { return platform_driver_register(&axon_msi_driver); } subsys_initcall(axon_msi_init); #ifdef DEBUG static int msic_set(void *data, u64 val) { struct axon_msic *msic = data; out_le32(msic->trigger, val); return 0; } static int msic_get(void *data, u64 *val) { *val = 0; return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n"); void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic) { char name[8]; struct resource res; if (of_address_to_resource(dn, 0, 
			       &res)) {
		pr_devel("axon_msi: couldn't get reg property\n");
		return;
	}

	msic->trigger = ioremap(res.start, 0x4);
	if (!msic->trigger) {
		pr_devel("axon_msi: ioremap failed\n");
		return;
	}

	snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));

	debugfs_create_file(name, 0600, arch_debugfs_dir, msic, &fops_msic);
}
#endif /* DEBUG */
linux-master
arch/powerpc/platforms/cell/axon_msi.c
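axon_msi_cascade() above walks a FIFO of 16-byte entries while keeping read_offset 16-byte aligned and masked so it wraps inside the buffer. The userspace sketch below shrinks the FIFO to 256 bytes to demonstrate the same offset arithmetic; the only detail carried over from the driver is that the irq number sits in the first word of each entry, everything else is made up for the example.

#include <stdio.h>
#include <stdint.h>

#define FIFO_SIZE_BYTES	256				/* shrunk for the example       */
#define FIFO_ENTRY_SIZE	16				/* matches MSIC_FIFO_ENTRY_SIZE */
#define FIFO_SIZE_MASK	((FIFO_SIZE_BYTES - 1) & ~0xFu)

static uint32_t fifo[FIFO_SIZE_BYTES / sizeof(uint32_t)];

/* Producer: drop the irq number in the first word of the next entry. */
static void produce(uint32_t *write_offset, uint32_t msi)
{
	fifo[*write_offset / sizeof(uint32_t)] = msi;
	*write_offset = (*write_offset + FIFO_ENTRY_SIZE) & FIFO_SIZE_MASK;
}

int main(void)
{
	uint32_t read_offset = FIFO_SIZE_BYTES - FIFO_ENTRY_SIZE;	/* start in the last slot */
	uint32_t write_offset = read_offset;

	for (uint32_t msi = 100; msi < 105; msi++)
		produce(&write_offset, msi);

	/* Consumer: same masking, so the walk wraps cleanly past the end. */
	while (read_offset != write_offset) {
		printf("offset %3u -> msi %u\n", read_offset,
		       fifo[read_offset / sizeof(uint32_t)] & 0xFFFF);
		read_offset = (read_offset + FIFO_ENTRY_SIZE) & FIFO_SIZE_MASK;
	}
	return 0;
}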
// SPDX-License-Identifier: GPL-2.0-or-later /* * Cell Internal Interrupt Controller * * Copyright (C) 2006 Benjamin Herrenschmidt ([email protected]) * IBM, Corp. * * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 * * Author: Arnd Bergmann <[email protected]> * * TODO: * - Fix various assumptions related to HW CPU numbers vs. linux CPU numbers * vs node numbers in the setup code * - Implement proper handling of maxcpus=1/2 (that is, routing of irqs from * a non-active node to the active node) */ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqdomain.h> #include <linux/export.h> #include <linux/percpu.h> #include <linux/types.h> #include <linux/ioport.h> #include <linux/kernel_stat.h> #include <linux/pgtable.h> #include <linux/of_address.h> #include <asm/io.h> #include <asm/ptrace.h> #include <asm/machdep.h> #include <asm/cell-regs.h> #include "interrupt.h" struct iic { struct cbe_iic_thread_regs __iomem *regs; u8 target_id; u8 eoi_stack[16]; int eoi_ptr; struct device_node *node; }; static DEFINE_PER_CPU(struct iic, cpu_iic); #define IIC_NODE_COUNT 2 static struct irq_domain *iic_host; /* Convert between "pending" bits and hw irq number */ static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits) { unsigned char unit = bits.source & 0xf; unsigned char node = bits.source >> 4; unsigned char class = bits.class & 3; /* Decode IPIs */ if (bits.flags & CBE_IIC_IRQ_IPI) return IIC_IRQ_TYPE_IPI | (bits.prio >> 4); else return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit; } static void iic_mask(struct irq_data *d) { } static void iic_unmask(struct irq_data *d) { } static void iic_eoi(struct irq_data *d) { struct iic *iic = this_cpu_ptr(&cpu_iic); out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]); BUG_ON(iic->eoi_ptr < 0); } static struct irq_chip iic_chip = { .name = "CELL-IIC", .irq_mask = iic_mask, .irq_unmask = iic_unmask, .irq_eoi = iic_eoi, }; static void iic_ioexc_eoi(struct irq_data *d) { } static void iic_ioexc_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct cbe_iic_regs __iomem *node_iic = (void __iomem *)irq_desc_get_handler_data(desc); unsigned int irq = irq_desc_get_irq(desc); unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC; unsigned long bits, ack; int cascade; for (;;) { bits = in_be64(&node_iic->iic_is); if (bits == 0) break; /* pre-ack edge interrupts */ ack = bits & IIC_ISR_EDGE_MASK; if (ack) out_be64(&node_iic->iic_is, ack); /* handle them */ for (cascade = 63; cascade >= 0; cascade--) if (bits & (0x8000000000000000UL >> cascade)) generic_handle_domain_irq(iic_host, base | cascade); /* post-ack level interrupts */ ack = bits & ~IIC_ISR_EDGE_MASK; if (ack) out_be64(&node_iic->iic_is, ack); } chip->irq_eoi(&desc->irq_data); } static struct irq_chip iic_ioexc_chip = { .name = "CELL-IOEX", .irq_mask = iic_mask, .irq_unmask = iic_unmask, .irq_eoi = iic_ioexc_eoi, }; /* Get an IRQ number from the pending state register of the IIC */ static unsigned int iic_get_irq(void) { struct cbe_iic_pending_bits pending; struct iic *iic; unsigned int virq; iic = this_cpu_ptr(&cpu_iic); *(unsigned long *) &pending = in_be64((u64 __iomem *) &iic->regs->pending_destr); if (!(pending.flags & CBE_IIC_IRQ_VALID)) return 0; virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending)); if (!virq) return 0; iic->eoi_stack[++iic->eoi_ptr] = pending.prio; BUG_ON(iic->eoi_ptr > 15); return virq; } void iic_setup_cpu(void) { out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff); } u8 
iic_get_target_id(int cpu) { return per_cpu(cpu_iic, cpu).target_id; } EXPORT_SYMBOL_GPL(iic_get_target_id); #ifdef CONFIG_SMP /* Use the highest interrupt priorities for IPI */ static inline int iic_msg_to_irq(int msg) { return IIC_IRQ_TYPE_IPI + 0xf - msg; } void iic_message_pass(int cpu, int msg) { out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4); } static void iic_request_ipi(int msg) { int virq; virq = irq_create_mapping(iic_host, iic_msg_to_irq(msg)); if (!virq) { printk(KERN_ERR "iic: failed to map IPI %s\n", smp_ipi_name[msg]); return; } /* * If smp_request_message_ipi encounters an error it will notify * the error. If a message is not needed it will return non-zero. */ if (smp_request_message_ipi(virq, msg)) irq_dispose_mapping(virq); } void iic_request_IPIs(void) { iic_request_ipi(PPC_MSG_CALL_FUNCTION); iic_request_ipi(PPC_MSG_RESCHEDULE); iic_request_ipi(PPC_MSG_TICK_BROADCAST); iic_request_ipi(PPC_MSG_NMI_IPI); } #endif /* CONFIG_SMP */ static int iic_host_match(struct irq_domain *h, struct device_node *node, enum irq_domain_bus_token bus_token) { return of_device_is_compatible(node, "IBM,CBEA-Internal-Interrupt-Controller"); } static int iic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { switch (hw & IIC_IRQ_TYPE_MASK) { case IIC_IRQ_TYPE_IPI: irq_set_chip_and_handler(virq, &iic_chip, handle_percpu_irq); break; case IIC_IRQ_TYPE_IOEXC: irq_set_chip_and_handler(virq, &iic_ioexc_chip, handle_edge_eoi_irq); break; default: irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq); } return 0; } static int iic_host_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) { unsigned int node, ext, unit, class; const u32 *val; if (!of_device_is_compatible(ct, "IBM,CBEA-Internal-Interrupt-Controller")) return -ENODEV; if (intsize != 1) return -ENODEV; val = of_get_property(ct, "#interrupt-cells", NULL); if (val == NULL || *val != 1) return -ENODEV; node = intspec[0] >> 24; ext = (intspec[0] >> 16) & 0xff; class = (intspec[0] >> 8) & 0xff; unit = intspec[0] & 0xff; /* Check if node is in supported range */ if (node > 1) return -EINVAL; /* Build up interrupt number, special case for IO exceptions */ *out_hwirq = (node << IIC_IRQ_NODE_SHIFT); if (unit == IIC_UNIT_IIC && class == 1) *out_hwirq |= IIC_IRQ_TYPE_IOEXC | ext; else *out_hwirq |= IIC_IRQ_TYPE_NORMAL | (class << IIC_IRQ_CLASS_SHIFT) | unit; /* Dummy flags, ignored by iic code */ *out_flags = IRQ_TYPE_EDGE_RISING; return 0; } static const struct irq_domain_ops iic_host_ops = { .match = iic_host_match, .map = iic_host_map, .xlate = iic_host_xlate, }; static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr, struct device_node *node) { /* XXX FIXME: should locate the linux CPU number from the HW cpu * number properly. We are lucky for now */ struct iic *iic = &per_cpu(cpu_iic, hw_cpu); iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs)); BUG_ON(iic->regs == NULL); iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 
0xf : 0xe); iic->eoi_stack[0] = 0xff; iic->node = of_node_get(node); out_be64(&iic->regs->prio, 0); printk(KERN_INFO "IIC for CPU %d target id 0x%x : %pOF\n", hw_cpu, iic->target_id, node); } static int __init setup_iic(void) { struct device_node *dn; struct resource r0, r1; unsigned int node, cascade, found = 0; struct cbe_iic_regs __iomem *node_iic; const u32 *np; for_each_node_by_name(dn, "interrupt-controller") { if (!of_device_is_compatible(dn, "IBM,CBEA-Internal-Interrupt-Controller")) continue; np = of_get_property(dn, "ibm,interrupt-server-ranges", NULL); if (np == NULL) { printk(KERN_WARNING "IIC: CPU association not found\n"); of_node_put(dn); return -ENODEV; } if (of_address_to_resource(dn, 0, &r0) || of_address_to_resource(dn, 1, &r1)) { printk(KERN_WARNING "IIC: Can't resolve addresses\n"); of_node_put(dn); return -ENODEV; } found++; init_one_iic(np[0], r0.start, dn); init_one_iic(np[1], r1.start, dn); /* Setup cascade for IO exceptions. XXX cleanup tricks to get * node vs CPU etc... * Note that we configure the IIC_IRR here with a hard coded * priority of 1. We might want to improve that later. */ node = np[0] >> 1; node_iic = cbe_get_cpu_iic_regs(np[0]); cascade = node << IIC_IRQ_NODE_SHIFT; cascade |= 1 << IIC_IRQ_CLASS_SHIFT; cascade |= IIC_UNIT_IIC; cascade = irq_create_mapping(iic_host, cascade); if (!cascade) continue; /* * irq_data is a generic pointer that gets passed back * to us later, so the forced cast is fine. */ irq_set_handler_data(cascade, (void __force *)node_iic); irq_set_chained_handler(cascade, iic_ioexc_cascade); out_be64(&node_iic->iic_ir, (1 << 12) /* priority */ | (node << 4) /* dest node */ | IIC_UNIT_THREAD_0 /* route them to thread 0 */); /* Flush pending (make sure it triggers if there is * anything pending */ out_be64(&node_iic->iic_is, 0xfffffffffffffffful); } if (found) return 0; else return -ENODEV; } void __init iic_init_IRQ(void) { /* Setup an irq host data structure */ iic_host = irq_domain_add_linear(NULL, IIC_SOURCE_COUNT, &iic_host_ops, NULL); BUG_ON(iic_host == NULL); irq_set_default_host(iic_host); /* Discover and initialize iics */ if (setup_iic() < 0) panic("IIC: Failed to initialize !\n"); /* Set master interrupt handling function */ ppc_md.get_irq = iic_get_irq; /* Enable on current CPU */ iic_setup_cpu(); } void iic_set_interrupt_routing(int cpu, int thread, int priority) { struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu); u64 iic_ir = 0; int node = cpu >> 1; /* Set which node and thread will handle the next interrupt */ iic_ir |= CBE_IIC_IR_PRIO(priority) | CBE_IIC_IR_DEST_NODE(node); if (thread == 0) iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0); else iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1); out_be64(&iic_regs->iic_ir, iic_ir); }
linux-master
arch/powerpc/platforms/cell/interrupt.c
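iic_pending_to_hwnum() above packs the node, class and unit fields of the pending-bits register into one hardware irq number, with IPIs keyed by priority instead. The sketch below models that packing in userspace; the shift and type constants are placeholders for the real definitions in interrupt.h, which are not shown here.

#include <stdio.h>
#include <stdint.h>

#define MODEL_IRQ_NODE_SHIFT	6	/* assumed; the real value lives in interrupt.h */
#define MODEL_IRQ_TYPE_IPI	0x40	/* placeholder for IIC_IRQ_TYPE_IPI             */
#define MODEL_FLAG_IPI		0x1	/* placeholder for CBE_IIC_IRQ_IPI              */

struct model_pending_bits {
	uint8_t prio;
	uint8_t source;		/* high nibble: node, low nibble: unit */
	uint8_t class;
	uint8_t flags;
};

/* Same packing idea as iic_pending_to_hwnum(): IPIs are keyed by
 * priority, everything else by (node, class, unit). */
static unsigned int pending_to_hwnum(struct model_pending_bits bits)
{
	unsigned char unit  = bits.source & 0xf;
	unsigned char node  = bits.source >> 4;
	unsigned char class = bits.class & 3;

	if (bits.flags & MODEL_FLAG_IPI)
		return MODEL_IRQ_TYPE_IPI | (bits.prio >> 4);

	return (node << MODEL_IRQ_NODE_SHIFT) | (class << 4) | unit;
}

int main(void)
{
	struct model_pending_bits ext = { .prio = 0, .source = 0x15, .class = 2, .flags = 0 };
	struct model_pending_bits ipi = { .prio = 0xe0, .flags = MODEL_FLAG_IPI };

	printf("node 1, class 2, unit 5 -> hw irq 0x%x\n", pending_to_hwnum(ext));
	printf("IPI at priority 0xe0    -> hw irq 0x%x\n", pending_to_hwnum(ipi));
	return 0;
}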
// SPDX-License-Identifier: GPL-2.0-or-later /* * Cell Broadband Engine Performance Monitor * * (C) Copyright IBM Corporation 2001,2006 * * Author: * David Erb ([email protected]) * Kevin Corry ([email protected]) */ #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/types.h> #include <linux/export.h> #include <asm/io.h> #include <asm/irq_regs.h> #include <asm/machdep.h> #include <asm/pmc.h> #include <asm/reg.h> #include <asm/spu.h> #include <asm/cell-regs.h> #include "interrupt.h" /* * When writing to write-only mmio addresses, save a shadow copy. All of the * registers are 32-bit, but stored in the upper-half of a 64-bit field in * pmd_regs. */ #define WRITE_WO_MMIO(reg, x) \ do { \ u32 _x = (x); \ struct cbe_pmd_regs __iomem *pmd_regs; \ struct cbe_pmd_shadow_regs *shadow_regs; \ pmd_regs = cbe_get_cpu_pmd_regs(cpu); \ shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \ out_be64(&(pmd_regs->reg), (((u64)_x) << 32)); \ shadow_regs->reg = _x; \ } while (0) #define READ_SHADOW_REG(val, reg) \ do { \ struct cbe_pmd_shadow_regs *shadow_regs; \ shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); \ (val) = shadow_regs->reg; \ } while (0) #define READ_MMIO_UPPER32(val, reg) \ do { \ struct cbe_pmd_regs __iomem *pmd_regs; \ pmd_regs = cbe_get_cpu_pmd_regs(cpu); \ (val) = (u32)(in_be64(&pmd_regs->reg) >> 32); \ } while (0) /* * Physical counter registers. * Each physical counter can act as one 32-bit counter or two 16-bit counters. */ u32 cbe_read_phys_ctr(u32 cpu, u32 phys_ctr) { u32 val_in_latch, val = 0; if (phys_ctr < NR_PHYS_CTRS) { READ_SHADOW_REG(val_in_latch, counter_value_in_latch); /* Read the latch or the actual counter, whichever is newer. */ if (val_in_latch & (1 << phys_ctr)) { READ_SHADOW_REG(val, pm_ctr[phys_ctr]); } else { READ_MMIO_UPPER32(val, pm_ctr[phys_ctr]); } } return val; } EXPORT_SYMBOL_GPL(cbe_read_phys_ctr); void cbe_write_phys_ctr(u32 cpu, u32 phys_ctr, u32 val) { struct cbe_pmd_shadow_regs *shadow_regs; u32 pm_ctrl; if (phys_ctr < NR_PHYS_CTRS) { /* Writing to a counter only writes to a hardware latch. * The new value is not propagated to the actual counter * until the performance monitor is enabled. */ WRITE_WO_MMIO(pm_ctr[phys_ctr], val); pm_ctrl = cbe_read_pm(cpu, pm_control); if (pm_ctrl & CBE_PM_ENABLE_PERF_MON) { /* The counters are already active, so we need to * rewrite the pm_control register to "re-enable" * the PMU. */ cbe_write_pm(cpu, pm_control, pm_ctrl); } else { shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); shadow_regs->counter_value_in_latch |= (1 << phys_ctr); } } } EXPORT_SYMBOL_GPL(cbe_write_phys_ctr); /* * "Logical" counter registers. * These will read/write 16-bits or 32-bits depending on the * current size of the counter. Counters 4 - 7 are always 16-bit. */ u32 cbe_read_ctr(u32 cpu, u32 ctr) { u32 val; u32 phys_ctr = ctr & (NR_PHYS_CTRS - 1); val = cbe_read_phys_ctr(cpu, phys_ctr); if (cbe_get_ctr_size(cpu, phys_ctr) == 16) val = (ctr < NR_PHYS_CTRS) ? (val >> 16) : (val & 0xffff); return val; } EXPORT_SYMBOL_GPL(cbe_read_ctr); void cbe_write_ctr(u32 cpu, u32 ctr, u32 val) { u32 phys_ctr; u32 phys_val; phys_ctr = ctr & (NR_PHYS_CTRS - 1); if (cbe_get_ctr_size(cpu, phys_ctr) == 16) { phys_val = cbe_read_phys_ctr(cpu, phys_ctr); if (ctr < NR_PHYS_CTRS) val = (val << 16) | (phys_val & 0xffff); else val = (val & 0xffff) | (phys_val & 0xffff0000); } cbe_write_phys_ctr(cpu, phys_ctr, val); } EXPORT_SYMBOL_GPL(cbe_write_ctr); /* * Counter-control registers. * Each "logical" counter has a corresponding control register. 
*/ u32 cbe_read_pm07_control(u32 cpu, u32 ctr) { u32 pm07_control = 0; if (ctr < NR_CTRS) READ_SHADOW_REG(pm07_control, pm07_control[ctr]); return pm07_control; } EXPORT_SYMBOL_GPL(cbe_read_pm07_control); void cbe_write_pm07_control(u32 cpu, u32 ctr, u32 val) { if (ctr < NR_CTRS) WRITE_WO_MMIO(pm07_control[ctr], val); } EXPORT_SYMBOL_GPL(cbe_write_pm07_control); /* * Other PMU control registers. Most of these are write-only. */ u32 cbe_read_pm(u32 cpu, enum pm_reg_name reg) { u32 val = 0; switch (reg) { case group_control: READ_SHADOW_REG(val, group_control); break; case debug_bus_control: READ_SHADOW_REG(val, debug_bus_control); break; case trace_address: READ_MMIO_UPPER32(val, trace_address); break; case ext_tr_timer: READ_SHADOW_REG(val, ext_tr_timer); break; case pm_status: READ_MMIO_UPPER32(val, pm_status); break; case pm_control: READ_SHADOW_REG(val, pm_control); break; case pm_interval: READ_MMIO_UPPER32(val, pm_interval); break; case pm_start_stop: READ_SHADOW_REG(val, pm_start_stop); break; } return val; } EXPORT_SYMBOL_GPL(cbe_read_pm); void cbe_write_pm(u32 cpu, enum pm_reg_name reg, u32 val) { switch (reg) { case group_control: WRITE_WO_MMIO(group_control, val); break; case debug_bus_control: WRITE_WO_MMIO(debug_bus_control, val); break; case trace_address: WRITE_WO_MMIO(trace_address, val); break; case ext_tr_timer: WRITE_WO_MMIO(ext_tr_timer, val); break; case pm_status: WRITE_WO_MMIO(pm_status, val); break; case pm_control: WRITE_WO_MMIO(pm_control, val); break; case pm_interval: WRITE_WO_MMIO(pm_interval, val); break; case pm_start_stop: WRITE_WO_MMIO(pm_start_stop, val); break; } } EXPORT_SYMBOL_GPL(cbe_write_pm); /* * Get/set the size of a physical counter to either 16 or 32 bits. */ u32 cbe_get_ctr_size(u32 cpu, u32 phys_ctr) { u32 pm_ctrl, size = 0; if (phys_ctr < NR_PHYS_CTRS) { pm_ctrl = cbe_read_pm(cpu, pm_control); size = (pm_ctrl & CBE_PM_16BIT_CTR(phys_ctr)) ? 16 : 32; } return size; } EXPORT_SYMBOL_GPL(cbe_get_ctr_size); void cbe_set_ctr_size(u32 cpu, u32 phys_ctr, u32 ctr_size) { u32 pm_ctrl; if (phys_ctr < NR_PHYS_CTRS) { pm_ctrl = cbe_read_pm(cpu, pm_control); switch (ctr_size) { case 16: pm_ctrl |= CBE_PM_16BIT_CTR(phys_ctr); break; case 32: pm_ctrl &= ~CBE_PM_16BIT_CTR(phys_ctr); break; } cbe_write_pm(cpu, pm_control, pm_ctrl); } } EXPORT_SYMBOL_GPL(cbe_set_ctr_size); /* * Enable/disable the entire performance monitoring unit. * When we enable the PMU, all pending writes to counters get committed. */ void cbe_enable_pm(u32 cpu) { struct cbe_pmd_shadow_regs *shadow_regs; u32 pm_ctrl; shadow_regs = cbe_get_cpu_pmd_shadow_regs(cpu); shadow_regs->counter_value_in_latch = 0; pm_ctrl = cbe_read_pm(cpu, pm_control) | CBE_PM_ENABLE_PERF_MON; cbe_write_pm(cpu, pm_control, pm_ctrl); } EXPORT_SYMBOL_GPL(cbe_enable_pm); void cbe_disable_pm(u32 cpu) { u32 pm_ctrl; pm_ctrl = cbe_read_pm(cpu, pm_control) & ~CBE_PM_ENABLE_PERF_MON; cbe_write_pm(cpu, pm_control, pm_ctrl); } EXPORT_SYMBOL_GPL(cbe_disable_pm); /* * Reading from the trace_buffer. * The trace buffer is two 64-bit registers. Reading from * the second half automatically increments the trace_address. */ void cbe_read_trace_buffer(u32 cpu, u64 *buf) { struct cbe_pmd_regs __iomem *pmd_regs = cbe_get_cpu_pmd_regs(cpu); *buf++ = in_be64(&pmd_regs->trace_buffer_0_63); *buf++ = in_be64(&pmd_regs->trace_buffer_64_127); } EXPORT_SYMBOL_GPL(cbe_read_trace_buffer); /* * Enabling/disabling interrupts for the entire performance monitoring unit. 
*/ u32 cbe_get_and_clear_pm_interrupts(u32 cpu) { /* Reading pm_status clears the interrupt bits. */ return cbe_read_pm(cpu, pm_status); } EXPORT_SYMBOL_GPL(cbe_get_and_clear_pm_interrupts); void cbe_enable_pm_interrupts(u32 cpu, u32 thread, u32 mask) { /* Set which node and thread will handle the next interrupt. */ iic_set_interrupt_routing(cpu, thread, 0); /* Enable the interrupt bits in the pm_status register. */ if (mask) cbe_write_pm(cpu, pm_status, mask); } EXPORT_SYMBOL_GPL(cbe_enable_pm_interrupts); void cbe_disable_pm_interrupts(u32 cpu) { cbe_get_and_clear_pm_interrupts(cpu); cbe_write_pm(cpu, pm_status, 0); } EXPORT_SYMBOL_GPL(cbe_disable_pm_interrupts); static irqreturn_t cbe_pm_irq(int irq, void *dev_id) { perf_irq(get_irq_regs()); return IRQ_HANDLED; } static int __init cbe_init_pm_irq(void) { unsigned int irq; int rc, node; for_each_online_node(node) { irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI | (node << IIC_IRQ_NODE_SHIFT)); if (!irq) { printk("ERROR: Unable to allocate irq for node %d\n", node); return -EINVAL; } rc = request_irq(irq, cbe_pm_irq, 0, "cbe-pmu-0", NULL); if (rc) { printk("ERROR: Request for irq on node %d failed\n", node); return rc; } } return 0; } machine_arch_initcall(cell, cbe_init_pm_irq); void cbe_sync_irq(int node) { unsigned int irq; irq = irq_find_mapping(NULL, IIC_IRQ_IOEX_PMI | (node << IIC_IRQ_NODE_SHIFT)); if (!irq) { printk(KERN_WARNING "ERROR, unable to get existing irq %d " \ "for node %d\n", irq, node); return; } synchronize_irq(irq); } EXPORT_SYMBOL_GPL(cbe_sync_irq);
linux-master
arch/powerpc/platforms/cell/pmu.c
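cbe_read_ctr() and cbe_write_ctr() above let one 32-bit physical counter back two 16-bit logical counters, with the low-numbered logical counter in the upper half. The sketch below models only that packing and assumes every counter is in 16-bit mode, whereas the real code first asks cbe_get_ctr_size() which mode pm_control selects.

#include <stdio.h>
#include <stdint.h>

#define NR_PHYS 4	/* stand-in for NR_PHYS_CTRS */

static uint32_t phys_ctr[NR_PHYS];

/* Logical counters 0..3 use the upper 16 bits of their physical
 * counter, logical counters 4..7 the lower 16 bits. */
static uint32_t read_ctr(unsigned int ctr)
{
	uint32_t val = phys_ctr[ctr & (NR_PHYS - 1)];

	return (ctr < NR_PHYS) ? (val >> 16) : (val & 0xffff);
}

static void write_ctr(unsigned int ctr, uint32_t val)
{
	unsigned int phys = ctr & (NR_PHYS - 1);
	uint32_t old = phys_ctr[phys];

	if (ctr < NR_PHYS)
		phys_ctr[phys] = (val << 16) | (old & 0xffff);
	else
		phys_ctr[phys] = (val & 0xffff) | (old & 0xffff0000);
}

int main(void)
{
	write_ctr(1, 0x1111);	/* logical 1 -> upper half of physical 1 */
	write_ctr(5, 0x2222);	/* logical 5 -> lower half of physical 1 */

	printf("phys[1]=0x%08x ctr1=0x%x ctr5=0x%x\n",
	       (unsigned int)phys_ctr[1],
	       (unsigned int)read_ctr(1),
	       (unsigned int)read_ctr(5));
	return 0;
}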
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2006-2008, IBM Corporation. */ #undef DEBUG #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/smp.h> #include <linux/reboot.h> #include <linux/kexec.h> #include <linux/crash_dump.h> #include <linux/of.h> #include <asm/kexec.h> #include <asm/reg.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/rtas.h> #include <asm/cell-regs.h> #include "ras.h" #include "pervasive.h" static void dump_fir(int cpu) { struct cbe_pmd_regs __iomem *pregs = cbe_get_cpu_pmd_regs(cpu); struct cbe_iic_regs __iomem *iregs = cbe_get_cpu_iic_regs(cpu); if (pregs == NULL) return; /* Todo: do some nicer parsing of bits and based on them go down * to other sub-units FIRs and not only IIC */ printk(KERN_ERR "Global Checkstop FIR : 0x%016llx\n", in_be64(&pregs->checkstop_fir)); printk(KERN_ERR "Global Recoverable FIR : 0x%016llx\n", in_be64(&pregs->checkstop_fir)); printk(KERN_ERR "Global MachineCheck FIR : 0x%016llx\n", in_be64(&pregs->spec_att_mchk_fir)); if (iregs == NULL) return; printk(KERN_ERR "IOC FIR : 0x%016llx\n", in_be64(&iregs->ioc_fir)); } DEFINE_INTERRUPT_HANDLER(cbe_system_error_exception) { int cpu = smp_processor_id(); printk(KERN_ERR "System Error Interrupt on CPU %d !\n", cpu); dump_fir(cpu); dump_stack(); } DEFINE_INTERRUPT_HANDLER(cbe_maintenance_exception) { int cpu = smp_processor_id(); /* * Nothing implemented for the maintenance interrupt at this point */ printk(KERN_ERR "Unhandled Maintenance interrupt on CPU %d !\n", cpu); dump_stack(); } DEFINE_INTERRUPT_HANDLER(cbe_thermal_exception) { int cpu = smp_processor_id(); /* * Nothing implemented for the thermal interrupt at this point */ printk(KERN_ERR "Unhandled Thermal interrupt on CPU %d !\n", cpu); dump_stack(); } static int cbe_machine_check_handler(struct pt_regs *regs) { int cpu = smp_processor_id(); printk(KERN_ERR "Machine Check Interrupt on CPU %d !\n", cpu); dump_fir(cpu); /* No recovery from this code now, lets continue */ return 0; } struct ptcal_area { struct list_head list; int nid; int order; struct page *pages; }; static LIST_HEAD(ptcal_list); static int ptcal_start_tok, ptcal_stop_tok; static int __init cbe_ptcal_enable_on_node(int nid, int order) { struct ptcal_area *area; int ret = -ENOMEM; unsigned long addr; if (is_kdump_kernel()) rtas_call(ptcal_stop_tok, 1, 1, NULL, nid); area = kmalloc(sizeof(*area), GFP_KERNEL); if (!area) goto out_err; area->nid = nid; area->order = order; area->pages = __alloc_pages_node(area->nid, GFP_KERNEL|__GFP_THISNODE, area->order); if (!area->pages) { printk(KERN_WARNING "%s: no page on node %d\n", __func__, area->nid); goto out_free_area; } /* * We move the ptcal area to the middle of the allocated * page, in order to avoid prefetches in memcpy and similar * functions stepping on it. 
*/ addr = __pa(page_address(area->pages)) + (PAGE_SIZE >> 1); printk(KERN_DEBUG "%s: enabling PTCAL on node %d address=0x%016lx\n", __func__, area->nid, addr); ret = -EIO; if (rtas_call(ptcal_start_tok, 3, 1, NULL, area->nid, (unsigned int)(addr >> 32), (unsigned int)(addr & 0xffffffff))) { printk(KERN_ERR "%s: error enabling PTCAL on node %d!\n", __func__, nid); goto out_free_pages; } list_add(&area->list, &ptcal_list); return 0; out_free_pages: __free_pages(area->pages, area->order); out_free_area: kfree(area); out_err: return ret; } static int __init cbe_ptcal_enable(void) { const u32 *size; struct device_node *np; int order, found_mic = 0; np = of_find_node_by_path("/rtas"); if (!np) return -ENODEV; size = of_get_property(np, "ibm,cbe-ptcal-size", NULL); if (!size) { of_node_put(np); return -ENODEV; } pr_debug("%s: enabling PTCAL, size = 0x%x\n", __func__, *size); order = get_order(*size); of_node_put(np); /* support for malta device trees, with be@/mic@ nodes */ for_each_node_by_type(np, "mic-tm") { cbe_ptcal_enable_on_node(of_node_to_nid(np), order); found_mic = 1; } if (found_mic) return 0; /* support for older device tree - use cpu nodes */ for_each_node_by_type(np, "cpu") { const u32 *nid = of_get_property(np, "node-id", NULL); if (!nid) { printk(KERN_ERR "%s: node %pOF is missing node-id?\n", __func__, np); continue; } cbe_ptcal_enable_on_node(*nid, order); found_mic = 1; } return found_mic ? 0 : -ENODEV; } static int cbe_ptcal_disable(void) { struct ptcal_area *area, *tmp; int ret = 0; pr_debug("%s: disabling PTCAL\n", __func__); list_for_each_entry_safe(area, tmp, &ptcal_list, list) { /* disable ptcal on this node */ if (rtas_call(ptcal_stop_tok, 1, 1, NULL, area->nid)) { printk(KERN_ERR "%s: error disabling PTCAL " "on node %d!\n", __func__, area->nid); ret = -EIO; continue; } /* ensure we can access the PTCAL area */ memset(page_address(area->pages), 0, 1 << (area->order + PAGE_SHIFT)); /* clean up */ list_del(&area->list); __free_pages(area->pages, area->order); kfree(area); } return ret; } static int cbe_ptcal_notify_reboot(struct notifier_block *nb, unsigned long code, void *data) { return cbe_ptcal_disable(); } static void cbe_ptcal_crash_shutdown(void) { cbe_ptcal_disable(); } static struct notifier_block cbe_ptcal_reboot_notifier = { .notifier_call = cbe_ptcal_notify_reboot }; #ifdef CONFIG_PPC_IBM_CELL_RESETBUTTON static int sysreset_hack; static int __init cbe_sysreset_init(void) { struct cbe_pmd_regs __iomem *regs; sysreset_hack = of_machine_is_compatible("IBM,CBPLUS-1.0"); if (!sysreset_hack) return 0; regs = cbe_get_cpu_pmd_regs(0); if (!regs) return 0; /* Enable JTAG system-reset hack */ out_be32(&regs->fir_mode_reg, in_be32(&regs->fir_mode_reg) | CBE_PMD_FIR_MODE_M8); return 0; } device_initcall(cbe_sysreset_init); int cbe_sysreset_hack(void) { struct cbe_pmd_regs __iomem *regs; /* * The BMC can inject user triggered system reset exceptions, * but cannot set the system reset reason in srr1, * so check an extra register here. 
*/ if (sysreset_hack && (smp_processor_id() == 0)) { regs = cbe_get_cpu_pmd_regs(0); if (!regs) return 0; if (in_be64(&regs->ras_esc_0) & 0x0000ffff) { out_be64(&regs->ras_esc_0, 0); return 0; } } return 1; } #endif /* CONFIG_PPC_IBM_CELL_RESETBUTTON */ static int __init cbe_ptcal_init(void) { int ret; ptcal_start_tok = rtas_function_token(RTAS_FN_IBM_CBE_START_PTCAL); ptcal_stop_tok = rtas_function_token(RTAS_FN_IBM_CBE_STOP_PTCAL); if (ptcal_start_tok == RTAS_UNKNOWN_SERVICE || ptcal_stop_tok == RTAS_UNKNOWN_SERVICE) return -ENODEV; ret = register_reboot_notifier(&cbe_ptcal_reboot_notifier); if (ret) goto out1; ret = crash_shutdown_register(&cbe_ptcal_crash_shutdown); if (ret) goto out2; return cbe_ptcal_enable(); out2: unregister_reboot_notifier(&cbe_ptcal_reboot_notifier); out1: printk(KERN_ERR "Can't disable PTCAL, so not enabling\n"); return ret; } arch_initcall(cbe_ptcal_init); void __init cbe_ras_init(void) { unsigned long hid0; /* * Enable System Error & thermal interrupts and wakeup conditions */ hid0 = mfspr(SPRN_HID0); hid0 |= HID0_CBE_THERM_INT_EN | HID0_CBE_THERM_WAKEUP | HID0_CBE_SYSERR_INT_EN | HID0_CBE_SYSERR_WAKEUP; mtspr(SPRN_HID0, hid0); mb(); /* * Install machine check handler. Leave setting of precise mode to * what the firmware did for now */ ppc_md.machine_check_exception = cbe_machine_check_handler; mb(); /* * For now, we assume that IOC_FIR is already set to forward some * error conditions to the System Error handler. If that is not true * then it will have to be fixed up here. */ }
linux-master
arch/powerpc/platforms/cell/ras.c
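cbe_ptcal_enable_on_node() and cbe_ptcal_disable() above keep every enabled area on a list so the reboot and crash hooks can tell firmware to stop before the memory is reused. The sketch below is a minimal userspace model of that bookkeeping only; the RTAS calls, page allocation and node handling of the real code are left out.

#include <stdio.h>
#include <stdlib.h>

struct area {
	int nid;
	struct area *next;
};

static struct area *area_list;	/* every enabled node is remembered here */

static int enable_on_node(int nid)
{
	struct area *a = malloc(sizeof(*a));

	if (!a)
		return -1;
	a->nid = nid;
	a->next = area_list;
	area_list = a;		/* record it so teardown can find it later */
	printf("enabled on node %d\n", nid);
	return 0;
}

/* What the reboot/crash hooks would do: walk the list and undo each entry. */
static void disable_all(void)
{
	while (area_list) {
		struct area *a = area_list;

		area_list = a->next;
		printf("disabled on node %d\n", a->nid);
		free(a);
	}
}

int main(void)
{
	enable_on_node(0);
	enable_on_node(1);
	disable_all();
	return 0;
}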
// SPDX-License-Identifier: GPL-2.0-or-later /* * spu aware cpufreq governor for the cell processor * * © Copyright IBM Corporation 2006-2008 * * Author: Christian Krafft <[email protected]> */ #include <linux/cpufreq.h> #include <linux/sched.h> #include <linux/sched/loadavg.h> #include <linux/module.h> #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/atomic.h> #include <asm/machdep.h> #include <asm/spu.h> #define POLL_TIME 100000 /* in µs */ #define EXP 753 /* exp(-1) in fixed-point */ struct spu_gov_info_struct { unsigned long busy_spus; /* fixed-point */ struct cpufreq_policy *policy; struct delayed_work work; unsigned int poll_int; /* µs */ }; static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info); static int calc_freq(struct spu_gov_info_struct *info) { int cpu; int busy_spus; cpu = info->policy->cpu; busy_spus = atomic_read(&cbe_spu_info[cpu_to_node(cpu)].busy_spus); info->busy_spus = calc_load(info->busy_spus, EXP, busy_spus * FIXED_1); pr_debug("cpu %d: busy_spus=%d, info->busy_spus=%ld\n", cpu, busy_spus, info->busy_spus); return info->policy->max * info->busy_spus / FIXED_1; } static void spu_gov_work(struct work_struct *work) { struct spu_gov_info_struct *info; int delay; unsigned long target_freq; info = container_of(work, struct spu_gov_info_struct, work.work); /* after cancel_delayed_work_sync we unset info->policy */ BUG_ON(info->policy == NULL); target_freq = calc_freq(info); __cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H); delay = usecs_to_jiffies(info->poll_int); schedule_delayed_work_on(info->policy->cpu, &info->work, delay); } static void spu_gov_init_work(struct spu_gov_info_struct *info) { int delay = usecs_to_jiffies(info->poll_int); INIT_DEFERRABLE_WORK(&info->work, spu_gov_work); schedule_delayed_work_on(info->policy->cpu, &info->work, delay); } static void spu_gov_cancel_work(struct spu_gov_info_struct *info) { cancel_delayed_work_sync(&info->work); } static int spu_gov_start(struct cpufreq_policy *policy) { unsigned int cpu = policy->cpu; struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu); struct spu_gov_info_struct *affected_info; int i; if (!cpu_online(cpu)) { printk(KERN_ERR "cpu %d is not online\n", cpu); return -EINVAL; } if (!policy->cur) { printk(KERN_ERR "no cpu specified in policy\n"); return -EINVAL; } /* initialize spu_gov_info for all affected cpus */ for_each_cpu(i, policy->cpus) { affected_info = &per_cpu(spu_gov_info, i); affected_info->policy = policy; } info->poll_int = POLL_TIME; /* setup timer */ spu_gov_init_work(info); return 0; } static void spu_gov_stop(struct cpufreq_policy *policy) { unsigned int cpu = policy->cpu; struct spu_gov_info_struct *info = &per_cpu(spu_gov_info, cpu); int i; /* cancel timer */ spu_gov_cancel_work(info); /* clean spu_gov_info for all affected cpus */ for_each_cpu (i, policy->cpus) { info = &per_cpu(spu_gov_info, i); info->policy = NULL; } } static struct cpufreq_governor spu_governor = { .name = "spudemand", .start = spu_gov_start, .stop = spu_gov_stop, .owner = THIS_MODULE, }; cpufreq_governor_init(spu_governor); cpufreq_governor_exit(spu_governor); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Christian Krafft <[email protected]>");
linux-master
arch/powerpc/platforms/cell/cpufreq_spudemand.c
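calc_freq() above keeps an exponentially decayed average of the busy SPU count in fixed point, using FIXED_1 as the scale and EXP as roughly exp(-1) in that scale. The sketch below re-implements the averaging in its simplest form (without the rounding tweak of the kernel's calc_load() helper) to show how the average converges on the instantaneous count; the SPU count used is an arbitrary example.

#include <stdio.h>

#define FIXED_1	(1 << 11)	/* same scale as the kernel's loadavg fixed point */
#define EXP	753		/* roughly exp(-1) * FIXED_1                      */

/* Decay the old average and blend in the new sample. */
static unsigned long model_calc_load(unsigned long load, unsigned long active)
{
	return (load * EXP + active * (FIXED_1 - EXP)) / FIXED_1;
}

int main(void)
{
	unsigned long busy = 0;		/* fixed-point running average */
	int busy_spus = 6;		/* pretend 6 SPUs are busy     */

	for (int tick = 0; tick < 10; tick++) {
		busy = model_calc_load(busy, (unsigned long)busy_spus * FIXED_1);
		printf("tick %2d: avg busy SPUs = %lu.%03lu\n", tick,
		       busy / FIXED_1, (busy % FIXED_1) * 1000 / FIXED_1);
	}
	return 0;
}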
// SPDX-License-Identifier: GPL-2.0-or-later /* * SPU file system -- system call stubs * * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 * (C) Copyright 2006-2007, IBM Corporation * * Author: Arnd Bergmann <[email protected]> */ #include <linux/file.h> #include <linux/fs.h> #include <linux/module.h> #include <linux/syscalls.h> #include <linux/rcupdate.h> #include <linux/binfmts.h> #include <asm/spu.h> /* protected by rcu */ static struct spufs_calls *spufs_calls; #ifdef CONFIG_SPU_FS_MODULE static inline struct spufs_calls *spufs_calls_get(void) { struct spufs_calls *calls = NULL; rcu_read_lock(); calls = rcu_dereference(spufs_calls); if (calls && !try_module_get(calls->owner)) calls = NULL; rcu_read_unlock(); return calls; } static inline void spufs_calls_put(struct spufs_calls *calls) { BUG_ON(calls != spufs_calls); /* we don't need to rcu this, as we hold a reference to the module */ module_put(spufs_calls->owner); } #else /* !defined CONFIG_SPU_FS_MODULE */ static inline struct spufs_calls *spufs_calls_get(void) { return spufs_calls; } static inline void spufs_calls_put(struct spufs_calls *calls) { } #endif /* CONFIG_SPU_FS_MODULE */ SYSCALL_DEFINE4(spu_create, const char __user *, name, unsigned int, flags, umode_t, mode, int, neighbor_fd) { long ret; struct spufs_calls *calls; calls = spufs_calls_get(); if (!calls) return -ENOSYS; if (flags & SPU_CREATE_AFFINITY_SPU) { struct fd neighbor = fdget(neighbor_fd); ret = -EBADF; if (neighbor.file) { ret = calls->create_thread(name, flags, mode, neighbor.file); fdput(neighbor); } } else ret = calls->create_thread(name, flags, mode, NULL); spufs_calls_put(calls); return ret; } SYSCALL_DEFINE3(spu_run,int, fd, __u32 __user *, unpc, __u32 __user *, ustatus) { long ret; struct fd arg; struct spufs_calls *calls; calls = spufs_calls_get(); if (!calls) return -ENOSYS; ret = -EBADF; arg = fdget(fd); if (arg.file) { ret = calls->spu_run(arg.file, unpc, ustatus); fdput(arg); } spufs_calls_put(calls); return ret; } #ifdef CONFIG_COREDUMP int elf_coredump_extra_notes_size(void) { struct spufs_calls *calls; int ret; calls = spufs_calls_get(); if (!calls) return 0; ret = calls->coredump_extra_notes_size(); spufs_calls_put(calls); return ret; } int elf_coredump_extra_notes_write(struct coredump_params *cprm) { struct spufs_calls *calls; int ret; calls = spufs_calls_get(); if (!calls) return 0; ret = calls->coredump_extra_notes_write(cprm); spufs_calls_put(calls); return ret; } #endif void notify_spus_active(void) { struct spufs_calls *calls; calls = spufs_calls_get(); if (!calls) return; calls->notify_spus_active(); spufs_calls_put(calls); return; } int register_spu_syscalls(struct spufs_calls *calls) { if (spufs_calls) return -EBUSY; rcu_assign_pointer(spufs_calls, calls); return 0; } EXPORT_SYMBOL_GPL(register_spu_syscalls); void unregister_spu_syscalls(struct spufs_calls *calls) { BUG_ON(spufs_calls->owner != calls->owner); RCU_INIT_POINTER(spufs_calls, NULL); synchronize_rcu(); } EXPORT_SYMBOL_GPL(unregister_spu_syscalls);
linux-master
arch/powerpc/platforms/cell/spu_syscalls.c
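The spu_create/spu_run stubs reach the spufs module through an RCU-published ops pointer: readers snapshot spufs_calls under rcu_read_lock() and pin the provider with try_module_get() before calling through it, so unregister_spu_syscalls() can clear the pointer and wait for in-flight readers to drain. Below is a rough userspace analogue of that get/put discipline, using C11 atomics and a plain refcount as stand-ins for RCU and the module refcount; the structure and names are illustrative, not the kernel API.

#include <stdatomic.h>
#include <stdio.h>

struct ops {
	void (*hello)(void);
	atomic_int refcount;                 /* stands in for the module refcount */
};

static _Atomic(struct ops *) registered_ops;    /* stands in for spufs_calls */

static struct ops *ops_get(void)
{
	struct ops *o = atomic_load(&registered_ops);

	if (o)
		atomic_fetch_add(&o->refcount, 1);   /* "try_module_get" */
	return o;
}

static void ops_put(struct ops *o)
{
	atomic_fetch_sub(&o->refcount, 1);           /* "module_put" */
}

static void say_hello(void) { puts("hello from provider"); }

int main(void)
{
	static struct ops provider = { .hello = say_hello };
	struct ops *o;

	atomic_store(&registered_ops, &provider);    /* register side */

	o = ops_get();                               /* caller side */
	if (o) {
		o->hello();
		ops_put(o);
	}

	/* unregister side: clear the pointer; the real code then waits
	 * (synchronize_rcu) for callers that already took a reference */
	atomic_store(&registered_ops, NULL);
	return 0;
}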
// SPDX-License-Identifier: GPL-2.0-or-later /* * External Interrupt Controller on Spider South Bridge * * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 * * Author: Arnd Bergmann <[email protected]> */ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/ioport.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/pgtable.h> #include <asm/io.h> #include "interrupt.h" /* register layout taken from Spider spec, table 7.4-4 */ enum { TIR_DEN = 0x004, /* Detection Enable Register */ TIR_MSK = 0x084, /* Mask Level Register */ TIR_EDC = 0x0c0, /* Edge Detection Clear Register */ TIR_PNDA = 0x100, /* Pending Register A */ TIR_PNDB = 0x104, /* Pending Register B */ TIR_CS = 0x144, /* Current Status Register */ TIR_LCSA = 0x150, /* Level Current Status Register A */ TIR_LCSB = 0x154, /* Level Current Status Register B */ TIR_LCSC = 0x158, /* Level Current Status Register C */ TIR_LCSD = 0x15c, /* Level Current Status Register D */ TIR_CFGA = 0x200, /* Setting Register A0 */ TIR_CFGB = 0x204, /* Setting Register B0 */ /* 0x208 ... 0x3ff Setting Register An/Bn */ TIR_PPNDA = 0x400, /* Packet Pending Register A */ TIR_PPNDB = 0x404, /* Packet Pending Register B */ TIR_PIERA = 0x408, /* Packet Output Error Register A */ TIR_PIERB = 0x40c, /* Packet Output Error Register B */ TIR_PIEN = 0x444, /* Packet Output Enable Register */ TIR_PIPND = 0x454, /* Packet Output Pending Register */ TIRDID = 0x484, /* Spider Device ID Register */ REISTIM = 0x500, /* Reissue Command Timeout Time Setting */ REISTIMEN = 0x504, /* Reissue Command Timeout Setting */ REISWAITEN = 0x508, /* Reissue Wait Control*/ }; #define SPIDER_CHIP_COUNT 4 #define SPIDER_SRC_COUNT 64 #define SPIDER_IRQ_INVALID 63 struct spider_pic { struct irq_domain *host; void __iomem *regs; unsigned int node_id; }; static struct spider_pic spider_pics[SPIDER_CHIP_COUNT]; static struct spider_pic *spider_irq_data_to_pic(struct irq_data *d) { return irq_data_get_irq_chip_data(d); } static void __iomem *spider_get_irq_config(struct spider_pic *pic, unsigned int src) { return pic->regs + TIR_CFGA + 8 * src; } static void spider_unmask_irq(struct irq_data *d) { struct spider_pic *pic = spider_irq_data_to_pic(d); void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d)); out_be32(cfg, in_be32(cfg) | 0x30000000u); } static void spider_mask_irq(struct irq_data *d) { struct spider_pic *pic = spider_irq_data_to_pic(d); void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d)); out_be32(cfg, in_be32(cfg) & ~0x30000000u); } static void spider_ack_irq(struct irq_data *d) { struct spider_pic *pic = spider_irq_data_to_pic(d); unsigned int src = irqd_to_hwirq(d); /* Reset edge detection logic if necessary */ if (irqd_is_level_type(d)) return; /* Only interrupts 47 to 50 can be set to edge */ if (src < 47 || src > 50) return; /* Perform the clear of the edge logic */ out_be32(pic->regs + TIR_EDC, 0x100 | (src & 0xf)); } static int spider_set_irq_type(struct irq_data *d, unsigned int type) { unsigned int sense = type & IRQ_TYPE_SENSE_MASK; struct spider_pic *pic = spider_irq_data_to_pic(d); unsigned int hw = irqd_to_hwirq(d); void __iomem *cfg = spider_get_irq_config(pic, hw); u32 old_mask; u32 ic; /* Note that only level high is supported for most interrupts */ if (sense != IRQ_TYPE_NONE && sense != IRQ_TYPE_LEVEL_HIGH && (hw < 47 || hw > 50)) return -EINVAL; /* Decode sense type */ switch(sense) { case IRQ_TYPE_EDGE_RISING: ic = 0x3; break; case IRQ_TYPE_EDGE_FALLING: ic = 0x2; break; case IRQ_TYPE_LEVEL_LOW: ic 
= 0x0; break; case IRQ_TYPE_LEVEL_HIGH: case IRQ_TYPE_NONE: ic = 0x1; break; default: return -EINVAL; } /* Configure the source. One gross hack that was there before and * that I've kept around is the priority to the BE which I set to * be the same as the interrupt source number. I don't know whether * that's supposed to make any kind of sense however, we'll have to * decide that, but for now, I'm not changing the behaviour. */ old_mask = in_be32(cfg) & 0x30000000u; out_be32(cfg, old_mask | (ic << 24) | (0x7 << 16) | (pic->node_id << 4) | 0xe); out_be32(cfg + 4, (0x2 << 16) | (hw & 0xff)); return 0; } static struct irq_chip spider_pic = { .name = "SPIDER", .irq_unmask = spider_unmask_irq, .irq_mask = spider_mask_irq, .irq_ack = spider_ack_irq, .irq_set_type = spider_set_irq_type, }; static int spider_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { irq_set_chip_data(virq, h->host_data); irq_set_chip_and_handler(virq, &spider_pic, handle_level_irq); /* Set default irq type */ irq_set_irq_type(virq, IRQ_TYPE_NONE); return 0; } static int spider_host_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) { /* Spider interrupts have 2 cells, first is the interrupt source, * second, well, I don't know for sure yet ... We mask the top bits * because old device-trees encode a node number in there */ *out_hwirq = intspec[0] & 0x3f; *out_flags = IRQ_TYPE_LEVEL_HIGH; return 0; } static const struct irq_domain_ops spider_host_ops = { .map = spider_host_map, .xlate = spider_host_xlate, }; static void spider_irq_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct spider_pic *pic = irq_desc_get_handler_data(desc); unsigned int cs; cs = in_be32(pic->regs + TIR_CS) >> 24; if (cs != SPIDER_IRQ_INVALID) generic_handle_domain_irq(pic->host, cs); chip->irq_eoi(&desc->irq_data); } /* For hooking up the cascade we have a problem. Our device-tree is * crap and we don't know on which BE iic interrupt we are hooked on at * least not the "standard" way. We can reconstitute it based on two * informations though: which BE node we are connected to and whether * we are connected to IOIF0 or IOIF1. Right now, we really only care * about the IBM cell blade and we know that its firmware gives us an * interrupt-map property which is pretty strange. 
*/ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic) { unsigned int virq; const u32 *imap, *tmp; int imaplen, intsize, unit; struct device_node *iic; struct device_node *of_node; of_node = irq_domain_get_of_node(pic->host); /* First, we check whether we have a real "interrupts" in the device * tree in case the device-tree is ever fixed */ virq = irq_of_parse_and_map(of_node, 0); if (virq) return virq; /* Now do the horrible hacks */ tmp = of_get_property(of_node, "#interrupt-cells", NULL); if (tmp == NULL) return 0; intsize = *tmp; imap = of_get_property(of_node, "interrupt-map", &imaplen); if (imap == NULL || imaplen < (intsize + 1)) return 0; iic = of_find_node_by_phandle(imap[intsize]); if (iic == NULL) return 0; imap += intsize + 1; tmp = of_get_property(iic, "#interrupt-cells", NULL); if (tmp == NULL) { of_node_put(iic); return 0; } intsize = *tmp; /* Assume unit is last entry of interrupt specifier */ unit = imap[intsize - 1]; /* Ok, we have a unit, now let's try to get the node */ tmp = of_get_property(iic, "ibm,interrupt-server-ranges", NULL); if (tmp == NULL) { of_node_put(iic); return 0; } /* ugly as hell but works for now */ pic->node_id = (*tmp) >> 1; of_node_put(iic); /* Ok, now let's get cracking. You may ask me why I just didn't match * the iic host from the iic OF node, but that way I'm still compatible * with really really old old firmwares for which we don't have a node */ /* Manufacture an IIC interrupt number of class 2 */ virq = irq_create_mapping(NULL, (pic->node_id << IIC_IRQ_NODE_SHIFT) | (2 << IIC_IRQ_CLASS_SHIFT) | unit); if (!virq) printk(KERN_ERR "spider_pic: failed to map cascade !"); return virq; } static void __init spider_init_one(struct device_node *of_node, int chip, unsigned long addr) { struct spider_pic *pic = &spider_pics[chip]; int i, virq; /* Map registers */ pic->regs = ioremap(addr, 0x1000); if (pic->regs == NULL) panic("spider_pic: can't map registers !"); /* Allocate a host */ pic->host = irq_domain_add_linear(of_node, SPIDER_SRC_COUNT, &spider_host_ops, pic); if (pic->host == NULL) panic("spider_pic: can't allocate irq host !"); /* Go through all sources and disable them */ for (i = 0; i < SPIDER_SRC_COUNT; i++) { void __iomem *cfg = pic->regs + TIR_CFGA + 8 * i; out_be32(cfg, in_be32(cfg) & ~0x30000000u); } /* do not mask any interrupts because of level */ out_be32(pic->regs + TIR_MSK, 0x0); /* enable interrupt packets to be output */ out_be32(pic->regs + TIR_PIEN, in_be32(pic->regs + TIR_PIEN) | 0x1); /* Hook up the cascade interrupt to the iic and nodeid */ virq = spider_find_cascade_and_node(pic); if (!virq) return; irq_set_handler_data(virq, pic); irq_set_chained_handler(virq, spider_irq_cascade); printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %pOF\n", pic->node_id, addr, of_node); /* Enable the interrupt detection enable bit. Do this last! */ out_be32(pic->regs + TIR_DEN, in_be32(pic->regs + TIR_DEN) | 0x1); } void __init spider_init_IRQ(void) { struct resource r; struct device_node *dn; int chip = 0; /* XXX node numbers are totally bogus. We _hope_ we get the device * nodes in the right order here but that's definitely not guaranteed, * we need to get the node from the device tree instead. 
* There is currently no proper property for it (but our whole * device-tree is bogus anyway) so all we can do is pray or maybe test * the address and deduce the node-id */ for_each_node_by_name(dn, "interrupt-controller") { if (of_device_is_compatible(dn, "CBEA,platform-spider-pic")) { if (of_address_to_resource(dn, 0, &r)) { printk(KERN_WARNING "spider-pic: Failed\n"); continue; } } else if (of_device_is_compatible(dn, "sti,platform-spider-pic") && (chip < 2)) { static long hard_coded_pics[] = { 0x24000008000ul, 0x34000008000ul}; r.start = hard_coded_pics[chip]; } else continue; spider_init_one(dn, chip++, r.start); } }
linux-master
arch/powerpc/platforms/cell/spider-pic.c
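spider_set_irq_type() packs the sense code, a fixed priority field, the node id and an always-set low nibble into the per-source TIR_CFGA word, while mask/unmask only toggle the 0x30000000 bits. The sketch below recomposes that word in isolation so the layout is visible; the field labels are informal names for the constants the driver writes, not documented hardware mnemonics, and the second config word (which carries the hwirq) is left out.

#include <stdio.h>
#include <stdint.h>

#define TIR_CFG_ENABLE	0x30000000u	/* set by unmask, cleared by mask */

static uint32_t spider_cfg_word(unsigned int ic, unsigned int node_id, int enabled)
{
	uint32_t cfg = 0;

	if (enabled)
		cfg |= TIR_CFG_ENABLE;
	cfg |= (ic & 0x3) << 24;	/* 1 = level high, 3/2 = edge rising/falling, 0 = level low */
	cfg |= 0x7 << 16;		/* constant the driver writes at bits 16..18 */
	cfg |= (node_id & 0xf) << 4;	/* BE node the interrupt packet is routed to */
	cfg |= 0xe;			/* low nibble the driver always sets */
	return cfg;
}

int main(void)
{
	/* level-high source routed to node 0, enabled */
	printf("cfg = 0x%08x\n", (unsigned int)spider_cfg_word(0x1, 0, 1));
	/* edge-rising source (only hwirqs 47..50 may use this) on node 1 */
	printf("cfg = 0x%08x\n", (unsigned int)spider_cfg_word(0x3, 1, 1));
	return 0;
}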
// SPDX-License-Identifier: GPL-2.0-only
/*
 * System call callback functions for SPUs
 */

#undef DEBUG

#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/syscalls.h>

#include <asm/spu.h>
#include <asm/syscalls.h>
#include <asm/unistd.h>

/*
 * This table defines the system calls that an SPU can call.
 * It is currently a subset of the 64 bit powerpc system calls,
 * with the exact semantics.
 *
 * The reasons for disabling some of the system calls are:
 * 1. They interact with the way SPU syscalls are handled
 *    and we can't let them execute ever:
 *    restart_syscall, exit, fork, execve, ptrace, ...
 * 2. They are deprecated and replaced by other means:
 *    uselib, pciconfig_*, sysfs, ...
 * 3. They are somewhat interacting with the system in a way
 *    we don't want an SPU to:
 *    reboot, init_module, mount, kexec_load
 * 4. They are optional and we can't rely on them being
 *    linked into the kernel. Unfortunately, the cond_syscall
 *    helper does not work here as it does not add the necessary
 *    opd symbols:
 *    mbind, mq_open, ipc, ...
 */
static const syscall_fn spu_syscall_table[] = {
#define __SYSCALL_WITH_COMPAT(nr, entry, compat) __SYSCALL(nr, entry)
#define __SYSCALL(nr, entry) [nr] = (void *) entry,
#include <asm/syscall_table_spu.h>
};

long spu_sys_callback(struct spu_syscall_block *s)
{
	syscall_fn syscall;

	if (s->nr_ret >= ARRAY_SIZE(spu_syscall_table)) {
		pr_debug("%s: invalid syscall #%lld", __func__, s->nr_ret);
		return -ENOSYS;
	}

	syscall = spu_syscall_table[s->nr_ret];

	pr_debug("SPU-syscall "
		 "%pSR:syscall%lld(%llx, %llx, %llx, %llx, %llx, %llx)\n",
		 syscall,
		 s->nr_ret,
		 s->parm[0], s->parm[1], s->parm[2],
		 s->parm[3], s->parm[4], s->parm[5]);

	return syscall(s->parm[0], s->parm[1], s->parm[2],
		       s->parm[3], s->parm[4], s->parm[5]);
}
EXPORT_SYMBOL_GPL(spu_sys_callback);
linux-master
arch/powerpc/platforms/cell/spu_callbacks.c
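spu_sys_callback() is a plain bounds-checked dispatch through a dense function-pointer table built by the __SYSCALL macros from asm/syscall_table_spu.h. Below is a self-contained sketch of the same pattern with an invented table; the extra NULL check only exists because this toy table is sparse.

#include <stdio.h>
#include <errno.h>

typedef long (*syscall_fn)(long, long, long);

static long sys_add(long a, long b, long c) { (void)c; return a + b; }
static long sys_nop(long a, long b, long c) { (void)a; (void)b; (void)c; return 0; }

static const syscall_fn table[] = {
	[0] = sys_nop,
	[3] = sys_add,
	/* indices 1 and 2 stay NULL in this toy table */
};

static long dispatch(unsigned long nr, long p0, long p1, long p2)
{
	if (nr >= sizeof(table) / sizeof(table[0]) || !table[nr])
		return -ENOSYS;		/* out of range or not provided */
	return table[nr](p0, p1, p2);
}

int main(void)
{
	printf("nr=3 -> %ld\n", dispatch(3, 2, 3, 0));	/* 5 */
	printf("nr=1 -> %ld\n", dispatch(1, 0, 0, 0));	/* -ENOSYS */
	printf("nr=9 -> %ld\n", dispatch(9, 0, 0, 0));	/* -ENOSYS */
	return 0;
}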
// SPDX-License-Identifier: GPL-2.0-or-later /* * Low-level SPU handling * * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 * * Author: Arnd Bergmann <[email protected]> */ #undef DEBUG #include <linux/interrupt.h> #include <linux/list.h> #include <linux/init.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/wait.h> #include <linux/mm.h> #include <linux/io.h> #include <linux/mutex.h> #include <linux/linux_logo.h> #include <linux/syscore_ops.h> #include <asm/spu.h> #include <asm/spu_priv1.h> #include <asm/spu_csa.h> #include <asm/xmon.h> #include <asm/kexec.h> const struct spu_management_ops *spu_management_ops; EXPORT_SYMBOL_GPL(spu_management_ops); const struct spu_priv1_ops *spu_priv1_ops; EXPORT_SYMBOL_GPL(spu_priv1_ops); struct cbe_spu_info cbe_spu_info[MAX_NUMNODES]; EXPORT_SYMBOL_GPL(cbe_spu_info); /* * The spufs fault-handling code needs to call force_sig_fault to raise signals * on DMA errors. Export it here to avoid general kernel-wide access to this * function */ EXPORT_SYMBOL_GPL(force_sig_fault); /* * Protects cbe_spu_info and spu->number. */ static DEFINE_SPINLOCK(spu_lock); /* * List of all spus in the system. * * This list is iterated by callers from irq context and callers that * want to sleep. Thus modifications need to be done with both * spu_full_list_lock and spu_full_list_mutex held, while iterating * through it requires either of these locks. * * In addition spu_full_list_lock protects all assignments to * spu->mm. */ static LIST_HEAD(spu_full_list); static DEFINE_SPINLOCK(spu_full_list_lock); static DEFINE_MUTEX(spu_full_list_mutex); void spu_invalidate_slbs(struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; unsigned long flags; spin_lock_irqsave(&spu->register_lock, flags); if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) out_be64(&priv2->slb_invalidate_all_W, 0UL); spin_unlock_irqrestore(&spu->register_lock, flags); } EXPORT_SYMBOL_GPL(spu_invalidate_slbs); /* This is called by the MM core when a segment size is changed, to * request a flush of all the SPEs using a given mm */ void spu_flush_all_slbs(struct mm_struct *mm) { struct spu *spu; unsigned long flags; spin_lock_irqsave(&spu_full_list_lock, flags); list_for_each_entry(spu, &spu_full_list, full_list) { if (spu->mm == mm) spu_invalidate_slbs(spu); } spin_unlock_irqrestore(&spu_full_list_lock, flags); } /* The hack below stinks... try to do something better one of * these days... Does it even work properly with NR_CPUS == 1 ? */ static inline void mm_needs_global_tlbie(struct mm_struct *mm) { int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1; /* Global TLBIE broadcast required with SPEs. 
*/ bitmap_fill(cpumask_bits(mm_cpumask(mm)), nr); } void spu_associate_mm(struct spu *spu, struct mm_struct *mm) { unsigned long flags; spin_lock_irqsave(&spu_full_list_lock, flags); spu->mm = mm; spin_unlock_irqrestore(&spu_full_list_lock, flags); if (mm) mm_needs_global_tlbie(mm); } EXPORT_SYMBOL_GPL(spu_associate_mm); int spu_64k_pages_available(void) { return mmu_psize_defs[MMU_PAGE_64K].shift != 0; } EXPORT_SYMBOL_GPL(spu_64k_pages_available); static void spu_restart_dma(struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags)) out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND); else { set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags); mb(); } } static inline void spu_load_slb(struct spu *spu, int slbe, struct copro_slb *slb) { struct spu_priv2 __iomem *priv2 = spu->priv2; pr_debug("%s: adding SLB[%d] 0x%016llx 0x%016llx\n", __func__, slbe, slb->vsid, slb->esid); out_be64(&priv2->slb_index_W, slbe); /* set invalid before writing vsid */ out_be64(&priv2->slb_esid_RW, 0); /* now it's safe to write the vsid */ out_be64(&priv2->slb_vsid_RW, slb->vsid); /* setting the new esid makes the entry valid again */ out_be64(&priv2->slb_esid_RW, slb->esid); } static int __spu_trap_data_seg(struct spu *spu, unsigned long ea) { struct copro_slb slb; int ret; ret = copro_calculate_slb(spu->mm, ea, &slb); if (ret) return ret; spu_load_slb(spu, spu->slb_replace, &slb); spu->slb_replace++; if (spu->slb_replace >= 8) spu->slb_replace = 0; spu_restart_dma(spu); spu->stats.slb_flt++; return 0; } extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap, unsigned long dsisr); //XXX static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr) { int ret; pr_debug("%s, %llx, %lx\n", __func__, dsisr, ea); /* * Handle kernel space hash faults immediately. User hash * faults need to be deferred to process context. */ if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) && (get_region_id(ea) != USER_REGION_ID)) { spin_unlock(&spu->register_lock); ret = hash_page(ea, _PAGE_PRESENT | _PAGE_READ | _PAGE_PRIVILEGED, 0x300, dsisr); spin_lock(&spu->register_lock); if (!ret) { spu_restart_dma(spu); return 0; } } spu->class_1_dar = ea; spu->class_1_dsisr = dsisr; spu->stop_callback(spu, 1); spu->class_1_dar = 0; spu->class_1_dsisr = 0; return 0; } static void __spu_kernel_slb(void *addr, struct copro_slb *slb) { unsigned long ea = (unsigned long)addr; u64 llp; if (get_region_id(ea) == LINEAR_MAP_REGION_ID) llp = mmu_psize_defs[mmu_linear_psize].sllp; else llp = mmu_psize_defs[mmu_virtual_psize].sllp; slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) | SLB_VSID_KERNEL | llp; slb->esid = (ea & ESID_MASK) | SLB_ESID_V; } /** * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the * address @new_addr is present. */ static inline int __slb_present(struct copro_slb *slbs, int nr_slbs, void *new_addr) { unsigned long ea = (unsigned long)new_addr; int i; for (i = 0; i < nr_slbs; i++) if (!((slbs[i].esid ^ ea) & ESID_MASK)) return 1; return 0; } /** * Setup the SPU kernel SLBs, in preparation for a context save/restore. We * need to map both the context save area, and the save/restore code. * * Because the lscsa and code may cross segment boundaries, we check to see * if mappings are required for the start and end of each range. We currently * assume that the mappings are smaller that one segment - if not, something * is seriously wrong. 
*/ void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa, void *code, int code_size) { struct copro_slb slbs[4]; int i, nr_slbs = 0; /* start and end addresses of both mappings */ void *addrs[] = { lscsa, (void *)lscsa + sizeof(*lscsa) - 1, code, code + code_size - 1 }; /* check the set of addresses, and create a new entry in the slbs array * if there isn't already a SLB for that address */ for (i = 0; i < ARRAY_SIZE(addrs); i++) { if (__slb_present(slbs, nr_slbs, addrs[i])) continue; __spu_kernel_slb(addrs[i], &slbs[nr_slbs]); nr_slbs++; } spin_lock_irq(&spu->register_lock); /* Add the set of SLBs */ for (i = 0; i < nr_slbs; i++) spu_load_slb(spu, i, &slbs[i]); spin_unlock_irq(&spu->register_lock); } EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs); static irqreturn_t spu_irq_class_0(int irq, void *data) { struct spu *spu; unsigned long stat, mask; spu = data; spin_lock(&spu->register_lock); mask = spu_int_mask_get(spu, 0); stat = spu_int_stat_get(spu, 0) & mask; spu->class_0_pending |= stat; spu->class_0_dar = spu_mfc_dar_get(spu); spu->stop_callback(spu, 0); spu->class_0_pending = 0; spu->class_0_dar = 0; spu_int_stat_clear(spu, 0, stat); spin_unlock(&spu->register_lock); return IRQ_HANDLED; } static irqreturn_t spu_irq_class_1(int irq, void *data) { struct spu *spu; unsigned long stat, mask, dar, dsisr; spu = data; /* atomically read & clear class1 status. */ spin_lock(&spu->register_lock); mask = spu_int_mask_get(spu, 1); stat = spu_int_stat_get(spu, 1) & mask; dar = spu_mfc_dar_get(spu); dsisr = spu_mfc_dsisr_get(spu); if (stat & CLASS1_STORAGE_FAULT_INTR) spu_mfc_dsisr_set(spu, 0ul); spu_int_stat_clear(spu, 1, stat); pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat, dar, dsisr); if (stat & CLASS1_SEGMENT_FAULT_INTR) __spu_trap_data_seg(spu, dar); if (stat & CLASS1_STORAGE_FAULT_INTR) __spu_trap_data_map(spu, dar, dsisr); spu->class_1_dsisr = 0; spu->class_1_dar = 0; spin_unlock(&spu->register_lock); return stat ? IRQ_HANDLED : IRQ_NONE; } static irqreturn_t spu_irq_class_2(int irq, void *data) { struct spu *spu; unsigned long stat; unsigned long mask; const int mailbox_intrs = CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR; spu = data; spin_lock(&spu->register_lock); stat = spu_int_stat_get(spu, 2); mask = spu_int_mask_get(spu, 2); /* ignore interrupts we're not waiting for */ stat &= mask; /* mailbox interrupts are level triggered. mask them now before * acknowledging */ if (stat & mailbox_intrs) spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs)); /* acknowledge all interrupts before the callbacks */ spu_int_stat_clear(spu, 2, stat); pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask); if (stat & CLASS2_MAILBOX_INTR) spu->ibox_callback(spu); if (stat & CLASS2_SPU_STOP_INTR) spu->stop_callback(spu, 2); if (stat & CLASS2_SPU_HALT_INTR) spu->stop_callback(spu, 2); if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR) spu->mfc_callback(spu); if (stat & CLASS2_MAILBOX_THRESHOLD_INTR) spu->wbox_callback(spu); spu->stats.class2_intr++; spin_unlock(&spu->register_lock); return stat ? 
IRQ_HANDLED : IRQ_NONE; } static int __init spu_request_irqs(struct spu *spu) { int ret = 0; if (spu->irqs[0]) { snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number); ret = request_irq(spu->irqs[0], spu_irq_class_0, 0, spu->irq_c0, spu); if (ret) goto bail0; } if (spu->irqs[1]) { snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number); ret = request_irq(spu->irqs[1], spu_irq_class_1, 0, spu->irq_c1, spu); if (ret) goto bail1; } if (spu->irqs[2]) { snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number); ret = request_irq(spu->irqs[2], spu_irq_class_2, 0, spu->irq_c2, spu); if (ret) goto bail2; } return 0; bail2: if (spu->irqs[1]) free_irq(spu->irqs[1], spu); bail1: if (spu->irqs[0]) free_irq(spu->irqs[0], spu); bail0: return ret; } static void spu_free_irqs(struct spu *spu) { if (spu->irqs[0]) free_irq(spu->irqs[0], spu); if (spu->irqs[1]) free_irq(spu->irqs[1], spu); if (spu->irqs[2]) free_irq(spu->irqs[2], spu); } void spu_init_channels(struct spu *spu) { static const struct { unsigned channel; unsigned count; } zero_list[] = { { 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, }, { 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, }, }, count_list[] = { { 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, }, { 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, }, { 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, }, }; struct spu_priv2 __iomem *priv2; int i; priv2 = spu->priv2; /* initialize all channel data to zero */ for (i = 0; i < ARRAY_SIZE(zero_list); i++) { int count; out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel); for (count = 0; count < zero_list[i].count; count++) out_be64(&priv2->spu_chnldata_RW, 0); } /* initialize channel counts to meaningful values */ for (i = 0; i < ARRAY_SIZE(count_list); i++) { out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel); out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count); } } EXPORT_SYMBOL_GPL(spu_init_channels); static struct bus_type spu_subsys = { .name = "spu", .dev_name = "spu", }; int spu_add_dev_attr(struct device_attribute *attr) { struct spu *spu; mutex_lock(&spu_full_list_mutex); list_for_each_entry(spu, &spu_full_list, full_list) device_create_file(&spu->dev, attr); mutex_unlock(&spu_full_list_mutex); return 0; } EXPORT_SYMBOL_GPL(spu_add_dev_attr); int spu_add_dev_attr_group(const struct attribute_group *attrs) { struct spu *spu; int rc = 0; mutex_lock(&spu_full_list_mutex); list_for_each_entry(spu, &spu_full_list, full_list) { rc = sysfs_create_group(&spu->dev.kobj, attrs); /* we're in trouble here, but try unwinding anyway */ if (rc) { printk(KERN_ERR "%s: can't create sysfs group '%s'\n", __func__, attrs->name); list_for_each_entry_continue_reverse(spu, &spu_full_list, full_list) sysfs_remove_group(&spu->dev.kobj, attrs); break; } } mutex_unlock(&spu_full_list_mutex); return rc; } EXPORT_SYMBOL_GPL(spu_add_dev_attr_group); void spu_remove_dev_attr(struct device_attribute *attr) { struct spu *spu; mutex_lock(&spu_full_list_mutex); list_for_each_entry(spu, &spu_full_list, full_list) device_remove_file(&spu->dev, attr); mutex_unlock(&spu_full_list_mutex); } EXPORT_SYMBOL_GPL(spu_remove_dev_attr); void spu_remove_dev_attr_group(const struct attribute_group *attrs) { struct spu *spu; mutex_lock(&spu_full_list_mutex); list_for_each_entry(spu, &spu_full_list, full_list) sysfs_remove_group(&spu->dev.kobj, attrs); mutex_unlock(&spu_full_list_mutex); } EXPORT_SYMBOL_GPL(spu_remove_dev_attr_group); static int __init spu_create_dev(struct spu *spu) { int ret; spu->dev.id = 
spu->number; spu->dev.bus = &spu_subsys; ret = device_register(&spu->dev); if (ret) { printk(KERN_ERR "Can't register SPU %d with sysfs\n", spu->number); return ret; } sysfs_add_device_to_node(&spu->dev, spu->node); return 0; } static int __init create_spu(void *data) { struct spu *spu; int ret; static int number; unsigned long flags; ret = -ENOMEM; spu = kzalloc(sizeof (*spu), GFP_KERNEL); if (!spu) goto out; spu->alloc_state = SPU_FREE; spin_lock_init(&spu->register_lock); spin_lock(&spu_lock); spu->number = number++; spin_unlock(&spu_lock); ret = spu_create_spu(spu, data); if (ret) goto out_free; spu_mfc_sdr_setup(spu); spu_mfc_sr1_set(spu, 0x33); ret = spu_request_irqs(spu); if (ret) goto out_destroy; ret = spu_create_dev(spu); if (ret) goto out_free_irqs; mutex_lock(&cbe_spu_info[spu->node].list_mutex); list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus); cbe_spu_info[spu->node].n_spus++; mutex_unlock(&cbe_spu_info[spu->node].list_mutex); mutex_lock(&spu_full_list_mutex); spin_lock_irqsave(&spu_full_list_lock, flags); list_add(&spu->full_list, &spu_full_list); spin_unlock_irqrestore(&spu_full_list_lock, flags); mutex_unlock(&spu_full_list_mutex); spu->stats.util_state = SPU_UTIL_IDLE_LOADED; spu->stats.tstamp = ktime_get_ns(); INIT_LIST_HEAD(&spu->aff_list); goto out; out_free_irqs: spu_free_irqs(spu); out_destroy: spu_destroy_spu(spu); out_free: kfree(spu); out: return ret; } static const char *spu_state_names[] = { "user", "system", "iowait", "idle" }; static unsigned long long spu_acct_time(struct spu *spu, enum spu_utilization_state state) { unsigned long long time = spu->stats.times[state]; /* * If the spu is idle or the context is stopped, utilization * statistics are not updated. Apply the time delta from the * last recorded state of the spu. 
*/ if (spu->stats.util_state == state) time += ktime_get_ns() - spu->stats.tstamp; return time / NSEC_PER_MSEC; } static ssize_t spu_stat_show(struct device *dev, struct device_attribute *attr, char *buf) { struct spu *spu = container_of(dev, struct spu, dev); return sprintf(buf, "%s %llu %llu %llu %llu " "%llu %llu %llu %llu %llu %llu %llu %llu\n", spu_state_names[spu->stats.util_state], spu_acct_time(spu, SPU_UTIL_USER), spu_acct_time(spu, SPU_UTIL_SYSTEM), spu_acct_time(spu, SPU_UTIL_IOWAIT), spu_acct_time(spu, SPU_UTIL_IDLE_LOADED), spu->stats.vol_ctx_switch, spu->stats.invol_ctx_switch, spu->stats.slb_flt, spu->stats.hash_flt, spu->stats.min_flt, spu->stats.maj_flt, spu->stats.class2_intr, spu->stats.libassist); } static DEVICE_ATTR(stat, 0444, spu_stat_show, NULL); #ifdef CONFIG_KEXEC_CORE struct crash_spu_info { struct spu *spu; u32 saved_spu_runcntl_RW; u32 saved_spu_status_R; u32 saved_spu_npc_RW; u64 saved_mfc_sr1_RW; u64 saved_mfc_dar; u64 saved_mfc_dsisr; }; #define CRASH_NUM_SPUS 16 /* Enough for current hardware */ static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS]; static void crash_kexec_stop_spus(void) { struct spu *spu; int i; u64 tmp; for (i = 0; i < CRASH_NUM_SPUS; i++) { if (!crash_spu_info[i].spu) continue; spu = crash_spu_info[i].spu; crash_spu_info[i].saved_spu_runcntl_RW = in_be32(&spu->problem->spu_runcntl_RW); crash_spu_info[i].saved_spu_status_R = in_be32(&spu->problem->spu_status_R); crash_spu_info[i].saved_spu_npc_RW = in_be32(&spu->problem->spu_npc_RW); crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu); crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu); tmp = spu_mfc_sr1_get(spu); crash_spu_info[i].saved_mfc_sr1_RW = tmp; tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK; spu_mfc_sr1_set(spu, tmp); __delay(200); } } static void __init crash_register_spus(struct list_head *list) { struct spu *spu; int ret; list_for_each_entry(spu, list, full_list) { if (WARN_ON(spu->number >= CRASH_NUM_SPUS)) continue; crash_spu_info[spu->number].spu = spu; } ret = crash_shutdown_register(&crash_kexec_stop_spus); if (ret) printk(KERN_ERR "Could not register SPU crash handler"); } #else static inline void crash_register_spus(struct list_head *list) { } #endif static void spu_shutdown(void) { struct spu *spu; mutex_lock(&spu_full_list_mutex); list_for_each_entry(spu, &spu_full_list, full_list) { spu_free_irqs(spu); spu_destroy_spu(spu); } mutex_unlock(&spu_full_list_mutex); } static struct syscore_ops spu_syscore_ops = { .shutdown = spu_shutdown, }; static int __init init_spu_base(void) { int i, ret = 0; for (i = 0; i < MAX_NUMNODES; i++) { mutex_init(&cbe_spu_info[i].list_mutex); INIT_LIST_HEAD(&cbe_spu_info[i].spus); } if (!spu_management_ops) goto out; /* create system subsystem for spus */ ret = subsys_system_register(&spu_subsys, NULL); if (ret) goto out; ret = spu_enumerate_spus(create_spu); if (ret < 0) { printk(KERN_WARNING "%s: Error initializing spus\n", __func__); goto out_unregister_subsys; } if (ret > 0) fb_append_extra_logo(&logo_spe_clut224, ret); mutex_lock(&spu_full_list_mutex); xmon_register_spus(&spu_full_list); crash_register_spus(&spu_full_list); mutex_unlock(&spu_full_list_mutex); spu_add_dev_attr(&dev_attr_stat); register_syscore_ops(&spu_syscore_ops); spu_init_affinity(); return 0; out_unregister_subsys: bus_unregister(&spu_subsys); out: return ret; } device_initcall(init_spu_base);
linux-master
arch/powerpc/platforms/cell/spu_base.c
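spu_setup_kernel_slbs() deduplicates the four candidate addresses (start and end of the LSCSA and of the context-switch code) by ESID before loading SLB entries, on the observation that addresses in the same 256MB segment need only one entry. Below is a small standalone check of that comparison; SEGMENT_SHIFT and the mask are derived from the 256MB segment size the driver assumes, and the addresses are invented for illustration.

#include <stdio.h>
#include <stdint.h>

#define SEGMENT_SHIFT	28				/* 256MB segments */
#define ESID_MASK_SK	(~((1ULL << SEGMENT_SHIFT) - 1))

/* same test as __slb_present(): two addresses share an SLB entry iff
 * they have the same ESID, i.e. differ only below the segment boundary */
static int same_segment(uint64_t a, uint64_t b)
{
	return ((a ^ b) & ESID_MASK_SK) == 0;
}

int main(void)
{
	uint64_t lscsa_start = 0xc000000012340000ULL;	/* made-up addresses */
	uint64_t lscsa_end   = lscsa_start + 0x30000 - 1;
	uint64_t code_start  = 0xc000000030000000ULL;

	printf("lscsa start/end share a segment: %d\n",
	       same_segment(lscsa_start, lscsa_end));	/* 1: one entry enough */
	printf("code shares lscsa's segment:     %d\n",
	       same_segment(code_start, lscsa_start));	/* 0: needs its own */
	return 0;
}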
// SPDX-License-Identifier: GPL-2.0-or-later /* * linux/arch/powerpc/platforms/cell/cell_setup.c * * Copyright (C) 1995 Linus Torvalds * Adapted from 'alpha' version by Gary Thomas * Modified by Cort Dougan ([email protected]) * Modified by PPC64 Team, IBM Corp * Modified by Cell Team, IBM Deutschland Entwicklung GmbH */ #undef DEBUG #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/export.h> #include <linux/unistd.h> #include <linux/user.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/seq_file.h> #include <linux/root_dev.h> #include <linux/console.h> #include <linux/mutex.h> #include <linux/memory_hotplug.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <asm/mmu.h> #include <asm/processor.h> #include <asm/io.h> #include <asm/rtas.h> #include <asm/pci-bridge.h> #include <asm/iommu.h> #include <asm/dma.h> #include <asm/machdep.h> #include <asm/time.h> #include <asm/nvram.h> #include <asm/cputable.h> #include <asm/ppc-pci.h> #include <asm/irq.h> #include <asm/spu.h> #include <asm/spu_priv1.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <asm/cell-regs.h> #include <asm/io-workarounds.h> #include "cell.h" #include "interrupt.h" #include "pervasive.h" #include "ras.h" #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) #else #define DBG(fmt...) #endif static void cell_show_cpuinfo(struct seq_file *m) { struct device_node *root; const char *model = ""; root = of_find_node_by_path("/"); if (root) model = of_get_property(root, "model", NULL); seq_printf(m, "machine\t\t: CHRP %s\n", model); of_node_put(root); } static void cell_progress(char *s, unsigned short hex) { printk("*** %04x : %s\n", hex, s ? 
s : ""); } static void cell_fixup_pcie_rootcomplex(struct pci_dev *dev) { struct pci_controller *hose; const char *s; int i; if (!machine_is(cell)) return; /* We're searching for a direct child of the PHB */ if (dev->bus->self != NULL || dev->devfn != 0) return; hose = pci_bus_to_host(dev->bus); if (hose == NULL) return; /* Only on PCIE */ if (!of_device_is_compatible(hose->dn, "pciex")) return; /* And only on axon */ s = of_get_property(hose->dn, "model", NULL); if (!s || strcmp(s, "Axon") != 0) return; for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) { dev->resource[i].start = dev->resource[i].end = 0; dev->resource[i].flags = 0; } printk(KERN_DEBUG "PCI: Hiding resources on Axon PCIE RC %s\n", pci_name(dev)); } DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, cell_fixup_pcie_rootcomplex); static int cell_setup_phb(struct pci_controller *phb) { const char *model; struct device_node *np; int rc = rtas_setup_phb(phb); if (rc) return rc; phb->controller_ops = cell_pci_controller_ops; np = phb->dn; model = of_get_property(np, "model", NULL); if (model == NULL || !of_node_name_eq(np, "pci")) return 0; /* Setup workarounds for spider */ if (strcmp(model, "Spider")) return 0; iowa_register_bus(phb, &spiderpci_ops, &spiderpci_iowa_init, (void *)SPIDER_PCI_REG_BASE); return 0; } static const struct of_device_id cell_bus_ids[] __initconst = { { .type = "soc", }, { .compatible = "soc", }, { .type = "spider", }, { .type = "axon", }, { .type = "plb5", }, { .type = "plb4", }, { .type = "opb", }, { .type = "ebc", }, {}, }; static int __init cell_publish_devices(void) { struct device_node *root = of_find_node_by_path("/"); struct device_node *np; int node; /* Publish OF platform devices for southbridge IOs */ of_platform_bus_probe(NULL, cell_bus_ids, NULL); /* On spider based blades, we need to manually create the OF * platform devices for the PCI host bridges */ for_each_child_of_node(root, np) { if (!of_node_is_type(np, "pci") && !of_node_is_type(np, "pciex")) continue; of_platform_device_create(np, NULL, NULL); } of_node_put(root); /* There is no device for the MIC memory controller, thus we create * a platform device for it to attach the EDAC driver to. 
*/ for_each_online_node(node) { if (cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(node)) == NULL) continue; platform_device_register_simple("cbe-mic", node, NULL, 0); } return 0; } machine_subsys_initcall(cell, cell_publish_devices); static void __init mpic_init_IRQ(void) { struct device_node *dn; struct mpic *mpic; for_each_node_by_name(dn, "interrupt-controller") { if (!of_device_is_compatible(dn, "CBEA,platform-open-pic")) continue; /* The MPIC driver will get everything it needs from the * device-tree, just pass 0 to all arguments */ mpic = mpic_alloc(dn, 0, MPIC_SECONDARY | MPIC_NO_RESET, 0, 0, " MPIC "); if (mpic == NULL) continue; mpic_init(mpic); } } static void __init cell_init_irq(void) { iic_init_IRQ(); spider_init_IRQ(); mpic_init_IRQ(); } static void __init cell_set_dabrx(void) { mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER); } static void __init cell_setup_arch(void) { #ifdef CONFIG_SPU_BASE spu_priv1_ops = &spu_priv1_mmio_ops; spu_management_ops = &spu_management_of_ops; #endif cbe_regs_init(); cell_set_dabrx(); #ifdef CONFIG_CBE_RAS cbe_ras_init(); #endif #ifdef CONFIG_SMP smp_init_cell(); #endif /* init to some ~sane value until calibrate_delay() runs */ loops_per_jiffy = 50000000; /* Find and initialize PCI host bridges */ init_pci_config_tokens(); cbe_pervasive_init(); mmio_nvram_init(); } static int __init cell_probe(void) { if (!of_machine_is_compatible("IBM,CBEA") && !of_machine_is_compatible("IBM,CPBW-1.0")) return 0; pm_power_off = rtas_power_off; return 1; } define_machine(cell) { .name = "Cell", .probe = cell_probe, .setup_arch = cell_setup_arch, .show_cpuinfo = cell_show_cpuinfo, .restart = rtas_restart, .halt = rtas_halt, .get_boot_time = rtas_get_boot_time, .get_rtc_time = rtas_get_rtc_time, .set_rtc_time = rtas_set_rtc_time, .progress = cell_progress, .init_IRQ = cell_init_irq, .pci_setup_phb = cell_setup_phb, }; struct pci_controller_ops cell_pci_controller_ops;
linux-master
arch/powerpc/platforms/cell/setup.c
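cell_fixup_pcie_rootcomplex() is guarded by a chain of early returns so it only ever hides the BARs of the device sitting directly under an Axon PCIe host bridge. The fragment below lifts just that decision chain onto pared-down stand-in structs (not the real struct pci_dev / pci_controller) so the conditions can be exercised in isolation.

#include <stdio.h>
#include <string.h>

struct fake_host { const char *compatible; const char *model; };
struct fake_dev  { int has_parent_bridge; int devfn; const struct fake_host *host; };

static int fixup_applies(const struct fake_dev *dev)
{
	if (dev->has_parent_bridge || dev->devfn != 0)
		return 0;			/* not the direct child of the PHB */
	if (!dev->host)
		return 0;
	if (strcmp(dev->host->compatible, "pciex") != 0)
		return 0;			/* only PCIe host bridges */
	if (!dev->host->model || strcmp(dev->host->model, "Axon") != 0)
		return 0;			/* only the Axon root complex */
	return 1;				/* the driver would zero the BARs here */
}

int main(void)
{
	const struct fake_host axon   = { "pciex", "Axon" };
	const struct fake_host spider = { "pci",   "Spider" };
	const struct fake_dev root     = { 0, 0, &axon };	/* fixed up */
	const struct fake_dev child    = { 1, 0, &axon };	/* left alone */
	const struct fake_dev onspider = { 0, 0, &spider };	/* left alone */

	printf("%d %d %d\n", fixup_applies(&root),
	       fixup_applies(&child), fixup_applies(&onspider));
	return 0;
}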
// SPDX-License-Identifier: GPL-2.0-or-later /* * CBE Pervasive Monitor and Debug * * (C) Copyright IBM Corporation 2005 * * Authors: Maximino Aguilar ([email protected]) * Michael N. Day ([email protected]) */ #undef DEBUG #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/percpu.h> #include <linux/types.h> #include <linux/kallsyms.h> #include <linux/pgtable.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/reg.h> #include <asm/cell-regs.h> #include <asm/cpu_has_feature.h> #include "pervasive.h" #include "ras.h" static void cbe_power_save(void) { unsigned long ctrl, thread_switch_control; /* Ensure our interrupt state is properly tracked */ if (!prep_irq_for_idle()) return; ctrl = mfspr(SPRN_CTRLF); /* Enable DEC and EE interrupt request */ thread_switch_control = mfspr(SPRN_TSC_CELL); thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST; switch (ctrl & CTRL_CT) { case CTRL_CT0: thread_switch_control |= TSC_CELL_DEC_ENABLE_0; break; case CTRL_CT1: thread_switch_control |= TSC_CELL_DEC_ENABLE_1; break; default: printk(KERN_WARNING "%s: unknown configuration\n", __func__); break; } mtspr(SPRN_TSC_CELL, thread_switch_control); /* * go into low thread priority, medium priority will be * restored for us after wake-up. */ HMT_low(); /* * atomically disable thread execution and runlatch. * External and Decrementer exceptions are still handled when the * thread is disabled but now enter in cbe_system_reset_exception() */ ctrl &= ~(CTRL_RUNLATCH | CTRL_TE); mtspr(SPRN_CTRLT, ctrl); /* Re-enable interrupts in MSR */ __hard_irq_enable(); } static int cbe_system_reset_exception(struct pt_regs *regs) { switch (regs->msr & SRR1_WAKEMASK) { case SRR1_WAKEDEC: set_dec(1); break; case SRR1_WAKEEE: /* * Handle these when interrupts get re-enabled and we take * them as regular exceptions. We are in an NMI context * and can't handle these here. */ break; case SRR1_WAKEMT: return cbe_sysreset_hack(); #ifdef CONFIG_CBE_RAS case SRR1_WAKESYSERR: cbe_system_error_exception(regs); break; case SRR1_WAKETHERM: cbe_thermal_exception(regs); break; #endif /* CONFIG_CBE_RAS */ default: /* do system reset */ return 0; } /* everything handled */ return 1; } void __init cbe_pervasive_init(void) { int cpu; if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO)) return; for_each_possible_cpu(cpu) { struct cbe_pmd_regs __iomem *regs = cbe_get_cpu_pmd_regs(cpu); if (!regs) continue; /* Enable Pause(0) control bit */ out_be64(&regs->pmcr, in_be64(&regs->pmcr) | CBE_PMD_PAUSE_ZERO_CONTROL); } ppc_md.power_save = cbe_power_save; ppc_md.system_reset_exception = cbe_system_reset_exception; }
linux-master
arch/powerpc/platforms/cell/pervasive.c
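cbe_power_save() reads the CT field of the CTRL SPR to learn which hardware thread it is running on and sets the matching per-thread decrementer-enable bit in the thread switch control register before pausing. The sketch below isolates that selection step; all register bit values here are illustrative stand-ins, not the real SPR definitions from asm/reg.h.

#include <stdio.h>
#include <stdint.h>

#define CTRL_CT_SK		0x00c00000u	/* current-thread field (stand-in) */
#define CTRL_CT0_SK		0x00800000u	/* thread 0 running     (stand-in) */
#define CTRL_CT1_SK		0x00400000u	/* thread 1 running     (stand-in) */
#define TSC_EE_ENABLE_SK	0x1u		/* external-interrupt wakeup */
#define TSC_DEC_ENABLE_0_SK	0x2u		/* decrementer wakeup, thread 0 */
#define TSC_DEC_ENABLE_1_SK	0x4u		/* decrementer wakeup, thread 1 */

static uint64_t pick_wakeup_bits(uint32_t ctrl)
{
	uint64_t tsc = TSC_EE_ENABLE_SK;	/* always allow EE to wake us */

	switch (ctrl & CTRL_CT_SK) {
	case CTRL_CT0_SK:
		tsc |= TSC_DEC_ENABLE_0_SK;
		break;
	case CTRL_CT1_SK:
		tsc |= TSC_DEC_ENABLE_1_SK;
		break;
	default:
		fprintf(stderr, "unknown thread configuration\n");
		break;
	}
	return tsc;
}

int main(void)
{
	printf("thread0 -> tsc bits 0x%llx\n",
	       (unsigned long long)pick_wakeup_bits(CTRL_CT0_SK));
	printf("thread1 -> tsc bits 0x%llx\n",
	       (unsigned long long)pick_wakeup_bits(CTRL_CT1_SK));
	return 0;
}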
// SPDX-License-Identifier: GPL-2.0-or-later /* * IOMMU implementation for Cell Broadband Processor Architecture * * (C) Copyright IBM Corporation 2006-2008 * * Author: Jeremy Kerr <[email protected]> */ #undef DEBUG #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/notifier.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/memblock.h> #include <asm/prom.h> #include <asm/iommu.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/udbg.h> #include <asm/firmware.h> #include <asm/cell-regs.h> #include "cell.h" #include "interrupt.h" /* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages * instead of leaving them mapped to some dummy page. This can be * enabled once the appropriate workarounds for spider bugs have * been enabled */ #define CELL_IOMMU_REAL_UNMAP /* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of * IO PTEs based on the transfer direction. That can be enabled * once spider-net has been fixed to pass the correct direction * to the DMA mapping functions */ #define CELL_IOMMU_STRICT_PROTECTION #define NR_IOMMUS 2 /* IOC mmap registers */ #define IOC_Reg_Size 0x2000 #define IOC_IOPT_CacheInvd 0x908 #define IOC_IOPT_CacheInvd_NE_Mask 0xffe0000000000000ul #define IOC_IOPT_CacheInvd_IOPTE_Mask 0x000003fffffffff8ul #define IOC_IOPT_CacheInvd_Busy 0x0000000000000001ul #define IOC_IOST_Origin 0x918 #define IOC_IOST_Origin_E 0x8000000000000000ul #define IOC_IOST_Origin_HW 0x0000000000000800ul #define IOC_IOST_Origin_HL 0x0000000000000400ul #define IOC_IO_ExcpStat 0x920 #define IOC_IO_ExcpStat_V 0x8000000000000000ul #define IOC_IO_ExcpStat_SPF_Mask 0x6000000000000000ul #define IOC_IO_ExcpStat_SPF_S 0x6000000000000000ul #define IOC_IO_ExcpStat_SPF_P 0x2000000000000000ul #define IOC_IO_ExcpStat_ADDR_Mask 0x00000007fffff000ul #define IOC_IO_ExcpStat_RW_Mask 0x0000000000000800ul #define IOC_IO_ExcpStat_IOID_Mask 0x00000000000007fful #define IOC_IO_ExcpMask 0x928 #define IOC_IO_ExcpMask_SFE 0x4000000000000000ul #define IOC_IO_ExcpMask_PFE 0x2000000000000000ul #define IOC_IOCmd_Offset 0x1000 #define IOC_IOCmd_Cfg 0xc00 #define IOC_IOCmd_Cfg_TE 0x0000800000000000ul /* Segment table entries */ #define IOSTE_V 0x8000000000000000ul /* valid */ #define IOSTE_H 0x4000000000000000ul /* cache hint */ #define IOSTE_PT_Base_RPN_Mask 0x3ffffffffffff000ul /* base RPN of IOPT */ #define IOSTE_NPPT_Mask 0x0000000000000fe0ul /* no. 
pages in IOPT */ #define IOSTE_PS_Mask 0x0000000000000007ul /* page size */ #define IOSTE_PS_4K 0x0000000000000001ul /* - 4kB */ #define IOSTE_PS_64K 0x0000000000000003ul /* - 64kB */ #define IOSTE_PS_1M 0x0000000000000005ul /* - 1MB */ #define IOSTE_PS_16M 0x0000000000000007ul /* - 16MB */ /* IOMMU sizing */ #define IO_SEGMENT_SHIFT 28 #define IO_PAGENO_BITS(shift) (IO_SEGMENT_SHIFT - (shift)) /* The high bit needs to be set on every DMA address */ #define SPIDER_DMA_OFFSET 0x80000000ul struct iommu_window { struct list_head list; struct cbe_iommu *iommu; unsigned long offset; unsigned long size; unsigned int ioid; struct iommu_table table; }; #define NAMESIZE 8 struct cbe_iommu { int nid; char name[NAMESIZE]; void __iomem *xlate_regs; void __iomem *cmd_regs; unsigned long *stab; unsigned long *ptab; void *pad_page; struct list_head windows; }; /* Static array of iommus, one per node * each contains a list of windows, keyed from dma_window property * - on bus setup, look for a matching window, or create one * - on dev setup, assign iommu_table ptr */ static struct cbe_iommu iommus[NR_IOMMUS]; static int cbe_nr_iommus; static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte, long n_ptes) { u64 __iomem *reg; u64 val; long n; reg = iommu->xlate_regs + IOC_IOPT_CacheInvd; while (n_ptes > 0) { /* we can invalidate up to 1 << 11 PTEs at once */ n = min(n_ptes, 1l << 11); val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask) | (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask) | IOC_IOPT_CacheInvd_Busy; out_be64(reg, val); while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy) ; n_ptes -= n; pte += n; } } static int tce_build_cell(struct iommu_table *tbl, long index, long npages, unsigned long uaddr, enum dma_data_direction direction, unsigned long attrs) { int i; unsigned long *io_pte, base_pte; struct iommu_window *window = container_of(tbl, struct iommu_window, table); /* implementing proper protection causes problems with the spidernet * driver - check mapping directions later, but allow read & write by * default for now.*/ #ifdef CELL_IOMMU_STRICT_PROTECTION /* to avoid referencing a global, we use a trick here to setup the * protection bit. "prot" is setup to be 3 fields of 4 bits appended * together for each of the 3 supported direction values. It is then * shifted left so that the fields matching the desired direction * lands on the appropriate bits, and other bits are masked out. 
*/ const unsigned long prot = 0xc48; base_pte = ((prot << (52 + 4 * direction)) & (CBE_IOPTE_PP_W | CBE_IOPTE_PP_R)) | CBE_IOPTE_M | CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask); #else base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M | CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask); #endif if (unlikely(attrs & DMA_ATTR_WEAK_ORDERING)) base_pte &= ~CBE_IOPTE_SO_RW; io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift)) io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask); mb(); invalidate_tce_cache(window->iommu, io_pte, npages); pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n", index, npages, direction, base_pte); return 0; } static void tce_free_cell(struct iommu_table *tbl, long index, long npages) { int i; unsigned long *io_pte, pte; struct iommu_window *window = container_of(tbl, struct iommu_window, table); pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages); #ifdef CELL_IOMMU_REAL_UNMAP pte = 0; #else /* spider bridge does PCI reads after freeing - insert a mapping * to a scratch page instead of an invalid entry */ pte = CBE_IOPTE_PP_R | CBE_IOPTE_M | CBE_IOPTE_SO_RW | __pa(window->iommu->pad_page) | (window->ioid & CBE_IOPTE_IOID_Mask); #endif io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); for (i = 0; i < npages; i++) io_pte[i] = pte; mb(); invalidate_tce_cache(window->iommu, io_pte, npages); } static irqreturn_t ioc_interrupt(int irq, void *data) { unsigned long stat, spf; struct cbe_iommu *iommu = data; stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat); spf = stat & IOC_IO_ExcpStat_SPF_Mask; /* Might want to rate limit it */ printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat); printk(KERN_ERR " V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n", !!(stat & IOC_IO_ExcpStat_V), (spf == IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ', (spf == IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ', (stat & IOC_IO_ExcpStat_RW_Mask) ? 
"Read" : "Write", (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask)); printk(KERN_ERR " page=0x%016lx\n", stat & IOC_IO_ExcpStat_ADDR_Mask); /* clear interrupt */ stat &= ~IOC_IO_ExcpStat_V; out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat); return IRQ_HANDLED; } static int __init cell_iommu_find_ioc(int nid, unsigned long *base) { struct device_node *np; struct resource r; *base = 0; /* First look for new style /be nodes */ for_each_node_by_name(np, "ioc") { if (of_node_to_nid(np) != nid) continue; if (of_address_to_resource(np, 0, &r)) { printk(KERN_ERR "iommu: can't get address for %pOF\n", np); continue; } *base = r.start; of_node_put(np); return 0; } /* Ok, let's try the old way */ for_each_node_by_type(np, "cpu") { const unsigned int *nidp; const unsigned long *tmp; nidp = of_get_property(np, "node-id", NULL); if (nidp && *nidp == nid) { tmp = of_get_property(np, "ioc-translation", NULL); if (tmp) { *base = *tmp; of_node_put(np); return 0; } } } return -ENODEV; } static void __init cell_iommu_setup_stab(struct cbe_iommu *iommu, unsigned long dbase, unsigned long dsize, unsigned long fbase, unsigned long fsize) { struct page *page; unsigned long segments, stab_size; segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT; pr_debug("%s: iommu[%d]: segments: %lu\n", __func__, iommu->nid, segments); /* set up the segment table */ stab_size = segments * sizeof(unsigned long); page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size)); BUG_ON(!page); iommu->stab = page_address(page); memset(iommu->stab, 0, stab_size); } static unsigned long *__init cell_iommu_alloc_ptab(struct cbe_iommu *iommu, unsigned long base, unsigned long size, unsigned long gap_base, unsigned long gap_size, unsigned long page_shift) { struct page *page; int i; unsigned long reg, segments, pages_per_segment, ptab_size, n_pte_pages, start_seg, *ptab; start_seg = base >> IO_SEGMENT_SHIFT; segments = size >> IO_SEGMENT_SHIFT; pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift); /* PTEs for each segment must start on a 4K boundary */ pages_per_segment = max(pages_per_segment, (1 << 12) / sizeof(unsigned long)); ptab_size = segments * pages_per_segment * sizeof(unsigned long); pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__, iommu->nid, ptab_size, get_order(ptab_size)); page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size)); BUG_ON(!page); ptab = page_address(page); memset(ptab, 0, ptab_size); /* number of 4K pages needed for a page table */ n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12; pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n", __func__, iommu->nid, iommu->stab, ptab, n_pte_pages); /* initialise the STEs */ reg = IOSTE_V | ((n_pte_pages - 1) << 5); switch (page_shift) { case 12: reg |= IOSTE_PS_4K; break; case 16: reg |= IOSTE_PS_64K; break; case 20: reg |= IOSTE_PS_1M; break; case 24: reg |= IOSTE_PS_16M; break; default: BUG(); } gap_base = gap_base >> IO_SEGMENT_SHIFT; gap_size = gap_size >> IO_SEGMENT_SHIFT; pr_debug("Setting up IOMMU stab:\n"); for (i = start_seg; i < (start_seg + segments); i++) { if (i >= gap_base && i < (gap_base + gap_size)) { pr_debug("\toverlap at %d, skipping\n", i); continue; } iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) * (i - start_seg)); pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]); } return ptab; } static void __init cell_iommu_enable_hardware(struct cbe_iommu *iommu) { int ret; unsigned long reg, xlate_base; unsigned int virq; if (cell_iommu_find_ioc(iommu->nid, 
&xlate_base)) panic("%s: missing IOC register mappings for node %d\n", __func__, iommu->nid); iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size); iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset; /* ensure that the STEs have updated */ mb(); /* setup interrupts for the iommu. */ reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat); out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, reg & ~IOC_IO_ExcpStat_V); out_be64(iommu->xlate_regs + IOC_IO_ExcpMask, IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE); virq = irq_create_mapping(NULL, IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT)); BUG_ON(!virq); ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu); BUG_ON(ret); /* set the IOC segment table origin register (and turn on the iommu) */ reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW; out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg); in_be64(iommu->xlate_regs + IOC_IOST_Origin); /* turn on IO translation */ reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE; out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg); } static void __init cell_iommu_setup_hardware(struct cbe_iommu *iommu, unsigned long base, unsigned long size) { cell_iommu_setup_stab(iommu, base, size, 0, 0); iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0, IOMMU_PAGE_SHIFT_4K); cell_iommu_enable_hardware(iommu); } #if 0/* Unused for now */ static struct iommu_window *find_window(struct cbe_iommu *iommu, unsigned long offset, unsigned long size) { struct iommu_window *window; /* todo: check for overlapping (but not equal) windows) */ list_for_each_entry(window, &(iommu->windows), list) { if (window->offset == offset && window->size == size) return window; } return NULL; } #endif static inline u32 cell_iommu_get_ioid(struct device_node *np) { const u32 *ioid; ioid = of_get_property(np, "ioid", NULL); if (ioid == NULL) { printk(KERN_WARNING "iommu: missing ioid for %pOF using 0\n", np); return 0; } return *ioid; } static struct iommu_table_ops cell_iommu_ops = { .set = tce_build_cell, .clear = tce_free_cell }; static struct iommu_window * __init cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np, unsigned long offset, unsigned long size, unsigned long pte_offset) { struct iommu_window *window; struct page *page; u32 ioid; ioid = cell_iommu_get_ioid(np); window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid); BUG_ON(window == NULL); window->offset = offset; window->size = size; window->ioid = ioid; window->iommu = iommu; window->table.it_blocksize = 16; window->table.it_base = (unsigned long)iommu->ptab; window->table.it_index = iommu->nid; window->table.it_page_shift = IOMMU_PAGE_SHIFT_4K; window->table.it_offset = (offset >> window->table.it_page_shift) + pte_offset; window->table.it_size = size >> window->table.it_page_shift; window->table.it_ops = &cell_iommu_ops; if (!iommu_init_table(&window->table, iommu->nid, 0, 0)) panic("Failed to initialize iommu table"); pr_debug("\tioid %d\n", window->ioid); pr_debug("\tblocksize %ld\n", window->table.it_blocksize); pr_debug("\tbase 0x%016lx\n", window->table.it_base); pr_debug("\toffset 0x%lx\n", window->table.it_offset); pr_debug("\tsize %ld\n", window->table.it_size); list_add(&window->list, &iommu->windows); if (offset != 0) return window; /* We need to map and reserve the first IOMMU page since it's used * by the spider workaround. In theory, we only need to do that when * running on spider but it doesn't really matter. 
* * This code also assumes that we have a window that starts at 0, * which is the case on all spider based blades. */ page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0); BUG_ON(!page); iommu->pad_page = page_address(page); clear_page(iommu->pad_page); __set_bit(0, window->table.it_map); tce_build_cell(&window->table, window->table.it_offset, 1, (unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0); return window; } static struct cbe_iommu *cell_iommu_for_node(int nid) { int i; for (i = 0; i < cbe_nr_iommus; i++) if (iommus[i].nid == nid) return &iommus[i]; return NULL; } static unsigned long cell_dma_nommu_offset; static unsigned long dma_iommu_fixed_base; static bool cell_iommu_enabled; /* iommu_fixed_is_weak is set if booted with iommu_fixed=weak */ bool iommu_fixed_is_weak; static struct iommu_table *cell_get_iommu_table(struct device *dev) { struct iommu_window *window; struct cbe_iommu *iommu; /* Current implementation uses the first window available in that * node's iommu. We -might- do something smarter later though it may * never be necessary */ iommu = cell_iommu_for_node(dev_to_node(dev)); if (iommu == NULL || list_empty(&iommu->windows)) { dev_err(dev, "iommu: missing iommu for %pOF (node %d)\n", dev->of_node, dev_to_node(dev)); return NULL; } window = list_entry(iommu->windows.next, struct iommu_window, list); return &window->table; } static u64 cell_iommu_get_fixed_address(struct device *dev); static void cell_dma_dev_setup(struct device *dev) { if (cell_iommu_enabled) { u64 addr = cell_iommu_get_fixed_address(dev); if (addr != OF_BAD_ADDR) dev->archdata.dma_offset = addr + dma_iommu_fixed_base; set_iommu_table_base(dev, cell_get_iommu_table(dev)); } else { dev->archdata.dma_offset = cell_dma_nommu_offset; } } static void cell_pci_dma_dev_setup(struct pci_dev *dev) { cell_dma_dev_setup(&dev->dev); } static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; /* We are only interested in device addition */ if (action != BUS_NOTIFY_ADD_DEVICE) return 0; if (cell_iommu_enabled) dev->dma_ops = &dma_iommu_ops; cell_dma_dev_setup(dev); return 0; } static struct notifier_block cell_of_bus_notifier = { .notifier_call = cell_of_bus_notify }; static int __init cell_iommu_get_window(struct device_node *np, unsigned long *base, unsigned long *size) { const __be32 *dma_window; unsigned long index; /* Use ibm,dma-window if available, else, hard code ! */ dma_window = of_get_property(np, "ibm,dma-window", NULL); if (dma_window == NULL) { *base = 0; *size = 0x80000000u; return -ENODEV; } of_parse_dma_window(np, dma_window, &index, base, size); return 0; } static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np) { struct cbe_iommu *iommu; int nid, i; /* Get node ID */ nid = of_node_to_nid(np); if (nid < 0) { printk(KERN_ERR "iommu: failed to get node for %pOF\n", np); return NULL; } pr_debug("iommu: setting up iommu for node %d (%pOF)\n", nid, np); /* XXX todo: If we can have multiple windows on the same IOMMU, which * isn't the case today, we probably want here to check whether the * iommu for that node is already setup. * However, there might be issue with getting the size right so let's * ignore that for now. We might want to completely get rid of the * multiple window support since the cell iommu supports per-page ioids */ if (cbe_nr_iommus >= NR_IOMMUS) { printk(KERN_ERR "iommu: too many IOMMUs detected ! 
(%pOF)\n", np); return NULL; } /* Init base fields */ i = cbe_nr_iommus++; iommu = &iommus[i]; iommu->stab = NULL; iommu->nid = nid; snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i); INIT_LIST_HEAD(&iommu->windows); return iommu; } static void __init cell_iommu_init_one(struct device_node *np, unsigned long offset) { struct cbe_iommu *iommu; unsigned long base, size; iommu = cell_iommu_alloc(np); if (!iommu) return; /* Obtain a window for it */ cell_iommu_get_window(np, &base, &size); pr_debug("\ttranslating window 0x%lx...0x%lx\n", base, base + size - 1); /* Initialize the hardware */ cell_iommu_setup_hardware(iommu, base, size); /* Setup the iommu_table */ cell_iommu_setup_window(iommu, np, base, size, offset >> IOMMU_PAGE_SHIFT_4K); } static void __init cell_disable_iommus(void) { int node; unsigned long base, val; void __iomem *xregs, *cregs; /* Make sure IOC translation is disabled on all nodes */ for_each_online_node(node) { if (cell_iommu_find_ioc(node, &base)) continue; xregs = ioremap(base, IOC_Reg_Size); if (xregs == NULL) continue; cregs = xregs + IOC_IOCmd_Offset; pr_debug("iommu: cleaning up iommu on node %d\n", node); out_be64(xregs + IOC_IOST_Origin, 0); (void)in_be64(xregs + IOC_IOST_Origin); val = in_be64(cregs + IOC_IOCmd_Cfg); val &= ~IOC_IOCmd_Cfg_TE; out_be64(cregs + IOC_IOCmd_Cfg, val); (void)in_be64(cregs + IOC_IOCmd_Cfg); iounmap(xregs); } } static int __init cell_iommu_init_disabled(void) { struct device_node *np = NULL; unsigned long base = 0, size; /* When no iommu is present, we use direct DMA ops */ /* First make sure all IOC translation is turned off */ cell_disable_iommus(); /* If we have no Axon, we set up the spider DMA magic offset */ np = of_find_node_by_name(NULL, "axon"); if (!np) cell_dma_nommu_offset = SPIDER_DMA_OFFSET; of_node_put(np); /* Now we need to check to see where the memory is mapped * in PCI space. We assume that all busses use the same dma * window which is always the case so far on Cell, thus we * pick up the first pci-internal node we can find and check * the DMA window from there. */ for_each_node_by_name(np, "axon") { if (np->parent == NULL || np->parent->parent != NULL) continue; if (cell_iommu_get_window(np, &base, &size) == 0) break; } if (np == NULL) { for_each_node_by_name(np, "pci-internal") { if (np->parent == NULL || np->parent->parent != NULL) continue; if (cell_iommu_get_window(np, &base, &size) == 0) break; } } of_node_put(np); /* If we found a DMA window, we check if it's big enough to enclose * all of physical memory. If not, we force enable IOMMU */ if (np && size < memblock_end_of_DRAM()) { printk(KERN_WARNING "iommu: force-enabled, dma window" " (%ldMB) smaller than total memory (%lldMB)\n", size >> 20, memblock_end_of_DRAM() >> 20); return -ENODEV; } cell_dma_nommu_offset += base; if (cell_dma_nommu_offset != 0) cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup; printk("iommu: disabled, direct DMA offset is 0x%lx\n", cell_dma_nommu_offset); return 0; } /* * Fixed IOMMU mapping support * * This code adds support for setting up a fixed IOMMU mapping on certain * cell machines. For 64-bit devices this avoids the performance overhead of * mapping and unmapping pages at runtime. 32-bit devices are unable to use * the fixed mapping. * * The fixed mapping is established at boot, and maps all of physical memory * 1:1 into device space at some offset. On machines with < 30 GB of memory * we setup the fixed mapping immediately above the normal IOMMU window. 
* * For example a machine with 4GB of memory would end up with the normal * IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In * this case a 64-bit device wishing to DMA to 1GB would be told to DMA to * 3GB, plus any offset required by firmware. The firmware offset is encoded * in the "dma-ranges" property. * * On machines with 30GB or more of memory, we are unable to place the fixed * mapping above the normal IOMMU window as we would run out of address space. * Instead we move the normal IOMMU window to coincide with the hash page * table, this region does not need to be part of the fixed mapping as no * device should ever be DMA'ing to it. We then setup the fixed mapping * from 0 to 32GB. */ static u64 cell_iommu_get_fixed_address(struct device *dev) { u64 cpu_addr, size, best_size, dev_addr = OF_BAD_ADDR; struct device_node *np; const u32 *ranges = NULL; int i, len, best, naddr, nsize, pna, range_size; /* We can be called for platform devices that have no of_node */ np = of_node_get(dev->of_node); if (!np) goto out; while (1) { naddr = of_n_addr_cells(np); nsize = of_n_size_cells(np); np = of_get_next_parent(np); if (!np) break; ranges = of_get_property(np, "dma-ranges", &len); /* Ignore empty ranges, they imply no translation required */ if (ranges && len > 0) break; } if (!ranges) { dev_dbg(dev, "iommu: no dma-ranges found\n"); goto out; } len /= sizeof(u32); pna = of_n_addr_cells(np); range_size = naddr + nsize + pna; /* dma-ranges format: * child addr : naddr cells * parent addr : pna cells * size : nsize cells */ for (i = 0, best = -1, best_size = 0; i < len; i += range_size) { cpu_addr = of_translate_dma_address(np, ranges + i + naddr); size = of_read_number(ranges + i + naddr + pna, nsize); if (cpu_addr == 0 && size > best_size) { best = i; best_size = size; } } if (best >= 0) { dev_addr = of_read_number(ranges + best, naddr); } else dev_dbg(dev, "iommu: no suitable range found!\n"); out: of_node_put(np); return dev_addr; } static bool cell_pci_iommu_bypass_supported(struct pci_dev *pdev, u64 mask) { return mask == DMA_BIT_MASK(64) && cell_iommu_get_fixed_address(&pdev->dev) != OF_BAD_ADDR; } static void __init insert_16M_pte(unsigned long addr, unsigned long *ptab, unsigned long base_pte) { unsigned long segment, offset; segment = addr >> IO_SEGMENT_SHIFT; offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24)); ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long)); pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n", addr, ptab, segment, offset); ptab[offset] = base_pte | (__pa(addr) & CBE_IOPTE_RPN_Mask); } static void __init cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu, struct device_node *np, unsigned long dbase, unsigned long dsize, unsigned long fbase, unsigned long fsize) { unsigned long base_pte, uaddr, ioaddr, *ptab; ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24); dma_iommu_fixed_base = fbase; pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase); base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M | (cell_iommu_get_ioid(np) & CBE_IOPTE_IOID_Mask); if (iommu_fixed_is_weak) pr_info("IOMMU: Using weak ordering for fixed mapping\n"); else { pr_info("IOMMU: Using strong ordering for fixed mapping\n"); base_pte |= CBE_IOPTE_SO_RW; } for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) { /* Don't touch the dynamic region */ ioaddr = uaddr + fbase; if (ioaddr >= dbase && ioaddr < (dbase + dsize)) { pr_debug("iommu: fixed/dynamic overlap, skipping\n"); continue; } insert_16M_pte(uaddr, ptab, 
base_pte); } mb(); } static int __init cell_iommu_fixed_mapping_init(void) { unsigned long dbase, dsize, fbase, fsize, hbase, hend; struct cbe_iommu *iommu; struct device_node *np; /* The fixed mapping is only supported on axon machines */ np = of_find_node_by_name(NULL, "axon"); of_node_put(np); if (!np) { pr_debug("iommu: fixed mapping disabled, no axons found\n"); return -1; } /* We must have dma-ranges properties for fixed mapping to work */ np = of_find_node_with_property(NULL, "dma-ranges"); of_node_put(np); if (!np) { pr_debug("iommu: no dma-ranges found, no fixed mapping\n"); return -1; } /* The default setup is to have the fixed mapping sit after the * dynamic region, so find the top of the largest IOMMU window * on any axon, then add the size of RAM and that's our max value. * If that is > 32GB we have to do other shennanigans. */ fbase = 0; for_each_node_by_name(np, "axon") { cell_iommu_get_window(np, &dbase, &dsize); fbase = max(fbase, dbase + dsize); } fbase = ALIGN(fbase, 1 << IO_SEGMENT_SHIFT); fsize = memblock_phys_mem_size(); if ((fbase + fsize) <= 0x800000000ul) hbase = 0; /* use the device tree window */ else { /* If we're over 32 GB we need to cheat. We can't map all of * RAM with the fixed mapping, and also fit the dynamic * region. So try to place the dynamic region where the hash * table sits, drivers never need to DMA to it, we don't * need a fixed mapping for that area. */ if (!htab_address) { pr_debug("iommu: htab is NULL, on LPAR? Huh?\n"); return -1; } hbase = __pa(htab_address); hend = hbase + htab_size_bytes; /* The window must start and end on a segment boundary */ if ((hbase != ALIGN(hbase, 1 << IO_SEGMENT_SHIFT)) || (hend != ALIGN(hend, 1 << IO_SEGMENT_SHIFT))) { pr_debug("iommu: hash window not segment aligned\n"); return -1; } /* Check the hash window fits inside the real DMA window */ for_each_node_by_name(np, "axon") { cell_iommu_get_window(np, &dbase, &dsize); if (hbase < dbase || (hend > (dbase + dsize))) { pr_debug("iommu: hash window doesn't fit in" "real DMA window\n"); of_node_put(np); return -1; } } fbase = 0; } /* Setup the dynamic regions */ for_each_node_by_name(np, "axon") { iommu = cell_iommu_alloc(np); BUG_ON(!iommu); if (hbase == 0) cell_iommu_get_window(np, &dbase, &dsize); else { dbase = hbase; dsize = htab_size_bytes; } printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx " "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase, dbase + dsize, fbase, fbase + fsize); cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize); iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0, IOMMU_PAGE_SHIFT_4K); cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize, fbase, fsize); cell_iommu_enable_hardware(iommu); cell_iommu_setup_window(iommu, np, dbase, dsize, 0); } cell_pci_controller_ops.iommu_bypass_supported = cell_pci_iommu_bypass_supported; return 0; } static int iommu_fixed_disabled; static int __init setup_iommu_fixed(char *str) { struct device_node *pciep; if (strcmp(str, "off") == 0) iommu_fixed_disabled = 1; /* If we can find a pcie-endpoint in the device tree assume that * we're on a triblade or a CAB so by default the fixed mapping * should be set to be weakly ordered; but only if the boot * option WASN'T set for strong ordering */ pciep = of_find_node_by_type(NULL, "pcie-endpoint"); if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0)) iommu_fixed_is_weak = true; of_node_put(pciep); return 1; } __setup("iommu_fixed=", setup_iommu_fixed); static int __init cell_iommu_init(void) { struct device_node *np; /* If 
IOMMU is disabled or we have little enough RAM to not need * to enable it, we setup a direct mapping. * * Note: should we make sure we have the IOMMU actually disabled ? */ if (iommu_is_off || (!iommu_force_on && memblock_end_of_DRAM() <= 0x80000000ull)) if (cell_iommu_init_disabled() == 0) goto bail; /* Setup various callbacks */ cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup; if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0) goto done; /* Create an iommu for each /axon node. */ for_each_node_by_name(np, "axon") { if (np->parent == NULL || np->parent->parent != NULL) continue; cell_iommu_init_one(np, 0); } /* Create an iommu for each toplevel /pci-internal node for * old hardware/firmware */ for_each_node_by_name(np, "pci-internal") { if (np->parent == NULL || np->parent->parent != NULL) continue; cell_iommu_init_one(np, SPIDER_DMA_OFFSET); } done: /* Setup default PCI iommu ops */ set_pci_dma_ops(&dma_iommu_ops); cell_iommu_enabled = true; bail: /* Register callbacks on OF platform device addition/removal * to handle linking them to the right DMA operations */ bus_register_notifier(&platform_bus_type, &cell_of_bus_notifier); return 0; } machine_arch_initcall(cell, cell_iommu_init);
linux-master
arch/powerpc/platforms/cell/iommu.c
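A minimal stand-alone sketch (not kernel code) of the address arithmetic described in the fixed-mapping comment in iommu.c above, for the common case of a machine with less than 30GB of RAM. The window size, firmware offset and target address are illustrative assumptions, not values read from real hardware or the device tree.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dynamic_window_size = 2ULL << 30;	/* assumed 2GB dynamic IOMMU window at 0 */
	uint64_t dma_iommu_fixed_base = dynamic_window_size; /* fixed 1:1 map placed just above it */
	uint64_t firmware_offset = 0;			/* from "dma-ranges"; assumed zero here */
	uint64_t cpu_addr = 1ULL << 30;			/* 64-bit device wants to DMA to 1GB */

	/* Mirrors cell_dma_dev_setup(): the per-device DMA offset is the fixed
	 * base plus the firmware offset, so the device is told to DMA to 3GB. */
	uint64_t dev_addr = cpu_addr + dma_iommu_fixed_base + firmware_offset;

	printf("CPU 0x%llx -> device 0x%llx\n",
	       (unsigned long long)cpu_addr, (unsigned long long)dev_addr);
	return 0;
}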
// SPDX-License-Identifier: GPL-2.0-only /* * cbe_regs.c * * Accessor routines for the various MMIO register blocks of the CBE * * (c) 2006 Benjamin Herrenschmidt <[email protected]>, IBM Corp. */ #include <linux/percpu.h> #include <linux/types.h> #include <linux/export.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/pgtable.h> #include <asm/io.h> #include <asm/ptrace.h> #include <asm/cell-regs.h> /* * Current implementation uses "cpu" nodes. We build our own mapping * array of cpu numbers to cpu nodes locally for now to allow interrupt * time code to have a fast path rather than call of_get_cpu_node(). If * we implement cpu hotplug, we'll have to install an appropriate notifier * in order to release references to the cpu going away */ static struct cbe_regs_map { struct device_node *cpu_node; struct device_node *be_node; struct cbe_pmd_regs __iomem *pmd_regs; struct cbe_iic_regs __iomem *iic_regs; struct cbe_mic_tm_regs __iomem *mic_tm_regs; struct cbe_pmd_shadow_regs pmd_shadow_regs; } cbe_regs_maps[MAX_CBE]; static int cbe_regs_map_count; static struct cbe_thread_map { struct device_node *cpu_node; struct device_node *be_node; struct cbe_regs_map *regs; unsigned int thread_id; unsigned int cbe_id; } cbe_thread_map[NR_CPUS]; static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} }; static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE }; static struct cbe_regs_map *cbe_find_map(struct device_node *np) { int i; struct device_node *tmp_np; if (!of_node_is_type(np, "spe")) { for (i = 0; i < cbe_regs_map_count; i++) if (cbe_regs_maps[i].cpu_node == np || cbe_regs_maps[i].be_node == np) return &cbe_regs_maps[i]; return NULL; } if (np->data) return np->data; /* walk up path until cpu or be node was found */ tmp_np = np; do { tmp_np = tmp_np->parent; /* on a correct devicetree we wont get up to root */ BUG_ON(!tmp_np); } while (!of_node_is_type(tmp_np, "cpu") || !of_node_is_type(tmp_np, "be")); np->data = cbe_find_map(tmp_np); return np->data; } struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np) { struct cbe_regs_map *map = cbe_find_map(np); if (map == NULL) return NULL; return map->pmd_regs; } EXPORT_SYMBOL_GPL(cbe_get_pmd_regs); struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu) { struct cbe_regs_map *map = cbe_thread_map[cpu].regs; if (map == NULL) return NULL; return map->pmd_regs; } EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs); struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np) { struct cbe_regs_map *map = cbe_find_map(np); if (map == NULL) return NULL; return &map->pmd_shadow_regs; } struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu) { struct cbe_regs_map *map = cbe_thread_map[cpu].regs; if (map == NULL) return NULL; return &map->pmd_shadow_regs; } struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np) { struct cbe_regs_map *map = cbe_find_map(np); if (map == NULL) return NULL; return map->iic_regs; } struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu) { struct cbe_regs_map *map = cbe_thread_map[cpu].regs; if (map == NULL) return NULL; return map->iic_regs; } struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np) { struct cbe_regs_map *map = cbe_find_map(np); if (map == NULL) return NULL; return map->mic_tm_regs; } struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu) { struct cbe_regs_map *map = cbe_thread_map[cpu].regs; if (map == NULL) return NULL; return map->mic_tm_regs; } EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs); u32 
cbe_get_hw_thread_id(int cpu) { return cbe_thread_map[cpu].thread_id; } EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id); u32 cbe_cpu_to_node(int cpu) { return cbe_thread_map[cpu].cbe_id; } EXPORT_SYMBOL_GPL(cbe_cpu_to_node); u32 cbe_node_to_cpu(int node) { return cpumask_first(&cbe_local_mask[node]); } EXPORT_SYMBOL_GPL(cbe_node_to_cpu); static struct device_node *__init cbe_get_be_node(int cpu_id) { struct device_node *np; for_each_node_by_type (np, "be") { int len,i; const phandle *cpu_handle; cpu_handle = of_get_property(np, "cpus", &len); /* * the CAB SLOF tree is non compliant, so we just assume * there is only one node */ if (WARN_ON_ONCE(!cpu_handle)) return np; for (i = 0; i < len; i++) { struct device_node *ch_np = of_find_node_by_phandle(cpu_handle[i]); struct device_node *ci_np = of_get_cpu_node(cpu_id, NULL); of_node_put(ch_np); of_node_put(ci_np); if (ch_np == ci_np) return np; } } return NULL; } static void __init cbe_fill_regs_map(struct cbe_regs_map *map) { if(map->be_node) { struct device_node *be, *np, *parent_np; be = map->be_node; for_each_node_by_type(np, "pervasive") { parent_np = of_get_parent(np); if (parent_np == be) map->pmd_regs = of_iomap(np, 0); of_node_put(parent_np); } for_each_node_by_type(np, "CBEA-Internal-Interrupt-Controller") { parent_np = of_get_parent(np); if (parent_np == be) map->iic_regs = of_iomap(np, 2); of_node_put(parent_np); } for_each_node_by_type(np, "mic-tm") { parent_np = of_get_parent(np); if (parent_np == be) map->mic_tm_regs = of_iomap(np, 0); of_node_put(parent_np); } } else { struct device_node *cpu; /* That hack must die die die ! */ const struct address_prop { unsigned long address; unsigned int len; } __attribute__((packed)) *prop; cpu = map->cpu_node; prop = of_get_property(cpu, "pervasive", NULL); if (prop != NULL) map->pmd_regs = ioremap(prop->address, prop->len); prop = of_get_property(cpu, "iic", NULL); if (prop != NULL) map->iic_regs = ioremap(prop->address, prop->len); prop = of_get_property(cpu, "mic-tm", NULL); if (prop != NULL) map->mic_tm_regs = ioremap(prop->address, prop->len); } } void __init cbe_regs_init(void) { int i; unsigned int thread_id; struct device_node *cpu; /* Build local fast map of CPUs */ for_each_possible_cpu(i) { cbe_thread_map[i].cpu_node = of_get_cpu_node(i, &thread_id); cbe_thread_map[i].be_node = cbe_get_be_node(i); cbe_thread_map[i].thread_id = thread_id; } /* Find maps for each device tree CPU */ for_each_node_by_type(cpu, "cpu") { struct cbe_regs_map *map; unsigned int cbe_id; cbe_id = cbe_regs_map_count++; map = &cbe_regs_maps[cbe_id]; if (cbe_regs_map_count > MAX_CBE) { printk(KERN_ERR "cbe_regs: More BE chips than supported" "!\n"); cbe_regs_map_count--; of_node_put(cpu); return; } of_node_put(map->cpu_node); map->cpu_node = of_node_get(cpu); for_each_possible_cpu(i) { struct cbe_thread_map *thread = &cbe_thread_map[i]; if (thread->cpu_node == cpu) { thread->regs = map; thread->cbe_id = cbe_id; map->be_node = thread->be_node; cpumask_set_cpu(i, &cbe_local_mask[cbe_id]); if(thread->thread_id == 0) cpumask_set_cpu(i, &cbe_first_online_cpu); } } cbe_fill_regs_map(map); } }
linux-master
arch/powerpc/platforms/cell/cbe_regs.c
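A small stand-alone illustration of why the "that hack must die" property layout in cbe_fill_regs_map() above needs the packed attribute: the legacy properties are 12-byte blobs (a 64-bit address immediately followed by a 32-bit length), and only a packed struct overlays them exactly on a 64-bit (LP64) build.

#include <stdio.h>

struct address_prop {
	unsigned long address;
	unsigned int len;
} __attribute__((packed));

struct address_prop_padded {		/* same fields without the packing */
	unsigned long address;
	unsigned int len;
};

int main(void)
{
	printf("packed:   %zu bytes\n", sizeof(struct address_prop));		/* 12 */
	printf("unpacked: %zu bytes\n", sizeof(struct address_prop_padded));	/* 16 on LP64 */
	return 0;
}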
// SPDX-License-Identifier: GPL-2.0-or-later /* * thermal support for the cell processor * * This module adds some sysfs attributes to cpu and spu nodes. * Base for measurements are the digital thermal sensors (DTS) * located on the chip. * The accuracy is 2 degrees, starting from 65 up to 125 degrees celsius * The attributes can be found under * /sys/devices/system/cpu/cpuX/thermal * /sys/devices/system/spu/spuX/thermal * * The following attributes are added for each node: * temperature: * contains the current temperature measured by the DTS * throttle_begin: * throttling begins when temperature is greater or equal to * throttle_begin. Setting this value to 125 prevents throttling. * throttle_end: * throttling is being ceased, if the temperature is lower than * throttle_end. Due to a delay between applying throttling and * a reduced temperature this value should be less than throttle_begin. * A value equal to throttle_begin provides only a very little hysteresis. * throttle_full_stop: * If the temperatrue is greater or equal to throttle_full_stop, * full throttling is applied to the cpu or spu. This value should be * greater than throttle_begin and throttle_end. Setting this value to * 65 prevents the unit from running code at all. * * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 * * Author: Christian Krafft <[email protected]> */ #include <linux/module.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/cpu.h> #include <linux/stringify.h> #include <asm/spu.h> #include <asm/io.h> #include <asm/cell-regs.h> #include "spu_priv1_mmio.h" #define TEMP_MIN 65 #define TEMP_MAX 125 #define DEVICE_PREFIX_ATTR(_prefix,_name,_mode) \ struct device_attribute attr_ ## _prefix ## _ ## _name = { \ .attr = { .name = __stringify(_name), .mode = _mode }, \ .show = _prefix ## _show_ ## _name, \ .store = _prefix ## _store_ ## _name, \ }; static inline u8 reg_to_temp(u8 reg_value) { return ((reg_value & 0x3f) << 1) + TEMP_MIN; } static inline u8 temp_to_reg(u8 temp) { return ((temp - TEMP_MIN) >> 1) & 0x3f; } static struct cbe_pmd_regs __iomem *get_pmd_regs(struct device *dev) { struct spu *spu; spu = container_of(dev, struct spu, dev); return cbe_get_pmd_regs(spu_devnode(spu)); } /* returns the value for a given spu in a given register */ static u8 spu_read_register_value(struct device *dev, union spe_reg __iomem *reg) { union spe_reg value; struct spu *spu; spu = container_of(dev, struct spu, dev); value.val = in_be64(&reg->val); return value.spe[spu->spe_id]; } static ssize_t spu_show_temp(struct device *dev, struct device_attribute *attr, char *buf) { u8 value; struct cbe_pmd_regs __iomem *pmd_regs; pmd_regs = get_pmd_regs(dev); value = spu_read_register_value(dev, &pmd_regs->ts_ctsr1); return sprintf(buf, "%d\n", reg_to_temp(value)); } static ssize_t show_throttle(struct cbe_pmd_regs __iomem *pmd_regs, char *buf, int pos) { u64 value; value = in_be64(&pmd_regs->tm_tpr.val); /* access the corresponding byte */ value >>= pos; value &= 0x3F; return sprintf(buf, "%d\n", reg_to_temp(value)); } static ssize_t store_throttle(struct cbe_pmd_regs __iomem *pmd_regs, const char *buf, size_t size, int pos) { u64 reg_value; unsigned int temp; u64 new_value; int ret; ret = sscanf(buf, "%u", &temp); if (ret != 1 || temp < TEMP_MIN || temp > TEMP_MAX) return -EINVAL; new_value = temp_to_reg(temp); reg_value = in_be64(&pmd_regs->tm_tpr.val); /* zero out bits for new value */ reg_value &= ~(0xffull << pos); /* set bits to new value */ reg_value |= new_value << pos; out_be64(&pmd_regs->tm_tpr.val, 
reg_value); return size; } static ssize_t spu_show_throttle_end(struct device *dev, struct device_attribute *attr, char *buf) { return show_throttle(get_pmd_regs(dev), buf, 0); } static ssize_t spu_show_throttle_begin(struct device *dev, struct device_attribute *attr, char *buf) { return show_throttle(get_pmd_regs(dev), buf, 8); } static ssize_t spu_show_throttle_full_stop(struct device *dev, struct device_attribute *attr, char *buf) { return show_throttle(get_pmd_regs(dev), buf, 16); } static ssize_t spu_store_throttle_end(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { return store_throttle(get_pmd_regs(dev), buf, size, 0); } static ssize_t spu_store_throttle_begin(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { return store_throttle(get_pmd_regs(dev), buf, size, 8); } static ssize_t spu_store_throttle_full_stop(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { return store_throttle(get_pmd_regs(dev), buf, size, 16); } static ssize_t ppe_show_temp(struct device *dev, char *buf, int pos) { struct cbe_pmd_regs __iomem *pmd_regs; u64 value; pmd_regs = cbe_get_cpu_pmd_regs(dev->id); value = in_be64(&pmd_regs->ts_ctsr2); value = (value >> pos) & 0x3f; return sprintf(buf, "%d\n", reg_to_temp(value)); } /* shows the temperature of the DTS on the PPE, * located near the linear thermal sensor */ static ssize_t ppe_show_temp0(struct device *dev, struct device_attribute *attr, char *buf) { return ppe_show_temp(dev, buf, 32); } /* shows the temperature of the second DTS on the PPE */ static ssize_t ppe_show_temp1(struct device *dev, struct device_attribute *attr, char *buf) { return ppe_show_temp(dev, buf, 0); } static ssize_t ppe_show_throttle_end(struct device *dev, struct device_attribute *attr, char *buf) { return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 32); } static ssize_t ppe_show_throttle_begin(struct device *dev, struct device_attribute *attr, char *buf) { return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 40); } static ssize_t ppe_show_throttle_full_stop(struct device *dev, struct device_attribute *attr, char *buf) { return show_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, 48); } static ssize_t ppe_store_throttle_end(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 32); } static ssize_t ppe_store_throttle_begin(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 40); } static ssize_t ppe_store_throttle_full_stop(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { return store_throttle(cbe_get_cpu_pmd_regs(dev->id), buf, size, 48); } static struct device_attribute attr_spu_temperature = { .attr = {.name = "temperature", .mode = 0400 }, .show = spu_show_temp, }; static DEVICE_PREFIX_ATTR(spu, throttle_end, 0600); static DEVICE_PREFIX_ATTR(spu, throttle_begin, 0600); static DEVICE_PREFIX_ATTR(spu, throttle_full_stop, 0600); static struct attribute *spu_attributes[] = { &attr_spu_temperature.attr, &attr_spu_throttle_end.attr, &attr_spu_throttle_begin.attr, &attr_spu_throttle_full_stop.attr, NULL, }; static const struct attribute_group spu_attribute_group = { .name = "thermal", .attrs = spu_attributes, }; static struct device_attribute attr_ppe_temperature0 = { .attr = {.name = "temperature0", .mode = 0400 }, .show = ppe_show_temp0, }; static struct 
device_attribute attr_ppe_temperature1 = { .attr = {.name = "temperature1", .mode = 0400 }, .show = ppe_show_temp1, }; static DEVICE_PREFIX_ATTR(ppe, throttle_end, 0600); static DEVICE_PREFIX_ATTR(ppe, throttle_begin, 0600); static DEVICE_PREFIX_ATTR(ppe, throttle_full_stop, 0600); static struct attribute *ppe_attributes[] = { &attr_ppe_temperature0.attr, &attr_ppe_temperature1.attr, &attr_ppe_throttle_end.attr, &attr_ppe_throttle_begin.attr, &attr_ppe_throttle_full_stop.attr, NULL, }; static struct attribute_group ppe_attribute_group = { .name = "thermal", .attrs = ppe_attributes, }; /* * initialize throttling with default values */ static int __init init_default_values(void) { int cpu; struct cbe_pmd_regs __iomem *pmd_regs; struct device *dev; union ppe_spe_reg tpr; union spe_reg str1; u64 str2; union spe_reg cr1; u64 cr2; /* TPR defaults */ /* ppe * 1F - no full stop * 08 - dynamic throttling starts if over 80 degrees * 03 - dynamic throttling ceases if below 70 degrees */ tpr.ppe = 0x1F0803; /* spe * 10 - full stopped when over 96 degrees * 08 - dynamic throttling starts if over 80 degrees * 03 - dynamic throttling ceases if below 70 degrees */ tpr.spe = 0x100803; /* STR defaults */ /* str1 * 10 - stop 16 of 32 cycles */ str1.val = 0x1010101010101010ull; /* str2 * 10 - stop 16 of 32 cycles */ str2 = 0x10; /* CR defaults */ /* cr1 * 4 - normal operation */ cr1.val = 0x0404040404040404ull; /* cr2 * 4 - normal operation */ cr2 = 0x04; for_each_possible_cpu (cpu) { pr_debug("processing cpu %d\n", cpu); dev = get_cpu_device(cpu); if (!dev) { pr_info("invalid dev pointer for cbe_thermal\n"); return -EINVAL; } pmd_regs = cbe_get_cpu_pmd_regs(dev->id); if (!pmd_regs) { pr_info("invalid CBE regs pointer for cbe_thermal\n"); return -EINVAL; } out_be64(&pmd_regs->tm_str2, str2); out_be64(&pmd_regs->tm_str1.val, str1.val); out_be64(&pmd_regs->tm_tpr.val, tpr.val); out_be64(&pmd_regs->tm_cr1.val, cr1.val); out_be64(&pmd_regs->tm_cr2, cr2); } return 0; } static int __init thermal_init(void) { int rc = init_default_values(); if (rc == 0) { spu_add_dev_attr_group(&spu_attribute_group); cpu_add_dev_attr_group(&ppe_attribute_group); } return rc; } module_init(thermal_init); static void __exit thermal_exit(void) { spu_remove_dev_attr_group(&spu_attribute_group); cpu_remove_dev_attr_group(&ppe_attribute_group); } module_exit(thermal_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Christian Krafft <[email protected]>");
linux-master
arch/powerpc/platforms/cell/cbe_thermal.c
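A quick stand-alone check of the DTS encoding handled by reg_to_temp() and temp_to_reg() above; it shows the 2-degree granularity mentioned in the header comment (83 and 84 degrees both land in the same register value). The temperatures are arbitrary examples.

#include <stdio.h>
#include <stdint.h>

#define TEMP_MIN 65

static uint8_t reg_to_temp(uint8_t reg)  { return ((reg & 0x3f) << 1) + TEMP_MIN; }
static uint8_t temp_to_reg(uint8_t temp) { return ((temp - TEMP_MIN) >> 1) & 0x3f; }

int main(void)
{
	printf("83 C -> reg %d -> %d C\n", temp_to_reg(83), reg_to_temp(temp_to_reg(83)));
	printf("84 C -> reg %d -> %d C\n", temp_to_reg(84), reg_to_temp(temp_to_reg(84)));
	return 0;
}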
// SPDX-License-Identifier: GPL-2.0-only /* * spu management operations for of based platforms * * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 * Copyright 2006 Sony Corp. * (C) Copyright 2007 TOSHIBA CORPORATION */ #include <linux/interrupt.h> #include <linux/list.h> #include <linux/export.h> #include <linux/ptrace.h> #include <linux/wait.h> #include <linux/mm.h> #include <linux/io.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <asm/spu.h> #include <asm/spu_priv1.h> #include <asm/firmware.h> #include "spufs/spufs.h" #include "interrupt.h" #include "spu_priv1_mmio.h" struct device_node *spu_devnode(struct spu *spu) { return spu->devnode; } EXPORT_SYMBOL_GPL(spu_devnode); static u64 __init find_spu_unit_number(struct device_node *spe) { const unsigned int *prop; int proplen; /* new device trees should provide the physical-id attribute */ prop = of_get_property(spe, "physical-id", &proplen); if (proplen == 4) return (u64)*prop; /* celleb device tree provides the unit-id */ prop = of_get_property(spe, "unit-id", &proplen); if (proplen == 4) return (u64)*prop; /* legacy device trees provide the id in the reg attribute */ prop = of_get_property(spe, "reg", &proplen); if (proplen == 4) return (u64)*prop; return 0; } static void spu_unmap(struct spu *spu) { if (!firmware_has_feature(FW_FEATURE_LPAR)) iounmap(spu->priv1); iounmap(spu->priv2); iounmap(spu->problem); iounmap((__force u8 __iomem *)spu->local_store); } static int __init spu_map_interrupts_old(struct spu *spu, struct device_node *np) { unsigned int isrc; const u32 *tmp; int nid; /* Get the interrupt source unit from the device-tree */ tmp = of_get_property(np, "isrc", NULL); if (!tmp) return -ENODEV; isrc = tmp[0]; tmp = of_get_property(np->parent->parent, "node-id", NULL); if (!tmp) { printk(KERN_WARNING "%s: can't find node-id\n", __func__); nid = spu->node; } else nid = tmp[0]; /* Add the node number */ isrc |= nid << IIC_IRQ_NODE_SHIFT; /* Now map interrupts of all 3 classes */ spu->irqs[0] = irq_create_mapping(NULL, IIC_IRQ_CLASS_0 | isrc); spu->irqs[1] = irq_create_mapping(NULL, IIC_IRQ_CLASS_1 | isrc); spu->irqs[2] = irq_create_mapping(NULL, IIC_IRQ_CLASS_2 | isrc); /* Right now, we only fail if class 2 failed */ if (!spu->irqs[2]) return -EINVAL; return 0; } static void __iomem * __init spu_map_prop_old(struct spu *spu, struct device_node *n, const char *name) { const struct address_prop { unsigned long address; unsigned int len; } __attribute__((packed)) *prop; int proplen; prop = of_get_property(n, name, &proplen); if (prop == NULL || proplen != sizeof (struct address_prop)) return NULL; return ioremap(prop->address, prop->len); } static int __init spu_map_device_old(struct spu *spu) { struct device_node *node = spu->devnode; const char *prop; int ret; ret = -ENODEV; spu->name = of_get_property(node, "name", NULL); if (!spu->name) goto out; prop = of_get_property(node, "local-store", NULL); if (!prop) goto out; spu->local_store_phys = *(unsigned long *)prop; /* we use local store as ram, not io memory */ spu->local_store = (void __force *) spu_map_prop_old(spu, node, "local-store"); if (!spu->local_store) goto out; prop = of_get_property(node, "problem", NULL); if (!prop) goto out_unmap; spu->problem_phys = *(unsigned long *)prop; spu->problem = spu_map_prop_old(spu, node, "problem"); if (!spu->problem) goto out_unmap; spu->priv2 = spu_map_prop_old(spu, node, "priv2"); if (!spu->priv2) goto out_unmap; if (!firmware_has_feature(FW_FEATURE_LPAR)) { 
spu->priv1 = spu_map_prop_old(spu, node, "priv1"); if (!spu->priv1) goto out_unmap; } ret = 0; goto out; out_unmap: spu_unmap(spu); out: return ret; } static int __init spu_map_interrupts(struct spu *spu, struct device_node *np) { int i; for (i=0; i < 3; i++) { spu->irqs[i] = irq_of_parse_and_map(np, i); if (!spu->irqs[i]) goto err; } return 0; err: pr_debug("failed to map irq %x for spu %s\n", i, spu->name); for (; i >= 0; i--) { if (spu->irqs[i]) irq_dispose_mapping(spu->irqs[i]); } return -EINVAL; } static int __init spu_map_resource(struct spu *spu, int nr, void __iomem** virt, unsigned long *phys) { struct device_node *np = spu->devnode; struct resource resource = { }; unsigned long len; int ret; ret = of_address_to_resource(np, nr, &resource); if (ret) return ret; if (phys) *phys = resource.start; len = resource_size(&resource); *virt = ioremap(resource.start, len); if (!*virt) return -EINVAL; return 0; } static int __init spu_map_device(struct spu *spu) { struct device_node *np = spu->devnode; int ret = -ENODEV; spu->name = of_get_property(np, "name", NULL); if (!spu->name) goto out; ret = spu_map_resource(spu, 0, (void __iomem**)&spu->local_store, &spu->local_store_phys); if (ret) { pr_debug("spu_new: failed to map %pOF resource 0\n", np); goto out; } ret = spu_map_resource(spu, 1, (void __iomem**)&spu->problem, &spu->problem_phys); if (ret) { pr_debug("spu_new: failed to map %pOF resource 1\n", np); goto out_unmap; } ret = spu_map_resource(spu, 2, (void __iomem**)&spu->priv2, NULL); if (ret) { pr_debug("spu_new: failed to map %pOF resource 2\n", np); goto out_unmap; } if (!firmware_has_feature(FW_FEATURE_LPAR)) ret = spu_map_resource(spu, 3, (void __iomem**)&spu->priv1, NULL); if (ret) { pr_debug("spu_new: failed to map %pOF resource 3\n", np); goto out_unmap; } pr_debug("spu_new: %pOF maps:\n", np); pr_debug(" local store : 0x%016lx -> 0x%p\n", spu->local_store_phys, spu->local_store); pr_debug(" problem state : 0x%016lx -> 0x%p\n", spu->problem_phys, spu->problem); pr_debug(" priv2 : 0x%p\n", spu->priv2); pr_debug(" priv1 : 0x%p\n", spu->priv1); return 0; out_unmap: spu_unmap(spu); out: pr_debug("failed to map spe %s: %d\n", spu->name, ret); return ret; } static int __init of_enumerate_spus(int (*fn)(void *data)) { int ret; struct device_node *node; unsigned int n = 0; ret = -ENODEV; for_each_node_by_type(node, "spe") { ret = fn(node); if (ret) { printk(KERN_WARNING "%s: Error initializing %pOFn\n", __func__, node); of_node_put(node); break; } n++; } return ret ? 
ret : n; } static int __init of_create_spu(struct spu *spu, void *data) { int ret; struct device_node *spe = (struct device_node *)data; static int legacy_map = 0, legacy_irq = 0; spu->devnode = of_node_get(spe); spu->spe_id = find_spu_unit_number(spe); spu->node = of_node_to_nid(spe); if (spu->node >= MAX_NUMNODES) { printk(KERN_WARNING "SPE %pOF on node %d ignored," " node number too big\n", spe, spu->node); printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n"); ret = -ENODEV; goto out; } ret = spu_map_device(spu); if (ret) { if (!legacy_map) { legacy_map = 1; printk(KERN_WARNING "%s: Legacy device tree found, " "trying to map old style\n", __func__); } ret = spu_map_device_old(spu); if (ret) { printk(KERN_ERR "Unable to map %s\n", spu->name); goto out; } } ret = spu_map_interrupts(spu, spe); if (ret) { if (!legacy_irq) { legacy_irq = 1; printk(KERN_WARNING "%s: Legacy device tree found, " "trying old style irq\n", __func__); } ret = spu_map_interrupts_old(spu, spe); if (ret) { printk(KERN_ERR "%s: could not map interrupts\n", spu->name); goto out_unmap; } } pr_debug("Using SPE %s %p %p %p %p %d\n", spu->name, spu->local_store, spu->problem, spu->priv1, spu->priv2, spu->number); goto out; out_unmap: spu_unmap(spu); out: return ret; } static int of_destroy_spu(struct spu *spu) { spu_unmap(spu); of_node_put(spu->devnode); return 0; } static void enable_spu_by_master_run(struct spu_context *ctx) { ctx->ops->master_start(ctx); } static void disable_spu_by_master_run(struct spu_context *ctx) { ctx->ops->master_stop(ctx); } /* Hardcoded affinity idxs for qs20 */ #define QS20_SPES_PER_BE 8 static int qs20_reg_idxs[QS20_SPES_PER_BE] = { 0, 2, 4, 6, 7, 5, 3, 1 }; static int qs20_reg_memory[QS20_SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 }; static struct spu *__init spu_lookup_reg(int node, u32 reg) { struct spu *spu; const u32 *spu_reg; list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { spu_reg = of_get_property(spu_devnode(spu), "reg", NULL); if (*spu_reg == reg) return spu; } return NULL; } static void __init init_affinity_qs20_harcoded(void) { int node, i; struct spu *last_spu, *spu; u32 reg; for (node = 0; node < MAX_NUMNODES; node++) { last_spu = NULL; for (i = 0; i < QS20_SPES_PER_BE; i++) { reg = qs20_reg_idxs[i]; spu = spu_lookup_reg(node, reg); if (!spu) continue; spu->has_mem_affinity = qs20_reg_memory[reg]; if (last_spu) list_add_tail(&spu->aff_list, &last_spu->aff_list); last_spu = spu; } } } static int __init of_has_vicinity(void) { struct device_node *dn; for_each_node_by_type(dn, "spe") { if (of_property_present(dn, "vicinity")) { of_node_put(dn); return 1; } } return 0; } static struct spu *__init devnode_spu(int cbe, struct device_node *dn) { struct spu *spu; list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) if (spu_devnode(spu) == dn) return spu; return NULL; } static struct spu * __init neighbour_spu(int cbe, struct device_node *target, struct device_node *avoid) { struct spu *spu; struct device_node *spu_dn; const phandle *vic_handles; int lenp, i; list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) { spu_dn = spu_devnode(spu); if (spu_dn == avoid) continue; vic_handles = of_get_property(spu_dn, "vicinity", &lenp); for (i=0; i < (lenp / sizeof(phandle)); i++) { if (vic_handles[i] == target->phandle) return spu; } } return NULL; } static void __init init_affinity_node(int cbe) { struct spu *spu, *last_spu; struct device_node *vic_dn, *last_spu_dn; phandle avoid_ph; const phandle *vic_handles; int lenp, i, added; last_spu = 
list_first_entry(&cbe_spu_info[cbe].spus, struct spu, cbe_list); avoid_ph = 0; for (added = 1; added < cbe_spu_info[cbe].n_spus; added++) { last_spu_dn = spu_devnode(last_spu); vic_handles = of_get_property(last_spu_dn, "vicinity", &lenp); /* * Walk through each phandle in vicinity property of the spu * (typically two vicinity phandles per spe node) */ for (i = 0; i < (lenp / sizeof(phandle)); i++) { if (vic_handles[i] == avoid_ph) continue; vic_dn = of_find_node_by_phandle(vic_handles[i]); if (!vic_dn) continue; if (of_node_name_eq(vic_dn, "spe") ) { spu = devnode_spu(cbe, vic_dn); avoid_ph = last_spu_dn->phandle; } else { /* * "mic-tm" and "bif0" nodes do not have * vicinity property. So we need to find the * spe which has vic_dn as neighbour, but * skipping the one we came from (last_spu_dn) */ spu = neighbour_spu(cbe, vic_dn, last_spu_dn); if (!spu) continue; if (of_node_name_eq(vic_dn, "mic-tm")) { last_spu->has_mem_affinity = 1; spu->has_mem_affinity = 1; } avoid_ph = vic_dn->phandle; } of_node_put(vic_dn); list_add_tail(&spu->aff_list, &last_spu->aff_list); last_spu = spu; break; } } } static void __init init_affinity_fw(void) { int cbe; for (cbe = 0; cbe < MAX_NUMNODES; cbe++) init_affinity_node(cbe); } static int __init init_affinity(void) { if (of_has_vicinity()) { init_affinity_fw(); } else { if (of_machine_is_compatible("IBM,CPBW-1.0")) init_affinity_qs20_harcoded(); else printk("No affinity configuration found\n"); } return 0; } const struct spu_management_ops spu_management_of_ops = { .enumerate_spus = of_enumerate_spus, .create_spu = of_create_spu, .destroy_spu = of_destroy_spu, .enable_spu = enable_spu_by_master_run, .disable_spu = disable_spu_by_master_run, .init_affinity = init_affinity, };
linux-master
arch/powerpc/platforms/cell/spu_manage.c
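A stand-alone dump of the hardcoded QS20 tables consumed by init_affinity_qs20_harcoded() above, showing the order in which the SPEs are chained onto the affinity list and which of them (regs 0 and 1) are flagged for memory affinity. The tables are copied from the code; everything else is just printing.

#include <stdio.h>

int main(void)
{
	static const int qs20_reg_idxs[8]   = { 0, 2, 4, 6, 7, 5, 3, 1 };
	static const int qs20_reg_memory[8] = { 1, 1, 0, 0, 0, 0, 0, 0 };
	int i;

	for (i = 0; i < 8; i++) {
		int reg = qs20_reg_idxs[i];

		printf("list position %d: SPE reg %d%s\n", i, reg,
		       qs20_reg_memory[reg] ? " (memory affinity)" : "");
	}
	return 0;
}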
// SPDX-License-Identifier: GPL-2.0-or-later /* * driver for powerbutton on IBM cell blades * * (C) Copyright IBM Corp. 2005-2008 * * Author: Christian Krafft <[email protected]> */ #include <linux/input.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <asm/pmi.h> static struct input_dev *button_dev; static struct platform_device *button_pdev; static void cbe_powerbutton_handle_pmi(pmi_message_t pmi_msg) { BUG_ON(pmi_msg.type != PMI_TYPE_POWER_BUTTON); input_report_key(button_dev, KEY_POWER, 1); input_sync(button_dev); input_report_key(button_dev, KEY_POWER, 0); input_sync(button_dev); } static struct pmi_handler cbe_pmi_handler = { .type = PMI_TYPE_POWER_BUTTON, .handle_pmi_message = cbe_powerbutton_handle_pmi, }; static int __init cbe_powerbutton_init(void) { int ret = 0; struct input_dev *dev; if (!of_machine_is_compatible("IBM,CBPLUS-1.0")) { printk(KERN_ERR "%s: Not a cell blade.\n", __func__); ret = -ENODEV; goto out; } dev = input_allocate_device(); if (!dev) { ret = -ENOMEM; printk(KERN_ERR "%s: Not enough memory.\n", __func__); goto out; } set_bit(EV_KEY, dev->evbit); set_bit(KEY_POWER, dev->keybit); dev->name = "Power Button"; dev->id.bustype = BUS_HOST; /* this makes the button look like an acpi power button * no clue whether anyone relies on that though */ dev->id.product = 0x02; dev->phys = "LNXPWRBN/button/input0"; button_pdev = platform_device_register_simple("power_button", 0, NULL, 0); if (IS_ERR(button_pdev)) { ret = PTR_ERR(button_pdev); goto out_free_input; } dev->dev.parent = &button_pdev->dev; ret = input_register_device(dev); if (ret) { printk(KERN_ERR "%s: Failed to register device\n", __func__); goto out_free_pdev; } button_dev = dev; ret = pmi_register_handler(&cbe_pmi_handler); if (ret) { printk(KERN_ERR "%s: Failed to register with pmi.\n", __func__); goto out_free_pdev; } goto out; out_free_pdev: platform_device_unregister(button_pdev); out_free_input: input_free_device(dev); out: return ret; } static void __exit cbe_powerbutton_exit(void) { pmi_unregister_handler(&cbe_pmi_handler); platform_device_unregister(button_pdev); input_free_device(button_dev); } module_init(cbe_powerbutton_init); module_exit(cbe_powerbutton_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Christian Krafft <[email protected]>");
linux-master
arch/powerpc/platforms/cell/cbe_powerbutton.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * SMP support for BPA machines. * * Dave Engebretsen, Peter Bergner, and * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com * * Plus various changes from other IBM teams... */ #undef DEBUG #include <linux/kernel.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/cache.h> #include <linux/err.h> #include <linux/device.h> #include <linux/cpu.h> #include <linux/pgtable.h> #include <asm/ptrace.h> #include <linux/atomic.h> #include <asm/irq.h> #include <asm/page.h> #include <asm/io.h> #include <asm/smp.h> #include <asm/paca.h> #include <asm/machdep.h> #include <asm/cputable.h> #include <asm/firmware.h> #include <asm/rtas.h> #include <asm/cputhreads.h> #include <asm/code-patching.h> #include "interrupt.h" #include <asm/udbg.h> #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) #else #define DBG(fmt...) #endif /* * The Primary thread of each non-boot processor was started from the OF client * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop. */ static cpumask_t of_spin_map; /** * smp_startup_cpu() - start the given cpu * * At boot time, there is nothing to do for primary threads which were * started from Open Firmware. For anything else, call RTAS with the * appropriate start location. * * Returns: * 0 - failure * 1 - success */ static inline int smp_startup_cpu(unsigned int lcpu) { int status; unsigned long start_here = __pa(ppc_function_entry(generic_secondary_smp_init)); unsigned int pcpu; int start_cpu; if (cpumask_test_cpu(lcpu, &of_spin_map)) /* Already started by OF and sitting in spin loop */ return 1; pcpu = get_hard_smp_processor_id(lcpu); /* * If the RTAS start-cpu token does not exist then presume the * cpu is already spinning. */ start_cpu = rtas_function_token(RTAS_FN_START_CPU); if (start_cpu == RTAS_UNKNOWN_SERVICE) return 1; status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, lcpu); if (status != 0) { printk(KERN_ERR "start-cpu failed: %i\n", status); return 0; } return 1; } static void smp_cell_setup_cpu(int cpu) { if (cpu != boot_cpuid) iic_setup_cpu(); /* * change default DABRX to allow user watchpoints */ mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER); } static int smp_cell_kick_cpu(int nr) { if (nr < 0 || nr >= nr_cpu_ids) return -EINVAL; if (!smp_startup_cpu(nr)) return -ENOENT; /* * The processor is currently spinning, waiting for the * cpu_start field to become non-zero After we set cpu_start, * the processor will continue on to secondary_start */ paca_ptrs[nr]->cpu_start = 1; return 0; } static struct smp_ops_t bpa_iic_smp_ops = { .message_pass = iic_message_pass, .probe = iic_request_IPIs, .kick_cpu = smp_cell_kick_cpu, .setup_cpu = smp_cell_setup_cpu, .cpu_bootable = smp_generic_cpu_bootable, }; /* This is called very early */ void __init smp_init_cell(void) { int i; DBG(" -> smp_init_cell()\n"); smp_ops = &bpa_iic_smp_ops; /* Mark threads which are still spinning in hold loops. */ if (cpu_has_feature(CPU_FTR_SMT)) { for_each_present_cpu(i) { if (cpu_thread_in_core(i) == 0) cpumask_set_cpu(i, &of_spin_map); } } else cpumask_copy(&of_spin_map, cpu_present_mask); cpumask_clear_cpu(boot_cpuid, &of_spin_map); /* Non-lpar has additional take/give timebase */ if (rtas_function_token(RTAS_FN_FREEZE_TIME_BASE) != RTAS_UNKNOWN_SERVICE) { smp_ops->give_timebase = rtas_give_timebase; smp_ops->take_timebase = rtas_take_timebase; } DBG(" <- smp_init_cell()\n"); }
linux-master
arch/powerpc/platforms/cell/smp.c
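A stand-alone sketch of the decision smp_init_cell() and smp_startup_cpu() make for each CPU: primary threads (other than the boot CPU) were left spinning by Open Firmware, while secondary threads have to be kicked via the RTAS start-cpu call. A 4-CPU, 2-threads-per-core layout is assumed purely for illustration.

#include <stdio.h>

int main(void)
{
	int threads_per_core = 2;	/* assumed SMT width */
	int boot_cpuid = 0;		/* assumed boot CPU */
	int nr_cpus = 4;
	int i;

	for (i = 0; i < nr_cpus; i++) {
		int primary = (i % threads_per_core) == 0;

		if (i == boot_cpuid)
			printf("cpu %d: boot cpu, already running\n", i);
		else if (primary)
			printf("cpu %d: in the OF spin loop, no RTAS call needed\n", i);
		else
			printf("cpu %d: started with RTAS start-cpu\n", i);
	}
	return 0;
}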
// SPDX-License-Identifier: GPL-2.0-only /* * spu hypervisor abstraction for direct hardware access. * * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 * Copyright 2006 Sony Corp. */ #include <linux/interrupt.h> #include <linux/list.h> #include <linux/ptrace.h> #include <linux/wait.h> #include <linux/mm.h> #include <linux/io.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/sched.h> #include <asm/spu.h> #include <asm/spu_priv1.h> #include <asm/firmware.h> #include "interrupt.h" #include "spu_priv1_mmio.h" static void int_mask_and(struct spu *spu, int class, u64 mask) { u64 old_mask; old_mask = in_be64(&spu->priv1->int_mask_RW[class]); out_be64(&spu->priv1->int_mask_RW[class], old_mask & mask); } static void int_mask_or(struct spu *spu, int class, u64 mask) { u64 old_mask; old_mask = in_be64(&spu->priv1->int_mask_RW[class]); out_be64(&spu->priv1->int_mask_RW[class], old_mask | mask); } static void int_mask_set(struct spu *spu, int class, u64 mask) { out_be64(&spu->priv1->int_mask_RW[class], mask); } static u64 int_mask_get(struct spu *spu, int class) { return in_be64(&spu->priv1->int_mask_RW[class]); } static void int_stat_clear(struct spu *spu, int class, u64 stat) { out_be64(&spu->priv1->int_stat_RW[class], stat); } static u64 int_stat_get(struct spu *spu, int class) { return in_be64(&spu->priv1->int_stat_RW[class]); } static void cpu_affinity_set(struct spu *spu, int cpu) { u64 target; u64 route; if (nr_cpus_node(spu->node)) { const struct cpumask *spumask = cpumask_of_node(spu->node), *cpumask = cpumask_of_node(cpu_to_node(cpu)); if (!cpumask_intersects(spumask, cpumask)) return; } target = iic_get_target_id(cpu); route = target << 48 | target << 32 | target << 16; out_be64(&spu->priv1->int_route_RW, route); } static u64 mfc_dar_get(struct spu *spu) { return in_be64(&spu->priv1->mfc_dar_RW); } static u64 mfc_dsisr_get(struct spu *spu) { return in_be64(&spu->priv1->mfc_dsisr_RW); } static void mfc_dsisr_set(struct spu *spu, u64 dsisr) { out_be64(&spu->priv1->mfc_dsisr_RW, dsisr); } static void mfc_sdr_setup(struct spu *spu) { out_be64(&spu->priv1->mfc_sdr_RW, mfspr(SPRN_SDR1)); } static void mfc_sr1_set(struct spu *spu, u64 sr1) { out_be64(&spu->priv1->mfc_sr1_RW, sr1); } static u64 mfc_sr1_get(struct spu *spu) { return in_be64(&spu->priv1->mfc_sr1_RW); } static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id) { out_be64(&spu->priv1->mfc_tclass_id_RW, tclass_id); } static u64 mfc_tclass_id_get(struct spu *spu) { return in_be64(&spu->priv1->mfc_tclass_id_RW); } static void tlb_invalidate(struct spu *spu) { out_be64(&spu->priv1->tlb_invalidate_entry_W, 0ul); } static void resource_allocation_groupID_set(struct spu *spu, u64 id) { out_be64(&spu->priv1->resource_allocation_groupID_RW, id); } static u64 resource_allocation_groupID_get(struct spu *spu) { return in_be64(&spu->priv1->resource_allocation_groupID_RW); } static void resource_allocation_enable_set(struct spu *spu, u64 enable) { out_be64(&spu->priv1->resource_allocation_enable_RW, enable); } static u64 resource_allocation_enable_get(struct spu *spu) { return in_be64(&spu->priv1->resource_allocation_enable_RW); } const struct spu_priv1_ops spu_priv1_mmio_ops = { .int_mask_and = int_mask_and, .int_mask_or = int_mask_or, .int_mask_set = int_mask_set, .int_mask_get = int_mask_get, .int_stat_clear = int_stat_clear, .int_stat_get = int_stat_get, .cpu_affinity_set = cpu_affinity_set, .mfc_dar_get = mfc_dar_get, .mfc_dsisr_get = mfc_dsisr_get, .mfc_dsisr_set = mfc_dsisr_set, .mfc_sdr_setup = mfc_sdr_setup, 
.mfc_sr1_set = mfc_sr1_set, .mfc_sr1_get = mfc_sr1_get, .mfc_tclass_id_set = mfc_tclass_id_set, .mfc_tclass_id_get = mfc_tclass_id_get, .tlb_invalidate = tlb_invalidate, .resource_allocation_groupID_set = resource_allocation_groupID_set, .resource_allocation_groupID_get = resource_allocation_groupID_get, .resource_allocation_enable_set = resource_allocation_enable_set, .resource_allocation_enable_get = resource_allocation_enable_get, };
linux-master
arch/powerpc/platforms/cell/spu_priv1_mmio.c
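A stand-alone reproduction of the route value built by cpu_affinity_set() above: the same IIC target id is replicated into three 16-bit fields of the 64-bit interrupt routing register (one per interrupt class, going by the surrounding code). The target value is a made-up example rather than a real iic_get_target_id() result.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t target = 0x2a;		/* hypothetical IIC target id */
	uint64_t route  = target << 48 | target << 32 | target << 16;

	printf("int_route_RW = 0x%016llx\n", (unsigned long long)route);
	return 0;
}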
// SPDX-License-Identifier: GPL-2.0-or-later /* hw_ops.c - query/set operations on active SPU context. * * Copyright (C) IBM 2005 * Author: Mark Nutter <[email protected]> */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/poll.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <asm/io.h> #include <asm/spu.h> #include <asm/spu_priv1.h> #include <asm/spu_csa.h> #include <asm/mmu_context.h> #include "spufs.h" static int spu_hw_mbox_read(struct spu_context *ctx, u32 * data) { struct spu *spu = ctx->spu; struct spu_problem __iomem *prob = spu->problem; u32 mbox_stat; int ret = 0; spin_lock_irq(&spu->register_lock); mbox_stat = in_be32(&prob->mb_stat_R); if (mbox_stat & 0x0000ff) { *data = in_be32(&prob->pu_mb_R); ret = 4; } spin_unlock_irq(&spu->register_lock); return ret; } static u32 spu_hw_mbox_stat_read(struct spu_context *ctx) { return in_be32(&ctx->spu->problem->mb_stat_R); } static __poll_t spu_hw_mbox_stat_poll(struct spu_context *ctx, __poll_t events) { struct spu *spu = ctx->spu; __poll_t ret = 0; u32 stat; spin_lock_irq(&spu->register_lock); stat = in_be32(&spu->problem->mb_stat_R); /* if the requested event is there, return the poll mask, otherwise enable the interrupt to get notified, but first mark any pending interrupts as done so we don't get woken up unnecessarily */ if (events & (EPOLLIN | EPOLLRDNORM)) { if (stat & 0xff0000) ret |= EPOLLIN | EPOLLRDNORM; else { spu_int_stat_clear(spu, 2, CLASS2_MAILBOX_INTR); spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR); } } if (events & (EPOLLOUT | EPOLLWRNORM)) { if (stat & 0x00ff00) ret = EPOLLOUT | EPOLLWRNORM; else { spu_int_stat_clear(spu, 2, CLASS2_MAILBOX_THRESHOLD_INTR); spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR); } } spin_unlock_irq(&spu->register_lock); return ret; } static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data) { struct spu *spu = ctx->spu; struct spu_problem __iomem *prob = spu->problem; struct spu_priv2 __iomem *priv2 = spu->priv2; int ret; spin_lock_irq(&spu->register_lock); if (in_be32(&prob->mb_stat_R) & 0xff0000) { /* read the first available word */ *data = in_be64(&priv2->puint_mb_R); ret = 4; } else { /* make sure we get woken up by the interrupt */ spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR); ret = 0; } spin_unlock_irq(&spu->register_lock); return ret; } static int spu_hw_wbox_write(struct spu_context *ctx, u32 data) { struct spu *spu = ctx->spu; struct spu_problem __iomem *prob = spu->problem; int ret; spin_lock_irq(&spu->register_lock); if (in_be32(&prob->mb_stat_R) & 0x00ff00) { /* we have space to write wbox_data to */ out_be32(&prob->spu_mb_W, data); ret = 4; } else { /* make sure we get woken up by the interrupt when space becomes available */ spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR); ret = 0; } spin_unlock_irq(&spu->register_lock); return ret; } static void spu_hw_signal1_write(struct spu_context *ctx, u32 data) { out_be32(&ctx->spu->problem->signal_notify1, data); } static void spu_hw_signal2_write(struct spu_context *ctx, u32 data) { out_be32(&ctx->spu->problem->signal_notify2, data); } static void spu_hw_signal1_type_set(struct spu_context *ctx, u64 val) { struct spu *spu = ctx->spu; struct spu_priv2 __iomem *priv2 = spu->priv2; u64 tmp; spin_lock_irq(&spu->register_lock); tmp = in_be64(&priv2->spu_cfg_RW); if (val) tmp |= 1; else tmp &= ~1; out_be64(&priv2->spu_cfg_RW, tmp); spin_unlock_irq(&spu->register_lock); } static u64 
spu_hw_signal1_type_get(struct spu_context *ctx) { return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 1) != 0); } static void spu_hw_signal2_type_set(struct spu_context *ctx, u64 val) { struct spu *spu = ctx->spu; struct spu_priv2 __iomem *priv2 = spu->priv2; u64 tmp; spin_lock_irq(&spu->register_lock); tmp = in_be64(&priv2->spu_cfg_RW); if (val) tmp |= 2; else tmp &= ~2; out_be64(&priv2->spu_cfg_RW, tmp); spin_unlock_irq(&spu->register_lock); } static u64 spu_hw_signal2_type_get(struct spu_context *ctx) { return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 2) != 0); } static u32 spu_hw_npc_read(struct spu_context *ctx) { return in_be32(&ctx->spu->problem->spu_npc_RW); } static void spu_hw_npc_write(struct spu_context *ctx, u32 val) { out_be32(&ctx->spu->problem->spu_npc_RW, val); } static u32 spu_hw_status_read(struct spu_context *ctx) { return in_be32(&ctx->spu->problem->spu_status_R); } static char *spu_hw_get_ls(struct spu_context *ctx) { return ctx->spu->local_store; } static void spu_hw_privcntl_write(struct spu_context *ctx, u64 val) { out_be64(&ctx->spu->priv2->spu_privcntl_RW, val); } static u32 spu_hw_runcntl_read(struct spu_context *ctx) { return in_be32(&ctx->spu->problem->spu_runcntl_RW); } static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val) { spin_lock_irq(&ctx->spu->register_lock); if (val & SPU_RUNCNTL_ISOLATE) spu_hw_privcntl_write(ctx, SPU_PRIVCNT_LOAD_REQUEST_ENABLE_MASK); out_be32(&ctx->spu->problem->spu_runcntl_RW, val); spin_unlock_irq(&ctx->spu->register_lock); } static void spu_hw_runcntl_stop(struct spu_context *ctx) { spin_lock_irq(&ctx->spu->register_lock); out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP); while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING) cpu_relax(); spin_unlock_irq(&ctx->spu->register_lock); } static void spu_hw_master_start(struct spu_context *ctx) { struct spu *spu = ctx->spu; u64 sr1; spin_lock_irq(&spu->register_lock); sr1 = spu_mfc_sr1_get(spu) | MFC_STATE1_MASTER_RUN_CONTROL_MASK; spu_mfc_sr1_set(spu, sr1); spin_unlock_irq(&spu->register_lock); } static void spu_hw_master_stop(struct spu_context *ctx) { struct spu *spu = ctx->spu; u64 sr1; spin_lock_irq(&spu->register_lock); sr1 = spu_mfc_sr1_get(spu) & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK; spu_mfc_sr1_set(spu, sr1); spin_unlock_irq(&spu->register_lock); } static int spu_hw_set_mfc_query(struct spu_context * ctx, u32 mask, u32 mode) { struct spu_problem __iomem *prob = ctx->spu->problem; int ret; spin_lock_irq(&ctx->spu->register_lock); ret = -EAGAIN; if (in_be32(&prob->dma_querytype_RW)) goto out; ret = 0; out_be32(&prob->dma_querymask_RW, mask); out_be32(&prob->dma_querytype_RW, mode); out: spin_unlock_irq(&ctx->spu->register_lock); return ret; } static u32 spu_hw_read_mfc_tagstatus(struct spu_context * ctx) { return in_be32(&ctx->spu->problem->dma_tagstatus_R); } static u32 spu_hw_get_mfc_free_elements(struct spu_context *ctx) { return in_be32(&ctx->spu->problem->dma_qstatus_R); } static int spu_hw_send_mfc_command(struct spu_context *ctx, struct mfc_dma_command *cmd) { u32 status; struct spu_problem __iomem *prob = ctx->spu->problem; spin_lock_irq(&ctx->spu->register_lock); out_be32(&prob->mfc_lsa_W, cmd->lsa); out_be64(&prob->mfc_ea_W, cmd->ea); out_be32(&prob->mfc_union_W.by32.mfc_size_tag32, cmd->size << 16 | cmd->tag); out_be32(&prob->mfc_union_W.by32.mfc_class_cmd32, cmd->class << 16 | cmd->cmd); status = in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32); spin_unlock_irq(&ctx->spu->register_lock); switch (status & 0xffff) { case 0: 
return 0; case 2: return -EAGAIN; default: return -EINVAL; } } static void spu_hw_restart_dma(struct spu_context *ctx) { struct spu_priv2 __iomem *priv2 = ctx->spu->priv2; if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &ctx->spu->flags)) out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND); } struct spu_context_ops spu_hw_ops = { .mbox_read = spu_hw_mbox_read, .mbox_stat_read = spu_hw_mbox_stat_read, .mbox_stat_poll = spu_hw_mbox_stat_poll, .ibox_read = spu_hw_ibox_read, .wbox_write = spu_hw_wbox_write, .signal1_write = spu_hw_signal1_write, .signal2_write = spu_hw_signal2_write, .signal1_type_set = spu_hw_signal1_type_set, .signal1_type_get = spu_hw_signal1_type_get, .signal2_type_set = spu_hw_signal2_type_set, .signal2_type_get = spu_hw_signal2_type_get, .npc_read = spu_hw_npc_read, .npc_write = spu_hw_npc_write, .status_read = spu_hw_status_read, .get_ls = spu_hw_get_ls, .privcntl_write = spu_hw_privcntl_write, .runcntl_read = spu_hw_runcntl_read, .runcntl_write = spu_hw_runcntl_write, .runcntl_stop = spu_hw_runcntl_stop, .master_start = spu_hw_master_start, .master_stop = spu_hw_master_stop, .set_mfc_query = spu_hw_set_mfc_query, .read_mfc_tagstatus = spu_hw_read_mfc_tagstatus, .get_mfc_free_elements = spu_hw_get_mfc_free_elements, .send_mfc_command = spu_hw_send_mfc_command, .restart_dma = spu_hw_restart_dma, };
linux-master
arch/powerpc/platforms/cell/spufs/hw_ops.c
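A stand-alone decode of a hypothetical mb_stat_R value using the same masks as the mailbox ops above: bits 0-7 gate reads of pu_mb_R, bits 8-15 gate writes to spu_mb_W and bits 16-23 gate reads of puint_mb_R. The status value itself is invented.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t stat = 0x00010203;	/* made-up mb_stat_R value */

	printf("pu_mb_R entries   : %u\n", (unsigned)(stat & 0x0000ff));
	printf("spu_mb_W free     : %u\n", (unsigned)((stat & 0x00ff00) >> 8));
	printf("puint_mb_R entries: %u\n", (unsigned)((stat & 0xff0000) >> 16));
	return 0;
}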
// SPDX-License-Identifier: GPL-2.0-or-later /* * spu_restore.c * * (C) Copyright IBM Corp. 2005 * * SPU-side context restore sequence outlined in * Synergistic Processor Element Book IV * * Author: Mark Nutter <[email protected]> */ #ifndef LS_SIZE #define LS_SIZE 0x40000 /* 256K (in bytes) */ #endif typedef unsigned int u32; typedef unsigned long long u64; #include <spu_intrinsics.h> #include <asm/spu_csa.h> #include "spu_utils.h" #define BR_INSTR 0x327fff80 /* br -4 */ #define NOP_INSTR 0x40200000 /* nop */ #define HEQ_INSTR 0x7b000000 /* heq $0, $0 */ #define STOP_INSTR 0x00000000 /* stop 0x0 */ #define ILLEGAL_INSTR 0x00800000 /* illegal instr */ #define RESTORE_COMPLETE 0x00003ffc /* stop 0x3ffc */ static inline void fetch_regs_from_mem(addr64 lscsa_ea) { unsigned int ls = (unsigned int)&regs_spill[0]; unsigned int size = sizeof(regs_spill); unsigned int tag_id = 0; unsigned int cmd = 0x40; /* GET */ spu_writech(MFC_LSA, ls); spu_writech(MFC_EAH, lscsa_ea.ui[0]); spu_writech(MFC_EAL, lscsa_ea.ui[1]); spu_writech(MFC_Size, size); spu_writech(MFC_TagID, tag_id); spu_writech(MFC_Cmd, cmd); } static inline void restore_upper_240kb(addr64 lscsa_ea) { unsigned int ls = 16384; unsigned int list = (unsigned int)&dma_list[0]; unsigned int size = sizeof(dma_list); unsigned int tag_id = 0; unsigned int cmd = 0x44; /* GETL */ /* Restore, Step 4: * Enqueue the GETL command (tag 0) to the MFC SPU command * queue to transfer the upper 240 kb of LS from CSA. */ spu_writech(MFC_LSA, ls); spu_writech(MFC_EAH, lscsa_ea.ui[0]); spu_writech(MFC_EAL, list); spu_writech(MFC_Size, size); spu_writech(MFC_TagID, tag_id); spu_writech(MFC_Cmd, cmd); } static inline void restore_decr(void) { unsigned int offset; unsigned int decr_running; unsigned int decr; /* Restore, Step 6(moved): * If the LSCSA "decrementer running" flag is set * then write the SPU_WrDec channel with the * decrementer value from LSCSA. */ offset = LSCSA_QW_OFFSET(decr_status); decr_running = regs_spill[offset].slot[0] & SPU_DECR_STATUS_RUNNING; if (decr_running) { offset = LSCSA_QW_OFFSET(decr); decr = regs_spill[offset].slot[0]; spu_writech(SPU_WrDec, decr); } } static inline void write_ppu_mb(void) { unsigned int offset; unsigned int data; /* Restore, Step 11: * Write the MFC_WrOut_MB channel with the PPU_MB * data from LSCSA. */ offset = LSCSA_QW_OFFSET(ppu_mb); data = regs_spill[offset].slot[0]; spu_writech(SPU_WrOutMbox, data); } static inline void write_ppuint_mb(void) { unsigned int offset; unsigned int data; /* Restore, Step 12: * Write the MFC_WrInt_MB channel with the PPUINT_MB * data from LSCSA. */ offset = LSCSA_QW_OFFSET(ppuint_mb); data = regs_spill[offset].slot[0]; spu_writech(SPU_WrOutIntrMbox, data); } static inline void restore_fpcr(void) { unsigned int offset; vector unsigned int fpcr; /* Restore, Step 13: * Restore the floating-point status and control * register from the LSCSA. */ offset = LSCSA_QW_OFFSET(fpcr); fpcr = regs_spill[offset].v; spu_mtfpscr(fpcr); } static inline void restore_srr0(void) { unsigned int offset; unsigned int srr0; /* Restore, Step 14: * Restore the SPU SRR0 data from the LSCSA. */ offset = LSCSA_QW_OFFSET(srr0); srr0 = regs_spill[offset].slot[0]; spu_writech(SPU_WrSRR0, srr0); } static inline void restore_event_mask(void) { unsigned int offset; unsigned int event_mask; /* Restore, Step 15: * Restore the SPU_RdEventMsk data from the LSCSA. 
*/ offset = LSCSA_QW_OFFSET(event_mask); event_mask = regs_spill[offset].slot[0]; spu_writech(SPU_WrEventMask, event_mask); } static inline void restore_tag_mask(void) { unsigned int offset; unsigned int tag_mask; /* Restore, Step 16: * Restore the SPU_RdTagMsk data from the LSCSA. */ offset = LSCSA_QW_OFFSET(tag_mask); tag_mask = regs_spill[offset].slot[0]; spu_writech(MFC_WrTagMask, tag_mask); } static inline void restore_complete(void) { extern void exit_fini(void); unsigned int *exit_instrs = (unsigned int *)exit_fini; unsigned int offset; unsigned int stopped_status; unsigned int stopped_code; /* Restore, Step 18: * Issue a stop-and-signal instruction with * "good context restore" signal value. * * Restore, Step 19: * There may be additional instructions placed * here by the PPE Sequence for SPU Context * Restore in order to restore the correct * "stopped state". * * This step is handled here by analyzing the * LSCSA.stopped_status and then modifying the * exit() function to behave appropriately. */ offset = LSCSA_QW_OFFSET(stopped_status); stopped_status = regs_spill[offset].slot[0]; stopped_code = regs_spill[offset].slot[1]; switch (stopped_status) { case SPU_STOPPED_STATUS_P_I: /* SPU_Status[P,I]=1. Add illegal instruction * followed by stop-and-signal instruction after * end of restore code. */ exit_instrs[0] = RESTORE_COMPLETE; exit_instrs[1] = ILLEGAL_INSTR; exit_instrs[2] = STOP_INSTR | stopped_code; break; case SPU_STOPPED_STATUS_P_H: /* SPU_Status[P,H]=1. Add 'heq $0, $0' followed * by stop-and-signal instruction after end of * restore code. */ exit_instrs[0] = RESTORE_COMPLETE; exit_instrs[1] = HEQ_INSTR; exit_instrs[2] = STOP_INSTR | stopped_code; break; case SPU_STOPPED_STATUS_S_P: /* SPU_Status[S,P]=1. Add nop instruction * followed by 'br -4' after end of restore * code. */ exit_instrs[0] = RESTORE_COMPLETE; exit_instrs[1] = STOP_INSTR | stopped_code; exit_instrs[2] = NOP_INSTR; exit_instrs[3] = BR_INSTR; break; case SPU_STOPPED_STATUS_S_I: /* SPU_Status[S,I]=1. Add illegal instruction * followed by 'br -4' after end of restore code. */ exit_instrs[0] = RESTORE_COMPLETE; exit_instrs[1] = ILLEGAL_INSTR; exit_instrs[2] = NOP_INSTR; exit_instrs[3] = BR_INSTR; break; case SPU_STOPPED_STATUS_I: /* SPU_Status[I]=1. Add illegal instruction followed * by infinite loop after end of restore sequence. */ exit_instrs[0] = RESTORE_COMPLETE; exit_instrs[1] = ILLEGAL_INSTR; exit_instrs[2] = NOP_INSTR; exit_instrs[3] = BR_INSTR; break; case SPU_STOPPED_STATUS_S: /* SPU_Status[S]=1. Add two 'nop' instructions. */ exit_instrs[0] = RESTORE_COMPLETE; exit_instrs[1] = NOP_INSTR; exit_instrs[2] = NOP_INSTR; exit_instrs[3] = BR_INSTR; break; case SPU_STOPPED_STATUS_H: /* SPU_Status[H]=1. Add 'heq $0, $0' instruction * after end of restore code. */ exit_instrs[0] = RESTORE_COMPLETE; exit_instrs[1] = HEQ_INSTR; exit_instrs[2] = NOP_INSTR; exit_instrs[3] = BR_INSTR; break; case SPU_STOPPED_STATUS_P: /* SPU_Status[P]=1. Add stop-and-signal instruction * after end of restore code. */ exit_instrs[0] = RESTORE_COMPLETE; exit_instrs[1] = STOP_INSTR | stopped_code; break; case SPU_STOPPED_STATUS_R: /* SPU_Status[I,S,H,P,R]=0. Add infinite loop. */ exit_instrs[0] = RESTORE_COMPLETE; exit_instrs[1] = NOP_INSTR; exit_instrs[2] = NOP_INSTR; exit_instrs[3] = BR_INSTR; break; default: /* SPU_Status[R]=1. No additional instructions. */ break; } spu_sync(); } /** * main - entry point for SPU-side context restore. * * This code deviates from the documented sequence in the * following aspects: * * 1. 
The EA for LSCSA is passed from PPE in the
 *       signal notification channels.
 *    2. The register spill area is pulled by SPU
 *       into LS, rather than pushed by PPE.
 *    3. All 128 registers are restored by exit().
 *    4. The exit() function is modified at run
 *       time in order to properly restore the
 *       SPU_Status register.
 */
int main()
{
	addr64 lscsa_ea;

	lscsa_ea.ui[0] = spu_readch(SPU_RdSigNotify1);
	lscsa_ea.ui[1] = spu_readch(SPU_RdSigNotify2);
	fetch_regs_from_mem(lscsa_ea);

	set_event_mask();		/* Step 1.  */
	set_tag_mask();			/* Step 2.  */
	build_dma_list(lscsa_ea);	/* Step 3.  */
	restore_upper_240kb(lscsa_ea);	/* Step 4.  */
	/* Step 5: done by 'exit'.  */
	enqueue_putllc(lscsa_ea);	/* Step 7.  */
	set_tag_update();		/* Step 8.  */
	read_tag_status();		/* Step 9.  */
	restore_decr();			/* moved Step 6. */
	read_llar_status();		/* Step 10. */
	write_ppu_mb();			/* Step 11. */
	write_ppuint_mb();		/* Step 12. */
	restore_fpcr();			/* Step 13. */
	restore_srr0();			/* Step 14. */
	restore_event_mask();		/* Step 15. */
	restore_tag_mask();		/* Step 16. */
	/* Step 17. done by 'exit'.  */
	restore_complete();		/* Step 18. */

	return 0;
}
linux-master
arch/powerpc/platforms/cell/spufs/spu_restore.c
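spu_restore.c receives the 64-bit effective address of the LSCSA as two 32-bit halves through the signal-notification channels (SigNotify1 carries the upper word, SigNotify2 the lower word, per the set_signot1/set_signot2 comments later in this section) and rebuilds it in an addr64 union. The following host-side sketch only illustrates that split and reassembly; addr64_demo and the printed channel names are illustrative, not spufs interfaces.

#include <stdio.h>
#include <stdint.h>

/* addr64-style union: one 64-bit effective address viewed as two
 * 32-bit words. On the big-endian PPE, ui[0] is the upper half. */
typedef union {
	uint64_t ull;
	uint32_t ui[2];
} addr64_demo;

int main(void)
{
	addr64_demo ea = { .ull = 0x123456789abcdef0ULL };
	uint32_t hi, lo;

	/* Derive the halves by shifting so the demo is endian-neutral. */
	hi = (uint32_t)(ea.ull >> 32);
	lo = (uint32_t)(ea.ull & 0xffffffffULL);
	printf("notify1 (upper) = 0x%08x, notify2 (lower) = 0x%08x\n", hi, lo);

	/* SPU side: read the two channels and rebuild the 64-bit EA. */
	addr64_demo rebuilt;
	rebuilt.ull = ((uint64_t)hi << 32) | lo;
	printf("rebuilt EA      = 0x%016llx\n", (unsigned long long)rebuilt.ull);
	return 0;
}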
// SPDX-License-Identifier: GPL-2.0-or-later /* sched.c - SPU scheduler. * * Copyright (C) IBM 2005 * Author: Mark Nutter <[email protected]> * * 2006-03-31 NUMA domains added. */ #undef DEBUG #include <linux/errno.h> #include <linux/sched/signal.h> #include <linux/sched/loadavg.h> #include <linux/sched/rt.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/completion.h> #include <linux/vmalloc.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/numa.h> #include <linux/mutex.h> #include <linux/notifier.h> #include <linux/kthread.h> #include <linux/pid_namespace.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <asm/io.h> #include <asm/mmu_context.h> #include <asm/spu.h> #include <asm/spu_csa.h> #include <asm/spu_priv1.h> #include "spufs.h" #define CREATE_TRACE_POINTS #include "sputrace.h" struct spu_prio_array { DECLARE_BITMAP(bitmap, MAX_PRIO); struct list_head runq[MAX_PRIO]; spinlock_t runq_lock; int nr_waiting; }; static unsigned long spu_avenrun[3]; static struct spu_prio_array *spu_prio; static struct task_struct *spusched_task; static struct timer_list spusched_timer; static struct timer_list spuloadavg_timer; /* * Priority of a normal, non-rt, non-niced'd process (aka nice level 0). */ #define NORMAL_PRIO 120 /* * Frequency of the spu scheduler tick. By default we do one SPU scheduler * tick for every 10 CPU scheduler ticks. */ #define SPUSCHED_TICK (10) /* * These are the 'tuning knobs' of the scheduler: * * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs. */ #define MIN_SPU_TIMESLICE max(5 * HZ / (1000 * SPUSCHED_TICK), 1) #define DEF_SPU_TIMESLICE (100 * HZ / (1000 * SPUSCHED_TICK)) #define SCALE_PRIO(x, prio) \ max(x * (MAX_PRIO - prio) / (NICE_WIDTH / 2), MIN_SPU_TIMESLICE) /* * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values: * [800ms ... 100ms ... 5ms] * * The higher a thread's priority, the bigger timeslices * it gets during one round of execution. But even the lowest * priority thread gets MIN_TIMESLICE worth of execution time. */ void spu_set_timeslice(struct spu_context *ctx) { if (ctx->prio < NORMAL_PRIO) ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio); else ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio); } /* * Update scheduling information from the owning thread. */ void __spu_update_sched_info(struct spu_context *ctx) { /* * assert that the context is not on the runqueue, so it is safe * to change its scheduling parameters. */ BUG_ON(!list_empty(&ctx->rq)); /* * 32-Bit assignments are atomic on powerpc, and we don't care about * memory ordering here because retrieving the controlling thread is * per definition racy. */ ctx->tid = current->pid; /* * We do our own priority calculations, so we normally want * ->static_prio to start with. Unfortunately this field * contains junk for threads with a realtime scheduling * policy so we have to look at ->prio in this case. */ if (rt_prio(current->prio)) ctx->prio = current->prio; else ctx->prio = current->static_prio; ctx->policy = current->policy; /* * TO DO: the context may be loaded, so we may need to activate * it again on a different node. But it shouldn't hurt anything * to update its parameters, because we know that the scheduler * is not actively looking at this field, since it is not on the * runqueue. 
The context will be rescheduled on the proper node * if it is timesliced or preempted. */ cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr); /* Save the current cpu id for spu interrupt routing. */ ctx->last_ran = raw_smp_processor_id(); } void spu_update_sched_info(struct spu_context *ctx) { int node; if (ctx->state == SPU_STATE_RUNNABLE) { node = ctx->spu->node; /* * Take list_mutex to sync with find_victim(). */ mutex_lock(&cbe_spu_info[node].list_mutex); __spu_update_sched_info(ctx); mutex_unlock(&cbe_spu_info[node].list_mutex); } else { __spu_update_sched_info(ctx); } } static int __node_allowed(struct spu_context *ctx, int node) { if (nr_cpus_node(node)) { const struct cpumask *mask = cpumask_of_node(node); if (cpumask_intersects(mask, &ctx->cpus_allowed)) return 1; } return 0; } static int node_allowed(struct spu_context *ctx, int node) { int rval; spin_lock(&spu_prio->runq_lock); rval = __node_allowed(ctx, node); spin_unlock(&spu_prio->runq_lock); return rval; } void do_notify_spus_active(void) { int node; /* * Wake up the active spu_contexts. */ for_each_online_node(node) { struct spu *spu; mutex_lock(&cbe_spu_info[node].list_mutex); list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { if (spu->alloc_state != SPU_FREE) { struct spu_context *ctx = spu->ctx; set_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags); mb(); wake_up_all(&ctx->stop_wq); } } mutex_unlock(&cbe_spu_info[node].list_mutex); } } /** * spu_bind_context - bind spu context to physical spu * @spu: physical spu to bind to * @ctx: context to bind */ static void spu_bind_context(struct spu *spu, struct spu_context *ctx) { spu_context_trace(spu_bind_context__enter, ctx, spu); spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); if (ctx->flags & SPU_CREATE_NOSCHED) atomic_inc(&cbe_spu_info[spu->node].reserved_spus); ctx->stats.slb_flt_base = spu->stats.slb_flt; ctx->stats.class2_intr_base = spu->stats.class2_intr; spu_associate_mm(spu, ctx->owner); spin_lock_irq(&spu->register_lock); spu->ctx = ctx; spu->flags = 0; ctx->spu = spu; ctx->ops = &spu_hw_ops; spu->pid = current->pid; spu->tgid = current->tgid; spu->ibox_callback = spufs_ibox_callback; spu->wbox_callback = spufs_wbox_callback; spu->stop_callback = spufs_stop_callback; spu->mfc_callback = spufs_mfc_callback; spin_unlock_irq(&spu->register_lock); spu_unmap_mappings(ctx); spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0); spu_restore(&ctx->csa, spu); spu->timestamp = jiffies; ctx->state = SPU_STATE_RUNNABLE; spuctx_switch_state(ctx, SPU_UTIL_USER); } /* * Must be used with the list_mutex held. 
*/ static inline int sched_spu(struct spu *spu) { BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex)); return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED)); } static void aff_merge_remaining_ctxs(struct spu_gang *gang) { struct spu_context *ctx; list_for_each_entry(ctx, &gang->aff_list_head, aff_list) { if (list_empty(&ctx->aff_list)) list_add(&ctx->aff_list, &gang->aff_list_head); } gang->aff_flags |= AFF_MERGED; } static void aff_set_offsets(struct spu_gang *gang) { struct spu_context *ctx; int offset; offset = -1; list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list, aff_list) { if (&ctx->aff_list == &gang->aff_list_head) break; ctx->aff_offset = offset--; } offset = 0; list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) { if (&ctx->aff_list == &gang->aff_list_head) break; ctx->aff_offset = offset++; } gang->aff_flags |= AFF_OFFSETS_SET; } static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff, int group_size, int lowest_offset) { struct spu *spu; int node, n; /* * TODO: A better algorithm could be used to find a good spu to be * used as reference location for the ctxs chain. */ node = cpu_to_node(raw_smp_processor_id()); for (n = 0; n < MAX_NUMNODES; n++, node++) { /* * "available_spus" counts how many spus are not potentially * going to be used by other affinity gangs whose reference * context is already in place. Although this code seeks to * avoid having affinity gangs with a summed amount of * contexts bigger than the amount of spus in the node, * this may happen sporadically. In this case, available_spus * becomes negative, which is harmless. */ int available_spus; node = (node < MAX_NUMNODES) ? node : 0; if (!node_allowed(ctx, node)) continue; available_spus = 0; mutex_lock(&cbe_spu_info[node].list_mutex); list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset && spu->ctx->gang->aff_ref_spu) available_spus -= spu->ctx->gang->contexts; available_spus++; } if (available_spus < ctx->gang->contexts) { mutex_unlock(&cbe_spu_info[node].list_mutex); continue; } list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { if ((!mem_aff || spu->has_mem_affinity) && sched_spu(spu)) { mutex_unlock(&cbe_spu_info[node].list_mutex); return spu; } } mutex_unlock(&cbe_spu_info[node].list_mutex); } return NULL; } static void aff_set_ref_point_location(struct spu_gang *gang) { int mem_aff, gs, lowest_offset; struct spu_context *tmp, *ctx; mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM; lowest_offset = 0; gs = 0; list_for_each_entry(tmp, &gang->aff_list_head, aff_list) gs++; list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list, aff_list) { if (&ctx->aff_list == &gang->aff_list_head) break; lowest_offset = ctx->aff_offset; } gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs, lowest_offset); } static struct spu *ctx_location(struct spu *ref, int offset, int node) { struct spu *spu; spu = NULL; if (offset >= 0) { list_for_each_entry(spu, ref->aff_list.prev, aff_list) { BUG_ON(spu->node != node); if (offset == 0) break; if (sched_spu(spu)) offset--; } } else { list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) { BUG_ON(spu->node != node); if (offset == 0) break; if (sched_spu(spu)) offset++; } } return spu; } /* * affinity_check is called each time a context is going to be scheduled. * It returns the spu ptr on which the context must run. 
*/ static int has_affinity(struct spu_context *ctx) { struct spu_gang *gang = ctx->gang; if (list_empty(&ctx->aff_list)) return 0; if (atomic_read(&ctx->gang->aff_sched_count) == 0) ctx->gang->aff_ref_spu = NULL; if (!gang->aff_ref_spu) { if (!(gang->aff_flags & AFF_MERGED)) aff_merge_remaining_ctxs(gang); if (!(gang->aff_flags & AFF_OFFSETS_SET)) aff_set_offsets(gang); aff_set_ref_point_location(gang); } return gang->aff_ref_spu != NULL; } /** * spu_unbind_context - unbind spu context from physical spu * @spu: physical spu to unbind from * @ctx: context to unbind */ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx) { u32 status; spu_context_trace(spu_unbind_context__enter, ctx, spu); spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); if (spu->ctx->flags & SPU_CREATE_NOSCHED) atomic_dec(&cbe_spu_info[spu->node].reserved_spus); if (ctx->gang) /* * If ctx->gang->aff_sched_count is positive, SPU affinity is * being considered in this gang. Using atomic_dec_if_positive * allow us to skip an explicit check for affinity in this gang */ atomic_dec_if_positive(&ctx->gang->aff_sched_count); spu_unmap_mappings(ctx); spu_save(&ctx->csa, spu); spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0); spin_lock_irq(&spu->register_lock); spu->timestamp = jiffies; ctx->state = SPU_STATE_SAVED; spu->ibox_callback = NULL; spu->wbox_callback = NULL; spu->stop_callback = NULL; spu->mfc_callback = NULL; spu->pid = 0; spu->tgid = 0; ctx->ops = &spu_backing_ops; spu->flags = 0; spu->ctx = NULL; spin_unlock_irq(&spu->register_lock); spu_associate_mm(spu, NULL); ctx->stats.slb_flt += (spu->stats.slb_flt - ctx->stats.slb_flt_base); ctx->stats.class2_intr += (spu->stats.class2_intr - ctx->stats.class2_intr_base); /* This maps the underlying spu state to idle */ spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED); ctx->spu = NULL; if (spu_stopped(ctx, &status)) wake_up_all(&ctx->stop_wq); } /** * spu_add_to_rq - add a context to the runqueue * @ctx: context to add */ static void __spu_add_to_rq(struct spu_context *ctx) { /* * Unfortunately this code path can be called from multiple threads * on behalf of a single context due to the way the problem state * mmap support works. * * Fortunately we need to wake up all these threads at the same time * and can simply skip the runqueue addition for every but the first * thread getting into this codepath. * * It's still quite hacky, and long-term we should proxy all other * threads through the owner thread so that spu_run is in control * of all the scheduling activity for a given context. */ if (list_empty(&ctx->rq)) { list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]); set_bit(ctx->prio, spu_prio->bitmap); if (!spu_prio->nr_waiting++) mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK); } } static void spu_add_to_rq(struct spu_context *ctx) { spin_lock(&spu_prio->runq_lock); __spu_add_to_rq(ctx); spin_unlock(&spu_prio->runq_lock); } static void __spu_del_from_rq(struct spu_context *ctx) { int prio = ctx->prio; if (!list_empty(&ctx->rq)) { if (!--spu_prio->nr_waiting) del_timer(&spusched_timer); list_del_init(&ctx->rq); if (list_empty(&spu_prio->runq[prio])) clear_bit(prio, spu_prio->bitmap); } } void spu_del_from_rq(struct spu_context *ctx) { spin_lock(&spu_prio->runq_lock); __spu_del_from_rq(ctx); spin_unlock(&spu_prio->runq_lock); } static void spu_prio_wait(struct spu_context *ctx) { DEFINE_WAIT(wait); /* * The caller must explicitly wait for a context to be loaded * if the nosched flag is set. 
If NOSCHED is not set, the caller * queues the context and waits for an spu event or error. */ BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED)); spin_lock(&spu_prio->runq_lock); prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE); if (!signal_pending(current)) { __spu_add_to_rq(ctx); spin_unlock(&spu_prio->runq_lock); mutex_unlock(&ctx->state_mutex); schedule(); mutex_lock(&ctx->state_mutex); spin_lock(&spu_prio->runq_lock); __spu_del_from_rq(ctx); } spin_unlock(&spu_prio->runq_lock); __set_current_state(TASK_RUNNING); remove_wait_queue(&ctx->stop_wq, &wait); } static struct spu *spu_get_idle(struct spu_context *ctx) { struct spu *spu, *aff_ref_spu; int node, n; spu_context_nospu_trace(spu_get_idle__enter, ctx); if (ctx->gang) { mutex_lock(&ctx->gang->aff_mutex); if (has_affinity(ctx)) { aff_ref_spu = ctx->gang->aff_ref_spu; atomic_inc(&ctx->gang->aff_sched_count); mutex_unlock(&ctx->gang->aff_mutex); node = aff_ref_spu->node; mutex_lock(&cbe_spu_info[node].list_mutex); spu = ctx_location(aff_ref_spu, ctx->aff_offset, node); if (spu && spu->alloc_state == SPU_FREE) goto found; mutex_unlock(&cbe_spu_info[node].list_mutex); atomic_dec(&ctx->gang->aff_sched_count); goto not_found; } mutex_unlock(&ctx->gang->aff_mutex); } node = cpu_to_node(raw_smp_processor_id()); for (n = 0; n < MAX_NUMNODES; n++, node++) { node = (node < MAX_NUMNODES) ? node : 0; if (!node_allowed(ctx, node)) continue; mutex_lock(&cbe_spu_info[node].list_mutex); list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { if (spu->alloc_state == SPU_FREE) goto found; } mutex_unlock(&cbe_spu_info[node].list_mutex); } not_found: spu_context_nospu_trace(spu_get_idle__not_found, ctx); return NULL; found: spu->alloc_state = SPU_USED; mutex_unlock(&cbe_spu_info[node].list_mutex); spu_context_trace(spu_get_idle__found, ctx, spu); spu_init_channels(spu); return spu; } /** * find_victim - find a lower priority context to preempt * @ctx: candidate context for running * * Returns the freed physical spu to run the new context on. */ static struct spu *find_victim(struct spu_context *ctx) { struct spu_context *victim = NULL; struct spu *spu; int node, n; spu_context_nospu_trace(spu_find_victim__enter, ctx); /* * Look for a possible preemption candidate on the local node first. * If there is no candidate look at the other nodes. This isn't * exactly fair, but so far the whole spu scheduler tries to keep * a strong node affinity. We might want to fine-tune this in * the future. */ restart: node = cpu_to_node(raw_smp_processor_id()); for (n = 0; n < MAX_NUMNODES; n++, node++) { node = (node < MAX_NUMNODES) ? node : 0; if (!node_allowed(ctx, node)) continue; mutex_lock(&cbe_spu_info[node].list_mutex); list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { struct spu_context *tmp = spu->ctx; if (tmp && tmp->prio > ctx->prio && !(tmp->flags & SPU_CREATE_NOSCHED) && (!victim || tmp->prio > victim->prio)) { victim = spu->ctx; } } if (victim) get_spu_context(victim); mutex_unlock(&cbe_spu_info[node].list_mutex); if (victim) { /* * This nests ctx->state_mutex, but we always lock * higher priority contexts before lower priority * ones, so this is safe until we introduce * priority inheritance schemes. * * XXX if the highest priority context is locked, * this can loop a long time. Might be better to * look at another context or give up after X retries. 
*/ if (!mutex_trylock(&victim->state_mutex)) { put_spu_context(victim); victim = NULL; goto restart; } spu = victim->spu; if (!spu || victim->prio <= ctx->prio) { /* * This race can happen because we've dropped * the active list mutex. Not a problem, just * restart the search. */ mutex_unlock(&victim->state_mutex); put_spu_context(victim); victim = NULL; goto restart; } spu_context_trace(__spu_deactivate__unload, ctx, spu); mutex_lock(&cbe_spu_info[node].list_mutex); cbe_spu_info[node].nr_active--; spu_unbind_context(spu, victim); mutex_unlock(&cbe_spu_info[node].list_mutex); victim->stats.invol_ctx_switch++; spu->stats.invol_ctx_switch++; if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags)) spu_add_to_rq(victim); mutex_unlock(&victim->state_mutex); put_spu_context(victim); return spu; } } return NULL; } static void __spu_schedule(struct spu *spu, struct spu_context *ctx) { int node = spu->node; int success = 0; spu_set_timeslice(ctx); mutex_lock(&cbe_spu_info[node].list_mutex); if (spu->ctx == NULL) { spu_bind_context(spu, ctx); cbe_spu_info[node].nr_active++; spu->alloc_state = SPU_USED; success = 1; } mutex_unlock(&cbe_spu_info[node].list_mutex); if (success) wake_up_all(&ctx->run_wq); else spu_add_to_rq(ctx); } static void spu_schedule(struct spu *spu, struct spu_context *ctx) { /* not a candidate for interruptible because it's called either from the scheduler thread or from spu_deactivate */ mutex_lock(&ctx->state_mutex); if (ctx->state == SPU_STATE_SAVED) __spu_schedule(spu, ctx); spu_release(ctx); } /** * spu_unschedule - remove a context from a spu, and possibly release it. * @spu: The SPU to unschedule from * @ctx: The context currently scheduled on the SPU * @free_spu Whether to free the SPU for other contexts * * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the * SPU is made available for other contexts (ie, may be returned by * spu_get_idle). If this is zero, the caller is expected to schedule another * context to this spu. * * Should be called with ctx->state_mutex held. */ static void spu_unschedule(struct spu *spu, struct spu_context *ctx, int free_spu) { int node = spu->node; mutex_lock(&cbe_spu_info[node].list_mutex); cbe_spu_info[node].nr_active--; if (free_spu) spu->alloc_state = SPU_FREE; spu_unbind_context(spu, ctx); ctx->stats.invol_ctx_switch++; spu->stats.invol_ctx_switch++; mutex_unlock(&cbe_spu_info[node].list_mutex); } /** * spu_activate - find a free spu for a context and execute it * @ctx: spu context to schedule * @flags: flags (currently ignored) * * Tries to find a free spu to run @ctx. If no free spu is available * add the context to the runqueue so it gets woken up once an spu * is available. */ int spu_activate(struct spu_context *ctx, unsigned long flags) { struct spu *spu; /* * If there are multiple threads waiting for a single context * only one actually binds the context while the others will * only be able to acquire the state_mutex once the context * already is in runnable state. */ if (ctx->spu) return 0; spu_activate_top: if (signal_pending(current)) return -ERESTARTSYS; spu = spu_get_idle(ctx); /* * If this is a realtime thread we try to get it running by * preempting a lower priority thread. 
*/ if (!spu && rt_prio(ctx->prio)) spu = find_victim(ctx); if (spu) { unsigned long runcntl; runcntl = ctx->ops->runcntl_read(ctx); __spu_schedule(spu, ctx); if (runcntl & SPU_RUNCNTL_RUNNABLE) spuctx_switch_state(ctx, SPU_UTIL_USER); return 0; } if (ctx->flags & SPU_CREATE_NOSCHED) { spu_prio_wait(ctx); goto spu_activate_top; } spu_add_to_rq(ctx); return 0; } /** * grab_runnable_context - try to find a runnable context * * Remove the highest priority context on the runqueue and return it * to the caller. Returns %NULL if no runnable context was found. */ static struct spu_context *grab_runnable_context(int prio, int node) { struct spu_context *ctx; int best; spin_lock(&spu_prio->runq_lock); best = find_first_bit(spu_prio->bitmap, prio); while (best < prio) { struct list_head *rq = &spu_prio->runq[best]; list_for_each_entry(ctx, rq, rq) { /* XXX(hch): check for affinity here as well */ if (__node_allowed(ctx, node)) { __spu_del_from_rq(ctx); goto found; } } best++; } ctx = NULL; found: spin_unlock(&spu_prio->runq_lock); return ctx; } static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio) { struct spu *spu = ctx->spu; struct spu_context *new = NULL; if (spu) { new = grab_runnable_context(max_prio, spu->node); if (new || force) { spu_unschedule(spu, ctx, new == NULL); if (new) { if (new->flags & SPU_CREATE_NOSCHED) wake_up(&new->stop_wq); else { spu_release(ctx); spu_schedule(spu, new); /* this one can't easily be made interruptible */ mutex_lock(&ctx->state_mutex); } } } } return new != NULL; } /** * spu_deactivate - unbind a context from it's physical spu * @ctx: spu context to unbind * * Unbind @ctx from the physical spu it is running on and schedule * the highest priority context to run on the freed physical spu. */ void spu_deactivate(struct spu_context *ctx) { spu_context_nospu_trace(spu_deactivate__enter, ctx); __spu_deactivate(ctx, 1, MAX_PRIO); } /** * spu_yield - yield a physical spu if others are waiting * @ctx: spu context to yield * * Check if there is a higher priority context waiting and if yes * unbind @ctx from the physical spu and schedule the highest * priority context to run on the freed physical spu instead. */ void spu_yield(struct spu_context *ctx) { spu_context_nospu_trace(spu_yield__enter, ctx); if (!(ctx->flags & SPU_CREATE_NOSCHED)) { mutex_lock(&ctx->state_mutex); __spu_deactivate(ctx, 0, MAX_PRIO); mutex_unlock(&ctx->state_mutex); } } static noinline void spusched_tick(struct spu_context *ctx) { struct spu_context *new = NULL; struct spu *spu = NULL; if (spu_acquire(ctx)) BUG(); /* a kernel thread never has signals pending */ if (ctx->state != SPU_STATE_RUNNABLE) goto out; if (ctx->flags & SPU_CREATE_NOSCHED) goto out; if (ctx->policy == SCHED_FIFO) goto out; if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags)) goto out; spu = ctx->spu; spu_context_trace(spusched_tick__preempt, ctx, spu); new = grab_runnable_context(ctx->prio + 1, spu->node); if (new) { spu_unschedule(spu, ctx, 0); if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags)) spu_add_to_rq(ctx); } else { spu_context_nospu_trace(spusched_tick__newslice, ctx); if (!ctx->time_slice) ctx->time_slice++; } out: spu_release(ctx); if (new) spu_schedule(spu, new); } /** * count_active_contexts - count nr of active tasks * * Return the number of tasks currently running or waiting to run. * * Note that we don't take runq_lock / list_mutex here. Reading * a single 32bit value is atomic on powerpc, and we don't care * about memory ordering issues here. 
*/ static unsigned long count_active_contexts(void) { int nr_active = 0, node; for (node = 0; node < MAX_NUMNODES; node++) nr_active += cbe_spu_info[node].nr_active; nr_active += spu_prio->nr_waiting; return nr_active; } /** * spu_calc_load - update the avenrun load estimates. * * No locking against reading these values from userspace, as for * the CPU loadavg code. */ static void spu_calc_load(void) { unsigned long active_tasks; /* fixed-point */ active_tasks = count_active_contexts() * FIXED_1; spu_avenrun[0] = calc_load(spu_avenrun[0], EXP_1, active_tasks); spu_avenrun[1] = calc_load(spu_avenrun[1], EXP_5, active_tasks); spu_avenrun[2] = calc_load(spu_avenrun[2], EXP_15, active_tasks); } static void spusched_wake(struct timer_list *unused) { mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK); wake_up_process(spusched_task); } static void spuloadavg_wake(struct timer_list *unused) { mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ); spu_calc_load(); } static int spusched_thread(void *unused) { struct spu *spu; int node; while (!kthread_should_stop()) { set_current_state(TASK_INTERRUPTIBLE); schedule(); for (node = 0; node < MAX_NUMNODES; node++) { struct mutex *mtx = &cbe_spu_info[node].list_mutex; mutex_lock(mtx); list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) { struct spu_context *ctx = spu->ctx; if (ctx) { get_spu_context(ctx); mutex_unlock(mtx); spusched_tick(ctx); mutex_lock(mtx); put_spu_context(ctx); } } mutex_unlock(mtx); } } return 0; } void spuctx_switch_state(struct spu_context *ctx, enum spu_utilization_state new_state) { unsigned long long curtime; signed long long delta; struct spu *spu; enum spu_utilization_state old_state; int node; curtime = ktime_get_ns(); delta = curtime - ctx->stats.tstamp; WARN_ON(!mutex_is_locked(&ctx->state_mutex)); WARN_ON(delta < 0); spu = ctx->spu; old_state = ctx->stats.util_state; ctx->stats.util_state = new_state; ctx->stats.tstamp = curtime; /* * Update the physical SPU utilization statistics. */ if (spu) { ctx->stats.times[old_state] += delta; spu->stats.times[old_state] += delta; spu->stats.util_state = new_state; spu->stats.tstamp = curtime; node = spu->node; if (old_state == SPU_UTIL_USER) atomic_dec(&cbe_spu_info[node].busy_spus); if (new_state == SPU_UTIL_USER) atomic_inc(&cbe_spu_info[node].busy_spus); } } #ifdef CONFIG_PROC_FS static int show_spu_loadavg(struct seq_file *s, void *private) { int a, b, c; a = spu_avenrun[0] + (FIXED_1/200); b = spu_avenrun[1] + (FIXED_1/200); c = spu_avenrun[2] + (FIXED_1/200); /* * Note that last_pid doesn't really make much sense for the * SPU loadavg (it even seems very odd on the CPU side...), * but we include it here to have a 100% compatible interface. 
*/ seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n", LOAD_INT(a), LOAD_FRAC(a), LOAD_INT(b), LOAD_FRAC(b), LOAD_INT(c), LOAD_FRAC(c), count_active_contexts(), atomic_read(&nr_spu_contexts), idr_get_cursor(&task_active_pid_ns(current)->idr) - 1); return 0; } #endif int __init spu_sched_init(void) { struct proc_dir_entry *entry; int err = -ENOMEM, i; spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL); if (!spu_prio) goto out; for (i = 0; i < MAX_PRIO; i++) { INIT_LIST_HEAD(&spu_prio->runq[i]); __clear_bit(i, spu_prio->bitmap); } spin_lock_init(&spu_prio->runq_lock); timer_setup(&spusched_timer, spusched_wake, 0); timer_setup(&spuloadavg_timer, spuloadavg_wake, 0); spusched_task = kthread_run(spusched_thread, NULL, "spusched"); if (IS_ERR(spusched_task)) { err = PTR_ERR(spusched_task); goto out_free_spu_prio; } mod_timer(&spuloadavg_timer, 0); entry = proc_create_single("spu_loadavg", 0, NULL, show_spu_loadavg); if (!entry) goto out_stop_kthread; pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n", SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE); return 0; out_stop_kthread: kthread_stop(spusched_task); out_free_spu_prio: kfree(spu_prio); out: return err; } void spu_sched_exit(void) { struct spu *spu; int node; remove_proc_entry("spu_loadavg", NULL); del_timer_sync(&spusched_timer); del_timer_sync(&spuloadavg_timer); kthread_stop(spusched_task); for (node = 0; node < MAX_NUMNODES; node++) { mutex_lock(&cbe_spu_info[node].list_mutex); list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) if (spu->alloc_state != SPU_FREE) spu->alloc_state = SPU_FREE; mutex_unlock(&cbe_spu_info[node].list_mutex); } kfree(spu_prio); }
linux-master
arch/powerpc/platforms/cell/spufs/sched.c
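The timeslice macros in sched.c above are easy to misread because of the integer divisions involved. The sketch below works through SCALE_PRIO, MIN_SPU_TIMESLICE and DEF_SPU_TIMESLICE, assuming HZ of 1000 and the conventional MAX_PRIO of 140 / NICE_WIDTH of 40; those assumed values and the scale_prio helper are for illustration only.

#include <stdio.h>

/* Assumed values for illustration only. */
#define HZ		1000
#define MAX_PRIO	140
#define NICE_WIDTH	40

#define SPUSCHED_TICK		10
#define MIN_SPU_TIMESLICE	((5 * HZ / (1000 * SPUSCHED_TICK)) > 1 ? \
				 (5 * HZ / (1000 * SPUSCHED_TICK)) : 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

static int scale_prio(int x, int prio)
{
	int slice = x * (MAX_PRIO - prio) / (NICE_WIDTH / 2);
	return slice > MIN_SPU_TIMESLICE ? slice : MIN_SPU_TIMESLICE;
}

int main(void)
{
	/* One spu scheduler tick spans SPUSCHED_TICK jiffies. */
	int ms_per_tick = SPUSCHED_TICK * 1000 / HZ;

	/* static_prio 120 is nice 0, 139 is nice 19, 100 is nice -20;
	 * spu_set_timeslice() uses DEF*4 for priorities above normal. */
	printf("nice   0: %d ticks (~%d ms)\n",
	       scale_prio(DEF_SPU_TIMESLICE, 120),
	       scale_prio(DEF_SPU_TIMESLICE, 120) * ms_per_tick);
	printf("nice  19: %d ticks (~%d ms)\n",
	       scale_prio(DEF_SPU_TIMESLICE, 139),
	       scale_prio(DEF_SPU_TIMESLICE, 139) * ms_per_tick);
	printf("nice -20: %d ticks (~%d ms)\n",
	       scale_prio(DEF_SPU_TIMESLICE * 4, 100),
	       scale_prio(DEF_SPU_TIMESLICE * 4, 100) * ms_per_tick);
	return 0;
}

With these assumed values the output is 10 ticks (about 100 ms) at nice 0, 1 tick (about 10 ms) at nice 19 and 80 ticks (about 800 ms) at nice -20, which matches the 800 ms / 100 ms range described in the sched.c comment.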
// SPDX-License-Identifier: GPL-2.0-or-later /* * spu_save.c * * (C) Copyright IBM Corp. 2005 * * SPU-side context save sequence outlined in * Synergistic Processor Element Book IV * * Author: Mark Nutter <[email protected]> */ #ifndef LS_SIZE #define LS_SIZE 0x40000 /* 256K (in bytes) */ #endif typedef unsigned int u32; typedef unsigned long long u64; #include <spu_intrinsics.h> #include <asm/spu_csa.h> #include "spu_utils.h" static inline void save_event_mask(void) { unsigned int offset; /* Save, Step 2: * Read the SPU_RdEventMsk channel and save to the LSCSA. */ offset = LSCSA_QW_OFFSET(event_mask); regs_spill[offset].slot[0] = spu_readch(SPU_RdEventMask); } static inline void save_tag_mask(void) { unsigned int offset; /* Save, Step 3: * Read the SPU_RdTagMsk channel and save to the LSCSA. */ offset = LSCSA_QW_OFFSET(tag_mask); regs_spill[offset].slot[0] = spu_readch(MFC_RdTagMask); } static inline void save_upper_240kb(addr64 lscsa_ea) { unsigned int ls = 16384; unsigned int list = (unsigned int)&dma_list[0]; unsigned int size = sizeof(dma_list); unsigned int tag_id = 0; unsigned int cmd = 0x24; /* PUTL */ /* Save, Step 7: * Enqueue the PUTL command (tag 0) to the MFC SPU command * queue to transfer the remaining 240 kb of LS to CSA. */ spu_writech(MFC_LSA, ls); spu_writech(MFC_EAH, lscsa_ea.ui[0]); spu_writech(MFC_EAL, list); spu_writech(MFC_Size, size); spu_writech(MFC_TagID, tag_id); spu_writech(MFC_Cmd, cmd); } static inline void save_fpcr(void) { // vector unsigned int fpcr; unsigned int offset; /* Save, Step 9: * Issue the floating-point status and control register * read instruction, and save to the LSCSA. */ offset = LSCSA_QW_OFFSET(fpcr); regs_spill[offset].v = spu_mffpscr(); } static inline void save_decr(void) { unsigned int offset; /* Save, Step 10: * Read and save the SPU_RdDec channel data to * the LSCSA. */ offset = LSCSA_QW_OFFSET(decr); regs_spill[offset].slot[0] = spu_readch(SPU_RdDec); } static inline void save_srr0(void) { unsigned int offset; /* Save, Step 11: * Read and save the SPU_WSRR0 channel data to * the LSCSA. */ offset = LSCSA_QW_OFFSET(srr0); regs_spill[offset].slot[0] = spu_readch(SPU_RdSRR0); } static inline void spill_regs_to_mem(addr64 lscsa_ea) { unsigned int ls = (unsigned int)&regs_spill[0]; unsigned int size = sizeof(regs_spill); unsigned int tag_id = 0; unsigned int cmd = 0x20; /* PUT */ /* Save, Step 13: * Enqueue a PUT command (tag 0) to send the LSCSA * to the CSA. */ spu_writech(MFC_LSA, ls); spu_writech(MFC_EAH, lscsa_ea.ui[0]); spu_writech(MFC_EAL, lscsa_ea.ui[1]); spu_writech(MFC_Size, size); spu_writech(MFC_TagID, tag_id); spu_writech(MFC_Cmd, cmd); } static inline void enqueue_sync(addr64 lscsa_ea) { unsigned int tag_id = 0; unsigned int cmd = 0xCC; /* Save, Step 14: * Enqueue an MFC_SYNC command (tag 0). */ spu_writech(MFC_TagID, tag_id); spu_writech(MFC_Cmd, cmd); } static inline void save_complete(void) { /* Save, Step 18: * Issue a stop-and-signal instruction indicating * "save complete". Note: This function will not * return!! */ spu_stop(SPU_SAVE_COMPLETE); } /** * main - entry point for SPU-side context save. * * This code deviates from the documented sequence as follows: * * 1. The EA for LSCSA is passed from PPE in the * signal notification channels. * 2. All 128 registers are saved by crt0.o. */ int main() { addr64 lscsa_ea; lscsa_ea.ui[0] = spu_readch(SPU_RdSigNotify1); lscsa_ea.ui[1] = spu_readch(SPU_RdSigNotify2); /* Step 1: done by exit(). */ save_event_mask(); /* Step 2. */ save_tag_mask(); /* Step 3. 
*/
	set_event_mask();		/* Step 4.  */
	set_tag_mask();			/* Step 5.  */
	build_dma_list(lscsa_ea);	/* Step 6.  */
	save_upper_240kb(lscsa_ea);	/* Step 7.  */
	/* Step 8: done by exit().  */
	save_fpcr();			/* Step 9.  */
	save_decr();			/* Step 10. */
	save_srr0();			/* Step 11. */
	enqueue_putllc(lscsa_ea);	/* Step 12. */
	spill_regs_to_mem(lscsa_ea);	/* Step 13. */
	enqueue_sync(lscsa_ea);		/* Step 14. */
	set_tag_update();		/* Step 15. */
	read_tag_status();		/* Step 16. */
	read_llar_status();		/* Step 17. */
	save_complete();		/* Step 18. */

	return 0;
}
linux-master
arch/powerpc/platforms/cell/spufs/spu_save.c
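Both spu_save.c and spu_restore.c enqueue MFC transfers by writing LSA, EAH, EAL, size and tag, and only then the opcode channel; as the ordering in the code suggests, the transfer is kicked off by the final MFC_Cmd write. The opcode values below (0x20 PUT, 0x24 PUTL, 0x40 GET, 0x44 GETL) come from the two files above; demo_writech and demo_enqueue are stand-ins for the SPU channel intrinsics, not real interfaces.

#include <stdio.h>
#include <stdint.h>

#define DEMO_PUT_CMD	0x20	/* PUT:  LS -> effective address */
#define DEMO_PUTL_CMD	0x24	/* PUTL: list form of PUT */
#define DEMO_GET_CMD	0x40	/* GET:  effective address -> LS */
#define DEMO_GETL_CMD	0x44	/* GETL: list form of GET */

/* Stand-in for spu_writech(); just logs the write order. */
static void demo_writech(const char *chan, uint64_t val)
{
	printf("%-10s <- 0x%llx\n", chan, (unsigned long long)val);
}

/* Mirrors the parameter ordering of fetch_regs_from_mem() /
 * spill_regs_to_mem(): the opcode channel is written last. */
static void demo_enqueue(uint32_t lsa, uint64_t ea, uint32_t size,
			 uint32_t tag, uint32_t cmd)
{
	demo_writech("MFC_LSA", lsa);
	demo_writech("MFC_EAH", ea >> 32);
	demo_writech("MFC_EAL", ea & 0xffffffffULL);
	demo_writech("MFC_Size", size);
	demo_writech("MFC_TagID", tag);
	demo_writech("MFC_Cmd", cmd);	/* issue last */
}

int main(void)
{
	/* e.g. fetch 16 kb of spill data into LS offset 0 with tag 0 */
	demo_enqueue(0, 0x100000000ULL, 16384, 0, DEMO_GET_CMD);
	return 0;
}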
// SPDX-License-Identifier: GPL-2.0-or-later /* * SPU core dump code * * (C) Copyright 2006 IBM Corp. * * Author: Dwayne Grant McConnell <[email protected]> */ #include <linux/elf.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/fs.h> #include <linux/gfp.h> #include <linux/list.h> #include <linux/syscalls.h> #include <linux/coredump.h> #include <linux/binfmts.h> #include <linux/uaccess.h> #include "spufs.h" static int spufs_ctx_note_size(struct spu_context *ctx, int dfd) { int i, sz, total = 0; char *name; char fullname[80]; for (i = 0; spufs_coredump_read[i].name != NULL; i++) { name = spufs_coredump_read[i].name; sz = spufs_coredump_read[i].size; sprintf(fullname, "SPU/%d/%s", dfd, name); total += sizeof(struct elf_note); total += roundup(strlen(fullname) + 1, 4); total += roundup(sz, 4); } return total; } static int match_context(const void *v, struct file *file, unsigned fd) { struct spu_context *ctx; if (file->f_op != &spufs_context_fops) return 0; ctx = SPUFS_I(file_inode(file))->i_ctx; if (ctx->flags & SPU_CREATE_NOSCHED) return 0; return fd + 1; } /* * The additional architecture-specific notes for Cell are various * context files in the spu context. * * This function iterates over all open file descriptors and sees * if they are a directory in spufs. In that case we use spufs * internal functionality to dump them without needing to actually * open the files. */ /* * descriptor table is not shared, so files can't change or go away. */ static struct spu_context *coredump_next_context(int *fd) { struct spu_context *ctx; struct file *file; int n = iterate_fd(current->files, *fd, match_context, NULL); if (!n) return NULL; *fd = n - 1; rcu_read_lock(); file = lookup_fd_rcu(*fd); ctx = SPUFS_I(file_inode(file))->i_ctx; get_spu_context(ctx); rcu_read_unlock(); return ctx; } int spufs_coredump_extra_notes_size(void) { struct spu_context *ctx; int size = 0, rc, fd; fd = 0; while ((ctx = coredump_next_context(&fd)) != NULL) { rc = spu_acquire_saved(ctx); if (rc) { put_spu_context(ctx); break; } rc = spufs_ctx_note_size(ctx, fd); spu_release_saved(ctx); if (rc < 0) { put_spu_context(ctx); break; } size += rc; /* start searching the next fd next time */ fd++; put_spu_context(ctx); } return size; } static int spufs_arch_write_note(struct spu_context *ctx, int i, struct coredump_params *cprm, int dfd) { size_t sz = spufs_coredump_read[i].size; char fullname[80]; struct elf_note en; int ret; sprintf(fullname, "SPU/%d/%s", dfd, spufs_coredump_read[i].name); en.n_namesz = strlen(fullname) + 1; en.n_descsz = sz; en.n_type = NT_SPU; if (!dump_emit(cprm, &en, sizeof(en))) return -EIO; if (!dump_emit(cprm, fullname, en.n_namesz)) return -EIO; if (!dump_align(cprm, 4)) return -EIO; if (spufs_coredump_read[i].dump) { ret = spufs_coredump_read[i].dump(ctx, cprm); if (ret < 0) return ret; } else { char buf[32]; ret = snprintf(buf, sizeof(buf), "0x%.16llx", spufs_coredump_read[i].get(ctx)); if (ret >= sizeof(buf)) return sizeof(buf); /* count trailing the NULL: */ if (!dump_emit(cprm, buf, ret + 1)) return -EIO; } dump_skip_to(cprm, roundup(cprm->pos - ret + sz, 4)); return 0; } int spufs_coredump_extra_notes_write(struct coredump_params *cprm) { struct spu_context *ctx; int fd, j, rc; fd = 0; while ((ctx = coredump_next_context(&fd)) != NULL) { rc = spu_acquire_saved(ctx); if (rc) return rc; for (j = 0; spufs_coredump_read[j].name != NULL; j++) { rc = spufs_arch_write_note(ctx, j, cprm, fd); if (rc) { spu_release_saved(ctx); return rc; } } spu_release_saved(ctx); /* start searching the 
		   next fd next time */
		fd++;
	}

	return 0;
}
linux-master
arch/powerpc/platforms/cell/spufs/coredump.c
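spufs_ctx_note_size() sizes each extra core-dump note as the elf_note header plus the 4-byte-padded name and the 4-byte-padded descriptor, with the note name built as "SPU/<fd>/<file>". The standalone sketch below repeats that arithmetic; demo_elf_note, demo_note_size and the example sizes are illustrative, not the kernel's structures.

#include <stdio.h>
#include <string.h>

/* Same layout idea as struct elf_note: three 32-bit words. */
struct demo_elf_note {
	unsigned int n_namesz;
	unsigned int n_descsz;
	unsigned int n_type;
};

static size_t roundup4(size_t x)
{
	return (x + 3) & ~(size_t)3;
}

/* Size of one "SPU/<fd>/<file>" note, mirroring spufs_ctx_note_size(). */
static size_t demo_note_size(int dfd, const char *name, size_t desc_sz)
{
	char fullname[80];

	snprintf(fullname, sizeof(fullname), "SPU/%d/%s", dfd, name);
	return sizeof(struct demo_elf_note)
		+ roundup4(strlen(fullname) + 1)	/* name + NUL, padded */
		+ roundup4(desc_sz);			/* descriptor, padded */
}

int main(void)
{
	/* e.g. a 2048-byte descriptor for a "regs"-style file on fd 3
	 * (both values hypothetical): 12 + 12 + 2048 = 2072 bytes */
	printf("note size: %zu bytes\n", demo_note_size(3, "regs", 2048));
	return 0;
}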
// SPDX-License-Identifier: GPL-2.0-or-later /* * spu_switch.c * * (C) Copyright IBM Corp. 2005 * * Author: Mark Nutter <[email protected]> * * Host-side part of SPU context switch sequence outlined in * Synergistic Processor Element, Book IV. * * A fully premptive switch of an SPE is very expensive in terms * of time and system resources. SPE Book IV indicates that SPE * allocation should follow a "serially reusable device" model, * in which the SPE is assigned a task until it completes. When * this is not possible, this sequence may be used to premptively * save, and then later (optionally) restore the context of a * program executing on an SPE. */ #include <linux/export.h> #include <linux/errno.h> #include <linux/hardirq.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <asm/io.h> #include <asm/spu.h> #include <asm/spu_priv1.h> #include <asm/spu_csa.h> #include <asm/mmu_context.h> #include "spufs.h" #include "spu_save_dump.h" #include "spu_restore_dump.h" #if 0 #define POLL_WHILE_TRUE(_c) { \ do { \ } while (_c); \ } #else #define RELAX_SPIN_COUNT 1000 #define POLL_WHILE_TRUE(_c) { \ do { \ int _i; \ for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \ cpu_relax(); \ } \ if (unlikely(_c)) yield(); \ else break; \ } while (_c); \ } #endif /* debug */ #define POLL_WHILE_FALSE(_c) POLL_WHILE_TRUE(!(_c)) static inline void acquire_spu_lock(struct spu *spu) { /* Save, Step 1: * Restore, Step 1: * Acquire SPU-specific mutual exclusion lock. * TBD. */ } static inline void release_spu_lock(struct spu *spu) { /* Restore, Step 76: * Release SPU-specific mutual exclusion lock. * TBD. */ } static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; u32 isolate_state; /* Save, Step 2: * Save, Step 6: * If SPU_Status[E,L,IS] any field is '1', this * SPU is in isolate state and cannot be context * saved at this time. */ isolate_state = SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS; return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0; } static inline void disable_interrupts(struct spu_state *csa, struct spu *spu) { /* Save, Step 3: * Restore, Step 2: * Save INT_Mask_class0 in CSA. * Write INT_MASK_class0 with value of 0. * Save INT_Mask_class1 in CSA. * Write INT_MASK_class1 with value of 0. * Save INT_Mask_class2 in CSA. * Write INT_MASK_class2 with value of 0. * Synchronize all three interrupts to be sure * we no longer execute a handler on another CPU. */ spin_lock_irq(&spu->register_lock); if (csa) { csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0); csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1); csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2); } spu_int_mask_set(spu, 0, 0ul); spu_int_mask_set(spu, 1, 0ul); spu_int_mask_set(spu, 2, 0ul); eieio(); spin_unlock_irq(&spu->register_lock); /* * This flag needs to be set before calling synchronize_irq so * that the update will be visible to the relevant handlers * via a simple load. */ set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags); clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags); synchronize_irq(spu->irqs[0]); synchronize_irq(spu->irqs[1]); synchronize_irq(spu->irqs[2]); } static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu) { /* Save, Step 4: * Restore, Step 25. 
* Set a software watchdog timer, which specifies the * maximum allowable time for a context save sequence. * * For present, this implementation will not set a global * watchdog timer, as virtualization & variable system load * may cause unpredictable execution times. */ } static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu) { /* Save, Step 5: * Restore, Step 3: * Inhibit user-space access (if provided) to this * SPU by unmapping the virtual pages assigned to * the SPU memory-mapped I/O (MMIO) for problem * state. TBD. */ } static inline void set_switch_pending(struct spu_state *csa, struct spu *spu) { /* Save, Step 7: * Restore, Step 5: * Set a software context switch pending flag. * Done above in Step 3 - disable_interrupts(). */ } static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 8: * Suspend DMA and save MFC_CNTL. */ switch (in_be64(&priv2->mfc_control_RW) & MFC_CNTL_SUSPEND_DMA_STATUS_MASK) { case MFC_CNTL_SUSPEND_IN_PROGRESS: POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_SUSPEND_DMA_STATUS_MASK) == MFC_CNTL_SUSPEND_COMPLETE); fallthrough; case MFC_CNTL_SUSPEND_COMPLETE: if (csa) csa->priv2.mfc_control_RW = in_be64(&priv2->mfc_control_RW) | MFC_CNTL_SUSPEND_DMA_QUEUE; break; case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION: out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE); POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_SUSPEND_DMA_STATUS_MASK) == MFC_CNTL_SUSPEND_COMPLETE); if (csa) csa->priv2.mfc_control_RW = in_be64(&priv2->mfc_control_RW) & ~MFC_CNTL_SUSPEND_DMA_QUEUE & ~MFC_CNTL_SUSPEND_MASK; break; } } static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save, Step 9: * Save SPU_Runcntl in the CSA. This value contains * the "Application Desired State". */ csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW); } static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu) { /* Save, Step 10: * Save MFC_SR1 in the CSA. */ csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu); } static inline void save_spu_status(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save, Step 11: * Read SPU_Status[R], and save to CSA. */ if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) { csa->prob.spu_status_R = in_be32(&prob->spu_status_R); } else { u32 stopped; out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); eieio(); POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP; if ((in_be32(&prob->spu_status_R) & stopped) == 0) csa->prob.spu_status_R = SPU_STATUS_RUNNING; else csa->prob.spu_status_R = in_be32(&prob->spu_status_R); } } static inline void save_mfc_stopped_status(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; const u64 mask = MFC_CNTL_DECREMENTER_RUNNING | MFC_CNTL_DMA_QUEUES_EMPTY; /* Save, Step 12: * Read MFC_CNTL[Ds]. Update saved copy of * CSA.MFC_CNTL[Ds]. * * update: do the same with MFC_CNTL[Q]. */ csa->priv2.mfc_control_RW &= ~mask; csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask; } static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 13: * Write MFC_CNTL[Dh] set to a '1' to halt * the decrementer. 
*/ out_be64(&priv2->mfc_control_RW, MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK); eieio(); } static inline void save_timebase(struct spu_state *csa, struct spu *spu) { /* Save, Step 14: * Read PPE Timebase High and Timebase low registers * and save in CSA. TBD. */ csa->suspend_time = get_cycles(); } static inline void remove_other_spu_access(struct spu_state *csa, struct spu *spu) { /* Save, Step 15: * Remove other SPU access to this SPU by unmapping * this SPU's pages from their address space. TBD. */ } static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save, Step 16: * Restore, Step 11. * Write SPU_MSSync register. Poll SPU_MSSync[P] * for a value of 0. */ out_be64(&prob->spc_mssync_RW, 1UL); POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING); } static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu) { /* Save, Step 17: * Restore, Step 12. * Restore, Step 48. * Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register. * Then issue a PPE sync instruction. */ spu_tlb_invalidate(spu); mb(); } static inline void handle_pending_interrupts(struct spu_state *csa, struct spu *spu) { /* Save, Step 18: * Handle any pending interrupts from this SPU * here. This is OS or hypervisor specific. One * option is to re-enable interrupts to handle any * pending interrupts, with the interrupt handlers * recognizing the software Context Switch Pending * flag, to ensure the SPU execution or MFC command * queue is not restarted. TBD. */ } static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; int i; /* Save, Step 19: * If MFC_Cntl[Se]=0 then save * MFC command queues. */ if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) { for (i = 0; i < 8; i++) { csa->priv2.puq[i].mfc_cq_data0_RW = in_be64(&priv2->puq[i].mfc_cq_data0_RW); csa->priv2.puq[i].mfc_cq_data1_RW = in_be64(&priv2->puq[i].mfc_cq_data1_RW); csa->priv2.puq[i].mfc_cq_data2_RW = in_be64(&priv2->puq[i].mfc_cq_data2_RW); csa->priv2.puq[i].mfc_cq_data3_RW = in_be64(&priv2->puq[i].mfc_cq_data3_RW); } for (i = 0; i < 16; i++) { csa->priv2.spuq[i].mfc_cq_data0_RW = in_be64(&priv2->spuq[i].mfc_cq_data0_RW); csa->priv2.spuq[i].mfc_cq_data1_RW = in_be64(&priv2->spuq[i].mfc_cq_data1_RW); csa->priv2.spuq[i].mfc_cq_data2_RW = in_be64(&priv2->spuq[i].mfc_cq_data2_RW); csa->priv2.spuq[i].mfc_cq_data3_RW = in_be64(&priv2->spuq[i].mfc_cq_data3_RW); } } } static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save, Step 20: * Save the PPU_QueryMask register * in the CSA. */ csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW); } static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save, Step 21: * Save the PPU_QueryType register * in the CSA. */ csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW); } static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save the Prxy_TagStatus register in the CSA. * * It is unnecessary to restore dma_tagstatus_R, however, * dma_tagstatus_R in the CSA is accessed via backing_ops, so * we must save it. 
*/ csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R); } static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 22: * Save the MFC_CSR_TSQ register * in the LSCSA. */ csa->priv2.spu_tag_status_query_RW = in_be64(&priv2->spu_tag_status_query_RW); } static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 23: * Save the MFC_CSR_CMD1 and MFC_CSR_CMD2 * registers in the CSA. */ csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW); csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW); } static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 24: * Save the MFC_CSR_ATO register in * the CSA. */ csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW); } static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu) { /* Save, Step 25: * Save the MFC_TCLASS_ID register in * the CSA. */ csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu); } static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu) { /* Save, Step 26: * Restore, Step 23. * Write the MFC_TCLASS_ID register with * the value 0x10000000. */ spu_mfc_tclass_id_set(spu, 0x10000000); eieio(); } static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 27: * Restore, Step 14. * Write MFC_CNTL[Pc]=1 (purge queue). */ out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST | MFC_CNTL_SUSPEND_MASK); eieio(); } static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 28: * Poll MFC_CNTL[Ps] until value '11' is read * (purge complete). */ POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_PURGE_DMA_STATUS_MASK) == MFC_CNTL_PURGE_DMA_COMPLETE); } static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu) { /* Save, Step 30: * Restore, Step 18: * Write MFC_SR1 with MFC_SR1[D=0,S=1] and * MFC_SR1[TL,R,Pr,T] set correctly for the * OS specific environment. * * Implementation note: The SPU-side code * for save/restore is privileged, so the * MFC_SR1[Pr] bit is not set. * */ spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK | MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK)); } static inline void save_spu_npc(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save, Step 31: * Save SPU_NPC in the CSA. */ csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW); } static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 32: * Save SPU_PrivCntl in the CSA. */ csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW); } static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 33: * Restore, Step 16: * Write SPU_PrivCntl[S,Le,A] fields reset to 0. */ out_be64(&priv2->spu_privcntl_RW, 0UL); eieio(); } static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 34: * Save SPU_LSLR in the CSA. 
*/ csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW); } static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 35: * Restore, Step 17. * Reset SPU_LSLR. */ out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK); eieio(); } static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 36: * Save SPU_Cfg in the CSA. */ csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW); } static inline void save_pm_trace(struct spu_state *csa, struct spu *spu) { /* Save, Step 37: * Save PM_Trace_Tag_Wait_Mask in the CSA. * Not performed by this implementation. */ } static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu) { /* Save, Step 38: * Save RA_GROUP_ID register and the * RA_ENABLE reigster in the CSA. */ csa->priv1.resource_allocation_groupID_RW = spu_resource_allocation_groupID_get(spu); csa->priv1.resource_allocation_enable_RW = spu_resource_allocation_enable_get(spu); } static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save, Step 39: * Save MB_Stat register in the CSA. */ csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R); } static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save, Step 40: * Save the PPU_MB register in the CSA. */ csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R); } static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 41: * Save the PPUINT_MB register in the CSA. */ csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R); } static inline void save_ch_part1(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL }; int i; /* Save, Step 42: */ /* Save CH 1, without channel count */ out_be64(&priv2->spu_chnlcntptr_RW, 1); csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW); /* Save the following CH: [0,3,4,24,25,27] */ for (i = 0; i < ARRAY_SIZE(ch_indices); i++) { idx = ch_indices[i]; out_be64(&priv2->spu_chnlcntptr_RW, idx); eieio(); csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW); csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW); out_be64(&priv2->spu_chnldata_RW, 0UL); out_be64(&priv2->spu_chnlcnt_RW, 0UL); eieio(); } } static inline void save_spu_mb(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; int i; /* Save, Step 43: * Save SPU Read Mailbox Channel. */ out_be64(&priv2->spu_chnlcntptr_RW, 29UL); eieio(); csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW); for (i = 0; i < 4; i++) { csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW); } out_be64(&priv2->spu_chnlcnt_RW, 0UL); eieio(); } static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 44: * Save MFC_CMD Channel. 
*/ out_be64(&priv2->spu_chnlcntptr_RW, 21UL); eieio(); csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW); eieio(); } static inline void reset_ch(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL }; u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL }; u64 idx; int i; /* Save, Step 45: * Reset the following CH: [21, 23, 28, 30] */ for (i = 0; i < 4; i++) { idx = ch_indices[i]; out_be64(&priv2->spu_chnlcntptr_RW, idx); eieio(); out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]); eieio(); } } static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Save, Step 46: * Restore, Step 25. * Write MFC_CNTL[Sc]=0 (resume queue processing). */ out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE); } static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu, unsigned int *code, int code_size) { /* Save, Step 47: * Restore, Step 30. * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All * register, then initialize SLB_VSID and SLB_ESID * to provide access to SPU context save code and * LSCSA. * * This implementation places both the context * switch code and LSCSA in kernel address space. * * Further this implementation assumes that the * MFC_SR1[R]=1 (in other words, assume that * translation is desired by OS environment). */ spu_invalidate_slbs(spu); spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size); } static inline void set_switch_active(struct spu_state *csa, struct spu *spu) { /* Save, Step 48: * Restore, Step 23. * Change the software context switch pending flag * to context switch active. This implementation does * not uses a switch active flag. * * Now that we have saved the mfc in the csa, we can add in the * restart command if an exception occurred. */ if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags)) csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND; clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags); mb(); } static inline void enable_interrupts(struct spu_state *csa, struct spu *spu) { unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR | CLASS1_ENABLE_STORAGE_FAULT_INTR; /* Save, Step 49: * Restore, Step 22: * Reset and then enable interrupts, as * needed by OS. * * This implementation enables only class1 * (translation) interrupts. */ spin_lock_irq(&spu->register_lock); spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK); spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK); spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK); spu_int_mask_set(spu, 0, 0ul); spu_int_mask_set(spu, 1, class1_mask); spu_int_mask_set(spu, 2, 0ul); spin_unlock_irq(&spu->register_lock); } static inline int send_mfc_dma(struct spu *spu, unsigned long ea, unsigned int ls_offset, unsigned int size, unsigned int tag, unsigned int rclass, unsigned int cmd) { struct spu_problem __iomem *prob = spu->problem; union mfc_tag_size_class_cmd command; unsigned int transfer_size; volatile unsigned int status = 0x0; while (size > 0) { transfer_size = (size > MFC_MAX_DMA_SIZE) ? 
MFC_MAX_DMA_SIZE : size; command.u.mfc_size = transfer_size; command.u.mfc_tag = tag; command.u.mfc_rclassid = rclass; command.u.mfc_cmd = cmd; do { out_be32(&prob->mfc_lsa_W, ls_offset); out_be64(&prob->mfc_ea_W, ea); out_be64(&prob->mfc_union_W.all64, command.all64); status = in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32); if (unlikely(status & 0x2)) { cpu_relax(); } } while (status & 0x3); size -= transfer_size; ea += transfer_size; ls_offset += transfer_size; } return 0; } static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu) { unsigned long addr = (unsigned long)&csa->lscsa->ls[0]; unsigned int ls_offset = 0x0; unsigned int size = 16384; unsigned int tag = 0; unsigned int rclass = 0; unsigned int cmd = MFC_PUT_CMD; /* Save, Step 50: * Issue a DMA command to copy the first 16K bytes * of local storage to the CSA. */ send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); } static inline void set_spu_npc(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save, Step 51: * Restore, Step 31. * Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry * point address of context save code in local * storage. * * This implementation uses SPU-side save/restore * programs with entry points at LSA of 0. */ out_be32(&prob->spu_npc_RW, 0); eieio(); } static inline void set_signot1(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; union { u64 ull; u32 ui[2]; } addr64; /* Save, Step 52: * Restore, Step 32: * Write SPU_Sig_Notify_1 register with upper 32-bits * of the CSA.LSCSA effective address. */ addr64.ull = (u64) csa->lscsa; out_be32(&prob->signal_notify1, addr64.ui[0]); eieio(); } static inline void set_signot2(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; union { u64 ull; u32 ui[2]; } addr64; /* Save, Step 53: * Restore, Step 33: * Write SPU_Sig_Notify_2 register with lower 32-bits * of the CSA.LSCSA effective address. */ addr64.ull = (u64) csa->lscsa; out_be32(&prob->signal_notify2, addr64.ui[1]); eieio(); } static inline void send_save_code(struct spu_state *csa, struct spu *spu) { unsigned long addr = (unsigned long)&spu_save_code[0]; unsigned int ls_offset = 0x0; unsigned int size = sizeof(spu_save_code); unsigned int tag = 0; unsigned int rclass = 0; unsigned int cmd = MFC_GETFS_CMD; /* Save, Step 54: * Issue a DMA command to copy context save code * to local storage and start SPU. */ send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); } static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Save, Step 55: * Restore, Step 38. * Write PPU_QueryMask=1 (enable Tag Group 0) * and issue eieio instruction. */ out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0)); eieio(); } static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; u32 mask = MFC_TAGID_TO_TAGMASK(0); unsigned long flags; /* Save, Step 56: * Restore, Step 39. * Restore, Step 39. * Restore, Step 46. * Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete) * or write PPU_QueryType[TS]=01 and wait for Tag Group * Complete Interrupt. Write INT_Stat_Class0 or * INT_Stat_Class2 with value of 'handled'. 
*/ POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask); local_irq_save(flags); spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK); spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK); local_irq_restore(flags); } static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; unsigned long flags; /* Save, Step 57: * Restore, Step 40. * Poll until SPU_Status[R]=0 or wait for SPU Class 0 * or SPU Class 2 interrupt. Write INT_Stat_class0 * or INT_Stat_class2 with value of handled. */ POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); local_irq_save(flags); spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK); spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK); local_irq_restore(flags); } static inline int check_save_status(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; u32 complete; /* Save, Step 54: * If SPU_Status[P]=1 and SPU_Status[SC] = "success", * context save succeeded, otherwise context save * failed. */ complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) | SPU_STATUS_STOPPED_BY_STOP); return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0; } static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu) { /* Restore, Step 4: * If required, notify the "using application" that * the SPU task has been terminated. TBD. */ } static inline void suspend_mfc_and_halt_decr(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Restore, Step 7: * Write MFC_Cntl[Dh,Sc,Sm]='1','1','0' to suspend * the queue and halt the decrementer. */ out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE | MFC_CNTL_DECREMENTER_HALTED); eieio(); } static inline void wait_suspend_mfc_complete(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Restore, Step 8: * Restore, Step 47. * Poll MFC_CNTL[Ss] until 11 is returned. */ POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_SUSPEND_DMA_STATUS_MASK) == MFC_CNTL_SUSPEND_COMPLETE); } static inline int suspend_spe(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Restore, Step 9: * If SPU_Status[R]=1, stop SPU execution * and wait for stop to complete. * * Returns 1 if SPU_Status[R]=1 on entry. * 0 otherwise */ if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) { if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_EXIT_STATUS) { POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); } if ((in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_LOAD_STATUS) || (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE)) { out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); eieio(); POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); out_be32(&prob->spu_runcntl_RW, 0x2); eieio(); POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); } if (in_be32(&prob->spu_status_R) & SPU_STATUS_WAITING_FOR_CHANNEL) { out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); eieio(); POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); } return 1; } return 0; } static inline void clear_spu_status(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Restore, Step 10: * If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1, * release SPU from isolate state. 
*/ if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) { if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_EXIT_STATUS) { spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK); eieio(); out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); eieio(); POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); } if ((in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_LOAD_STATUS) || (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE)) { spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK); eieio(); out_be32(&prob->spu_runcntl_RW, 0x2); eieio(); POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); } } } static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; u64 ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL }; u64 idx; int i; /* Restore, Step 20: */ /* Reset CH 1 */ out_be64(&priv2->spu_chnlcntptr_RW, 1); out_be64(&priv2->spu_chnldata_RW, 0UL); /* Reset the following CH: [0,3,4,24,25,27] */ for (i = 0; i < ARRAY_SIZE(ch_indices); i++) { idx = ch_indices[i]; out_be64(&priv2->spu_chnlcntptr_RW, idx); eieio(); out_be64(&priv2->spu_chnldata_RW, 0UL); out_be64(&priv2->spu_chnlcnt_RW, 0UL); eieio(); } } static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL }; u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL }; u64 idx; int i; /* Restore, Step 21: * Reset the following CH: [21, 23, 28, 29, 30] */ for (i = 0; i < 5; i++) { idx = ch_indices[i]; out_be64(&priv2->spu_chnlcntptr_RW, idx); eieio(); out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]); eieio(); } } static inline void setup_spu_status_part1(struct spu_state *csa, struct spu *spu) { u32 status_P = SPU_STATUS_STOPPED_BY_STOP; u32 status_I = SPU_STATUS_INVALID_INSTR; u32 status_H = SPU_STATUS_STOPPED_BY_HALT; u32 status_S = SPU_STATUS_SINGLE_STEP; u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR; u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP; u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT |SPU_STATUS_STOPPED_BY_STOP; u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP |SPU_STATUS_INVALID_INSTR; u32 status_code; /* Restore, Step 27: * If the CSA.SPU_Status[I,S,H,P]=1 then add the correct * instruction sequence to the end of the SPU based restore * code (after the "context restored" stop and signal) to * restore the correct SPU status. * * NOTE: Rather than modifying the SPU executable, we * instead add a new 'stopped_status' field to the * LSCSA. The SPU-side restore reads this field and * takes the appropriate action when exiting. */ status_code = (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF; if ((csa->prob.spu_status_R & status_P_I) == status_P_I) { /* SPU_Status[P,I]=1 - Illegal Instruction followed * by Stop and Signal instruction, followed by 'br -4'. * */ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I; csa->lscsa->stopped_status.slot[1] = status_code; } else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) { /* SPU_Status[P,H]=1 - Halt Conditional, followed * by Stop and Signal instruction, followed by * 'br -4'. */ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H; csa->lscsa->stopped_status.slot[1] = status_code; } else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) { /* SPU_Status[S,P]=1 - Stop and Signal instruction * followed by 'br -4'. 
*/ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P; csa->lscsa->stopped_status.slot[1] = status_code; } else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) { /* SPU_Status[S,I]=1 - Illegal instruction followed * by 'br -4'. */ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I; csa->lscsa->stopped_status.slot[1] = status_code; } else if ((csa->prob.spu_status_R & status_P) == status_P) { /* SPU_Status[P]=1 - Stop and Signal instruction * followed by 'br -4'. */ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P; csa->lscsa->stopped_status.slot[1] = status_code; } else if ((csa->prob.spu_status_R & status_H) == status_H) { /* SPU_Status[H]=1 - Halt Conditional, followed * by 'br -4'. */ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H; } else if ((csa->prob.spu_status_R & status_S) == status_S) { /* SPU_Status[S]=1 - Two nop instructions. */ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S; } else if ((csa->prob.spu_status_R & status_I) == status_I) { /* SPU_Status[I]=1 - Illegal instruction followed * by 'br -4'. */ csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I; } } static inline void setup_spu_status_part2(struct spu_state *csa, struct spu *spu) { u32 mask; /* Restore, Step 28: * If the CSA.SPU_Status[I,S,H,P,R]=0 then * add a 'br *' instruction to the end of * the SPU based restore code. * * NOTE: Rather than modifying the SPU executable, we * instead add a new 'stopped_status' field to the * LSCSA. The SPU-side restore reads this field and * takes the appropriate action when exiting. */ mask = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING; if (!(csa->prob.spu_status_R & mask)) { csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R; } } static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu) { /* Restore, Step 29: * Restore RA_GROUP_ID register and the * RA_ENABLE reigster from the CSA. */ spu_resource_allocation_groupID_set(spu, csa->priv1.resource_allocation_groupID_RW); spu_resource_allocation_enable_set(spu, csa->priv1.resource_allocation_enable_RW); } static inline void send_restore_code(struct spu_state *csa, struct spu *spu) { unsigned long addr = (unsigned long)&spu_restore_code[0]; unsigned int ls_offset = 0x0; unsigned int size = sizeof(spu_restore_code); unsigned int tag = 0; unsigned int rclass = 0; unsigned int cmd = MFC_GETFS_CMD; /* Restore, Step 37: * Issue MFC DMA command to copy context * restore code to local storage. */ send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); } static inline void setup_decr(struct spu_state *csa, struct spu *spu) { /* Restore, Step 34: * If CSA.MFC_CNTL[Ds]=1 (decrementer was * running) then adjust decrementer, set * decrementer running status in LSCSA, * and set decrementer "wrapped" status * in LSCSA. */ if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) { cycles_t resume_time = get_cycles(); cycles_t delta_time = resume_time - csa->suspend_time; csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING; if (csa->lscsa->decr.slot[0] < delta_time) { csa->lscsa->decr_status.slot[0] |= SPU_DECR_STATUS_WRAPPED; } csa->lscsa->decr.slot[0] -= delta_time; } else { csa->lscsa->decr_status.slot[0] = 0; } } static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu) { /* Restore, Step 35: * Copy the CSA.PU_MB data into the LSCSA. 
*/ csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R; } static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu) { /* Restore, Step 36: * Copy the CSA.PUINT_MB data into the LSCSA. */ csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R; } static inline int check_restore_status(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; u32 complete; /* Restore, Step 40: * If SPU_Status[P]=1 and SPU_Status[SC] = "success", * context restore succeeded, otherwise context restore * failed. */ complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) | SPU_STATUS_STOPPED_BY_STOP); return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0; } static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Restore, Step 41: * Restore SPU_PrivCntl from the CSA. */ out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW); eieio(); } static inline void restore_status_part1(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; u32 mask; /* Restore, Step 42: * If any CSA.SPU_Status[I,S,H,P]=1, then * restore the error or single step state. */ mask = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP; if (csa->prob.spu_status_R & mask) { out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); eieio(); POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); } } static inline void restore_status_part2(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; u32 mask; /* Restore, Step 43: * If all CSA.SPU_Status[I,S,H,P,R]=0 then write * SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1, * then write '00' to SPU_RunCntl[R0R1] and wait * for SPU_Status[R]=0. */ mask = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING; if (!(csa->prob.spu_status_R & mask)) { out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); eieio(); POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); eieio(); POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); } } static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu) { unsigned long addr = (unsigned long)&csa->lscsa->ls[0]; unsigned int ls_offset = 0x0; unsigned int size = 16384; unsigned int tag = 0; unsigned int rclass = 0; unsigned int cmd = MFC_GET_CMD; /* Restore, Step 44: * Issue a DMA command to restore the first * 16kb of local storage from CSA. */ send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); } static inline void suspend_mfc(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Restore, Step 47. * Write MFC_Cntl[Sc,Sm]='1','0' to suspend * the queue. */ out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE); eieio(); } static inline void clear_interrupts(struct spu_state *csa, struct spu *spu) { /* Restore, Step 49: * Write INT_MASK_class0 with value of 0. * Write INT_MASK_class1 with value of 0. * Write INT_MASK_class2 with value of 0. * Write INT_STAT_class0 with value of -1. * Write INT_STAT_class1 with value of -1. * Write INT_STAT_class2 with value of -1. 
*/ spin_lock_irq(&spu->register_lock); spu_int_mask_set(spu, 0, 0ul); spu_int_mask_set(spu, 1, 0ul); spu_int_mask_set(spu, 2, 0ul); spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK); spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK); spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK); spin_unlock_irq(&spu->register_lock); } static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; int i; /* Restore, Step 50: * If MFC_Cntl[Se]!=0 then restore * MFC command queues. */ if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) { for (i = 0; i < 8; i++) { out_be64(&priv2->puq[i].mfc_cq_data0_RW, csa->priv2.puq[i].mfc_cq_data0_RW); out_be64(&priv2->puq[i].mfc_cq_data1_RW, csa->priv2.puq[i].mfc_cq_data1_RW); out_be64(&priv2->puq[i].mfc_cq_data2_RW, csa->priv2.puq[i].mfc_cq_data2_RW); out_be64(&priv2->puq[i].mfc_cq_data3_RW, csa->priv2.puq[i].mfc_cq_data3_RW); } for (i = 0; i < 16; i++) { out_be64(&priv2->spuq[i].mfc_cq_data0_RW, csa->priv2.spuq[i].mfc_cq_data0_RW); out_be64(&priv2->spuq[i].mfc_cq_data1_RW, csa->priv2.spuq[i].mfc_cq_data1_RW); out_be64(&priv2->spuq[i].mfc_cq_data2_RW, csa->priv2.spuq[i].mfc_cq_data2_RW); out_be64(&priv2->spuq[i].mfc_cq_data3_RW, csa->priv2.spuq[i].mfc_cq_data3_RW); } } eieio(); } static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Restore, Step 51: * Restore the PPU_QueryMask register from CSA. */ out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW); eieio(); } static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Restore, Step 52: * Restore the PPU_QueryType register from CSA. */ out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW); eieio(); } static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Restore, Step 53: * Restore the MFC_CSR_TSQ register from CSA. */ out_be64(&priv2->spu_tag_status_query_RW, csa->priv2.spu_tag_status_query_RW); eieio(); } static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Restore, Step 54: * Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2 * registers from CSA. */ out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW); out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW); eieio(); } static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Restore, Step 55: * Restore the MFC_CSR_ATO register from CSA. */ out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW); } static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu) { /* Restore, Step 56: * Restore the MFC_TCLASS_ID register from CSA. */ spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW); eieio(); } static inline void set_llr_event(struct spu_state *csa, struct spu *spu) { u64 ch0_cnt, ch0_data; u64 ch1_data; /* Restore, Step 57: * Set the Lock Line Reservation Lost Event by: * 1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1. * 2. If CSA.SPU_Channel_0_Count=0 and * CSA.SPU_Wr_Event_Mask[Lr]=1 and * CSA.SPU_Event_Status[Lr]=0 then set * CSA.SPU_Event_Status_Count=1. 
*/ ch0_cnt = csa->spu_chnlcnt_RW[0]; ch0_data = csa->spu_chnldata_RW[0]; ch1_data = csa->spu_chnldata_RW[1]; csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT; if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) && (ch1_data & MFC_LLR_LOST_EVENT)) { csa->spu_chnlcnt_RW[0] = 1; } } static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu) { /* Restore, Step 58: * If the status of the CSA software decrementer * "wrapped" flag is set, OR in a '1' to * CSA.SPU_Event_Status[Tm]. */ if (!(csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED)) return; if ((csa->spu_chnlcnt_RW[0] == 0) && (csa->spu_chnldata_RW[1] & 0x20) && !(csa->spu_chnldata_RW[0] & 0x20)) csa->spu_chnlcnt_RW[0] = 1; csa->spu_chnldata_RW[0] |= 0x20; } static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL }; int i; /* Restore, Step 59: * Restore the following CH: [0,3,4,24,25,27] */ for (i = 0; i < ARRAY_SIZE(ch_indices); i++) { idx = ch_indices[i]; out_be64(&priv2->spu_chnlcntptr_RW, idx); eieio(); out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]); out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]); eieio(); } } static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; u64 ch_indices[3] = { 9UL, 21UL, 23UL }; u64 ch_counts[3] = { 1UL, 16UL, 1UL }; u64 idx; int i; /* Restore, Step 60: * Restore the following CH: [9,21,23]. */ ch_counts[0] = 1UL; ch_counts[1] = csa->spu_chnlcnt_RW[21]; ch_counts[2] = 1UL; for (i = 0; i < 3; i++) { idx = ch_indices[i]; out_be64(&priv2->spu_chnlcntptr_RW, idx); eieio(); out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]); eieio(); } } static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Restore, Step 61: * Restore the SPU_LSLR register from CSA. */ out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW); eieio(); } static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Restore, Step 62: * Restore the SPU_Cfg register from CSA. */ out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW); eieio(); } static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu) { /* Restore, Step 63: * Restore PM_Trace_Tag_Wait_Mask from CSA. * Not performed by this implementation. */ } static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Restore, Step 64: * Restore SPU_NPC from CSA. */ out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW); eieio(); } static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; int i; /* Restore, Step 65: * Restore MFC_RdSPU_MB from CSA. */ out_be64(&priv2->spu_chnlcntptr_RW, 29UL); eieio(); out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]); for (i = 0; i < 4; i++) { out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]); } eieio(); } static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Restore, Step 66: * If CSA.MB_Stat[P]=0 (mailbox empty) then * read from the PPU_MB register. 
*/ if ((csa->prob.mb_stat_R & 0xFF) == 0) { in_be32(&prob->pu_mb_R); eieio(); } } static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Restore, Step 66: * If CSA.MB_Stat[I]=0 (mailbox empty) then * read from the PPUINT_MB register. */ if ((csa->prob.mb_stat_R & 0xFF0000) == 0) { in_be64(&priv2->puint_mb_R); eieio(); spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR); eieio(); } } static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu) { /* Restore, Step 69: * Restore the MFC_SR1 register from CSA. */ spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW); eieio(); } static inline void set_int_route(struct spu_state *csa, struct spu *spu) { struct spu_context *ctx = spu->ctx; spu_cpu_affinity_set(spu, ctx->last_ran); } static inline void restore_other_spu_access(struct spu_state *csa, struct spu *spu) { /* Restore, Step 70: * Restore other SPU mappings to this SPU. TBD. */ } static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; /* Restore, Step 71: * If CSA.SPU_Status[R]=1 then write * SPU_RunCntl[R0R1]='01'. */ if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) { out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); eieio(); } } static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu) { struct spu_priv2 __iomem *priv2 = spu->priv2; /* Restore, Step 72: * Restore the MFC_CNTL register for the CSA. */ out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW); eieio(); /* * The queue is put back into the same state that was evident prior to * the context switch. The suspend flag is added to the saved state in * the csa, if the operational state was suspending or suspended. In * this case, the code that suspended the mfc is responsible for * continuing it. Note that SPE faults do not change the operational * state of the spu. */ } static inline void enable_user_access(struct spu_state *csa, struct spu *spu) { /* Restore, Step 73: * Enable user-space access (if provided) to this * SPU by mapping the virtual pages assigned to * the SPU memory-mapped I/O (MMIO) for problem * state. TBD. */ } static inline void reset_switch_active(struct spu_state *csa, struct spu *spu) { /* Restore, Step 74: * Reset the "context switch active" flag. * Not performed by this implementation. */ } static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu) { /* Restore, Step 75: * Re-enable SPU interrupts. */ spin_lock_irq(&spu->register_lock); spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW); spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW); spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW); spin_unlock_irq(&spu->register_lock); } static int quiece_spu(struct spu_state *prev, struct spu *spu) { /* * Combined steps 2-18 of SPU context save sequence, which * quiesce the SPU state (disable SPU execution, MFC command * queues, decrementer, SPU interrupts, etc.). * * Returns 0 on success. * 2 if failed step 2. * 6 if failed step 6. */ if (check_spu_isolate(prev, spu)) { /* Step 2. */ return 2; } disable_interrupts(prev, spu); /* Step 3. */ set_watchdog_timer(prev, spu); /* Step 4. */ inhibit_user_access(prev, spu); /* Step 5. */ if (check_spu_isolate(prev, spu)) { /* Step 6. */ return 6; } set_switch_pending(prev, spu); /* Step 7. */ save_mfc_cntl(prev, spu); /* Step 8. */ save_spu_runcntl(prev, spu); /* Step 9. */ save_mfc_sr1(prev, spu); /* Step 10. */ save_spu_status(prev, spu); /* Step 11. 
*/ save_mfc_stopped_status(prev, spu); /* Step 12. */ halt_mfc_decr(prev, spu); /* Step 13. */ save_timebase(prev, spu); /* Step 14. */ remove_other_spu_access(prev, spu); /* Step 15. */ do_mfc_mssync(prev, spu); /* Step 16. */ issue_mfc_tlbie(prev, spu); /* Step 17. */ handle_pending_interrupts(prev, spu); /* Step 18. */ return 0; } static void save_csa(struct spu_state *prev, struct spu *spu) { /* * Combine steps 19-44 of SPU context save sequence, which * save regions of the privileged & problem state areas. */ save_mfc_queues(prev, spu); /* Step 19. */ save_ppu_querymask(prev, spu); /* Step 20. */ save_ppu_querytype(prev, spu); /* Step 21. */ save_ppu_tagstatus(prev, spu); /* NEW. */ save_mfc_csr_tsq(prev, spu); /* Step 22. */ save_mfc_csr_cmd(prev, spu); /* Step 23. */ save_mfc_csr_ato(prev, spu); /* Step 24. */ save_mfc_tclass_id(prev, spu); /* Step 25. */ set_mfc_tclass_id(prev, spu); /* Step 26. */ save_mfc_cmd(prev, spu); /* Step 26a - moved from 44. */ purge_mfc_queue(prev, spu); /* Step 27. */ wait_purge_complete(prev, spu); /* Step 28. */ setup_mfc_sr1(prev, spu); /* Step 30. */ save_spu_npc(prev, spu); /* Step 31. */ save_spu_privcntl(prev, spu); /* Step 32. */ reset_spu_privcntl(prev, spu); /* Step 33. */ save_spu_lslr(prev, spu); /* Step 34. */ reset_spu_lslr(prev, spu); /* Step 35. */ save_spu_cfg(prev, spu); /* Step 36. */ save_pm_trace(prev, spu); /* Step 37. */ save_mfc_rag(prev, spu); /* Step 38. */ save_ppu_mb_stat(prev, spu); /* Step 39. */ save_ppu_mb(prev, spu); /* Step 40. */ save_ppuint_mb(prev, spu); /* Step 41. */ save_ch_part1(prev, spu); /* Step 42. */ save_spu_mb(prev, spu); /* Step 43. */ reset_ch(prev, spu); /* Step 45. */ } static void save_lscsa(struct spu_state *prev, struct spu *spu) { /* * Perform steps 46-57 of SPU context save sequence, * which save regions of the local store and register * file. */ resume_mfc_queue(prev, spu); /* Step 46. */ /* Step 47. */ setup_mfc_slbs(prev, spu, spu_save_code, sizeof(spu_save_code)); set_switch_active(prev, spu); /* Step 48. */ enable_interrupts(prev, spu); /* Step 49. */ save_ls_16kb(prev, spu); /* Step 50. */ set_spu_npc(prev, spu); /* Step 51. */ set_signot1(prev, spu); /* Step 52. */ set_signot2(prev, spu); /* Step 53. */ send_save_code(prev, spu); /* Step 54. */ set_ppu_querymask(prev, spu); /* Step 55. */ wait_tag_complete(prev, spu); /* Step 56. */ wait_spu_stopped(prev, spu); /* Step 57. */ } static void force_spu_isolate_exit(struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; struct spu_priv2 __iomem *priv2 = spu->priv2; /* Stop SPE execution and wait for completion. */ out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); iobarrier_rw(); POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); /* Restart SPE master runcntl. */ spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK); iobarrier_w(); /* Initiate isolate exit request and wait for completion. */ out_be64(&priv2->spu_privcntl_RW, 4LL); iobarrier_w(); out_be32(&prob->spu_runcntl_RW, 2); iobarrier_rw(); POLL_WHILE_FALSE((in_be32(&prob->spu_status_R) & SPU_STATUS_STOPPED_BY_STOP)); /* Reset load request to normal. */ out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL); iobarrier_w(); } /** * stop_spu_isolate * Check SPU run-control state and force isolated * exit function as necessary. 
*/ static void stop_spu_isolate(struct spu *spu) { struct spu_problem __iomem *prob = spu->problem; if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) { /* The SPU is in isolated state; the only way * to get it out is to perform an isolated * exit (clean) operation. */ force_spu_isolate_exit(spu); } } static void harvest(struct spu_state *prev, struct spu *spu) { /* * Perform steps 2-25 of SPU context restore sequence, * which resets an SPU either after a failed save, or * when using SPU for first time. */ disable_interrupts(prev, spu); /* Step 2. */ inhibit_user_access(prev, spu); /* Step 3. */ terminate_spu_app(prev, spu); /* Step 4. */ set_switch_pending(prev, spu); /* Step 5. */ stop_spu_isolate(spu); /* NEW. */ remove_other_spu_access(prev, spu); /* Step 6. */ suspend_mfc_and_halt_decr(prev, spu); /* Step 7. */ wait_suspend_mfc_complete(prev, spu); /* Step 8. */ if (!suspend_spe(prev, spu)) /* Step 9. */ clear_spu_status(prev, spu); /* Step 10. */ do_mfc_mssync(prev, spu); /* Step 11. */ issue_mfc_tlbie(prev, spu); /* Step 12. */ handle_pending_interrupts(prev, spu); /* Step 13. */ purge_mfc_queue(prev, spu); /* Step 14. */ wait_purge_complete(prev, spu); /* Step 15. */ reset_spu_privcntl(prev, spu); /* Step 16. */ reset_spu_lslr(prev, spu); /* Step 17. */ setup_mfc_sr1(prev, spu); /* Step 18. */ spu_invalidate_slbs(spu); /* Step 19. */ reset_ch_part1(prev, spu); /* Step 20. */ reset_ch_part2(prev, spu); /* Step 21. */ enable_interrupts(prev, spu); /* Step 22. */ set_switch_active(prev, spu); /* Step 23. */ set_mfc_tclass_id(prev, spu); /* Step 24. */ resume_mfc_queue(prev, spu); /* Step 25. */ } static void restore_lscsa(struct spu_state *next, struct spu *spu) { /* * Perform steps 26-40 of SPU context restore sequence, * which restores regions of the local store and register * file. */ set_watchdog_timer(next, spu); /* Step 26. */ setup_spu_status_part1(next, spu); /* Step 27. */ setup_spu_status_part2(next, spu); /* Step 28. */ restore_mfc_rag(next, spu); /* Step 29. */ /* Step 30. */ setup_mfc_slbs(next, spu, spu_restore_code, sizeof(spu_restore_code)); set_spu_npc(next, spu); /* Step 31. */ set_signot1(next, spu); /* Step 32. */ set_signot2(next, spu); /* Step 33. */ setup_decr(next, spu); /* Step 34. */ setup_ppu_mb(next, spu); /* Step 35. */ setup_ppuint_mb(next, spu); /* Step 36. */ send_restore_code(next, spu); /* Step 37. */ set_ppu_querymask(next, spu); /* Step 38. */ wait_tag_complete(next, spu); /* Step 39. */ wait_spu_stopped(next, spu); /* Step 40. */ } static void restore_csa(struct spu_state *next, struct spu *spu) { /* * Combine steps 41-76 of SPU context restore sequence, which * restore regions of the privileged & problem state areas. */ restore_spu_privcntl(next, spu); /* Step 41. */ restore_status_part1(next, spu); /* Step 42. */ restore_status_part2(next, spu); /* Step 43. */ restore_ls_16kb(next, spu); /* Step 44. */ wait_tag_complete(next, spu); /* Step 45. */ suspend_mfc(next, spu); /* Step 46. */ wait_suspend_mfc_complete(next, spu); /* Step 47. */ issue_mfc_tlbie(next, spu); /* Step 48. */ clear_interrupts(next, spu); /* Step 49. */ restore_mfc_queues(next, spu); /* Step 50. */ restore_ppu_querymask(next, spu); /* Step 51. */ restore_ppu_querytype(next, spu); /* Step 52. */ restore_mfc_csr_tsq(next, spu); /* Step 53. */ restore_mfc_csr_cmd(next, spu); /* Step 54. */ restore_mfc_csr_ato(next, spu); /* Step 55. */ restore_mfc_tclass_id(next, spu); /* Step 56. */ set_llr_event(next, spu); /* Step 57. */ restore_decr_wrapped(next, spu); /* Step 58. 
*/ restore_ch_part1(next, spu); /* Step 59. */ restore_ch_part2(next, spu); /* Step 60. */ restore_spu_lslr(next, spu); /* Step 61. */ restore_spu_cfg(next, spu); /* Step 62. */ restore_pm_trace(next, spu); /* Step 63. */ restore_spu_npc(next, spu); /* Step 64. */ restore_spu_mb(next, spu); /* Step 65. */ check_ppu_mb_stat(next, spu); /* Step 66. */ check_ppuint_mb_stat(next, spu); /* Step 67. */ spu_invalidate_slbs(spu); /* Modified Step 68. */ restore_mfc_sr1(next, spu); /* Step 69. */ set_int_route(next, spu); /* NEW */ restore_other_spu_access(next, spu); /* Step 70. */ restore_spu_runcntl(next, spu); /* Step 71. */ restore_mfc_cntl(next, spu); /* Step 72. */ enable_user_access(next, spu); /* Step 73. */ reset_switch_active(next, spu); /* Step 74. */ reenable_interrupts(next, spu); /* Step 75. */ } static int __do_spu_save(struct spu_state *prev, struct spu *spu) { int rc; /* * SPU context save can be broken into three phases: * * (a) quiesce [steps 2-16]. * (b) save of CSA, performed by PPE [steps 17-42] * (c) save of LSCSA, mostly performed by SPU [steps 43-52]. * * Returns 0 on success. * 2,6 if failed to quiece SPU * 53 if SPU-side of save failed. */ rc = quiece_spu(prev, spu); /* Steps 2-16. */ switch (rc) { default: case 2: case 6: harvest(prev, spu); return rc; break; case 0: break; } save_csa(prev, spu); /* Steps 17-43. */ save_lscsa(prev, spu); /* Steps 44-53. */ return check_save_status(prev, spu); /* Step 54. */ } static int __do_spu_restore(struct spu_state *next, struct spu *spu) { int rc; /* * SPU context restore can be broken into three phases: * * (a) harvest (or reset) SPU [steps 2-24]. * (b) restore LSCSA [steps 25-40], mostly performed by SPU. * (c) restore CSA [steps 41-76], performed by PPE. * * The 'harvest' step is not performed here, but rather * as needed below. */ restore_lscsa(next, spu); /* Steps 24-39. */ rc = check_restore_status(next, spu); /* Step 40. */ switch (rc) { default: /* Failed. Return now. */ return rc; break; case 0: /* Fall through to next step. */ break; } restore_csa(next, spu); return 0; } /** * spu_save - SPU context save, with locking. * @prev: pointer to SPU context save area, to be saved. * @spu: pointer to SPU iomem structure. * * Acquire locks, perform the save operation then return. */ int spu_save(struct spu_state *prev, struct spu *spu) { int rc; acquire_spu_lock(spu); /* Step 1. */ rc = __do_spu_save(prev, spu); /* Steps 2-53. */ release_spu_lock(spu); if (rc != 0 && rc != 2 && rc != 6) { panic("%s failed on SPU[%d], rc=%d.\n", __func__, spu->number, rc); } return 0; } EXPORT_SYMBOL_GPL(spu_save); /** * spu_restore - SPU context restore, with harvest and locking. * @new: pointer to SPU context save area, to be restored. * @spu: pointer to SPU iomem structure. * * Perform harvest + restore, as we may not be coming * from a previous successful save operation, and the * hardware state is unknown. 
*/ int spu_restore(struct spu_state *new, struct spu *spu) { int rc; acquire_spu_lock(spu); harvest(NULL, spu); spu->slb_replace = 0; rc = __do_spu_restore(new, spu); release_spu_lock(spu); if (rc) { panic("%s failed on SPU[%d] rc=%d.\n", __func__, spu->number, rc); } return rc; } EXPORT_SYMBOL_GPL(spu_restore); static void init_prob(struct spu_state *csa) { csa->spu_chnlcnt_RW[9] = 1; csa->spu_chnlcnt_RW[21] = 16; csa->spu_chnlcnt_RW[23] = 1; csa->spu_chnlcnt_RW[28] = 1; csa->spu_chnlcnt_RW[30] = 1; csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP; csa->prob.mb_stat_R = 0x000400; } static void init_priv1(struct spu_state *csa) { /* Enable decode, relocate, tlbie response, master runcntl. */ csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK | MFC_STATE1_MASTER_RUN_CONTROL_MASK | MFC_STATE1_PROBLEM_STATE_MASK | MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK; /* Enable OS-specific set of interrupts. */ csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR | CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR | CLASS0_ENABLE_SPU_ERROR_INTR; csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR | CLASS1_ENABLE_STORAGE_FAULT_INTR; csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR | CLASS2_ENABLE_SPU_HALT_INTR | CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR; } static void init_priv2(struct spu_state *csa) { csa->priv2.spu_lslr_RW = LS_ADDR_MASK; csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE | MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION | MFC_CNTL_DMA_QUEUES_EMPTY_MASK; } /** * spu_alloc_csa - allocate and initialize an SPU context save area. * * Allocate and initialize the contents of an SPU context save area. * This includes enabling address translation, interrupt masks, etc., * as appropriate for the given OS environment. * * Note that storage for the 'lscsa' is allocated separately, * as it is by far the largest of the context save regions, * and may need to be pinned or otherwise specially aligned. */ int spu_init_csa(struct spu_state *csa) { int rc; if (!csa) return -EINVAL; memset(csa, 0, sizeof(struct spu_state)); rc = spu_alloc_lscsa(csa); if (rc) return rc; spin_lock_init(&csa->register_lock); init_prob(csa); init_priv1(csa); init_priv2(csa); return 0; } void spu_fini_csa(struct spu_state *csa) { spu_free_lscsa(csa); }
linux-master
arch/powerpc/platforms/cell/spufs/switch.c
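/*
 * Illustrative sketch (not part of the kernel file above): the
 * send_mfc_dma() helper in switch.c splits a local-store transfer into
 * MFC_MAX_DMA_SIZE pieces before queueing each MFC command.  The
 * standalone userspace program below reproduces only that chunking
 * arithmetic so the address/offset bookkeeping is easy to follow.  The
 * 16 KB limit is an assumption for the sketch; the real value comes from
 * the MFC_MAX_DMA_SIZE definition in the kernel headers.
 */
#include <stdio.h>

#define EXAMPLE_MAX_DMA_SIZE 16384u	/* assumed per-command limit */

static void example_send_mfc_dma(unsigned long ea, unsigned int ls_offset,
				 unsigned int size)
{
	unsigned int transfer_size;

	while (size > 0) {
		/* Clamp each transfer to the per-command maximum. */
		transfer_size = (size > EXAMPLE_MAX_DMA_SIZE) ?
			EXAMPLE_MAX_DMA_SIZE : size;

		printf("queue DMA: ea=0x%lx ls=0x%x len=%u\n",
		       ea, ls_offset, transfer_size);

		/* Advance both sides of the copy by the amount queued. */
		size -= transfer_size;
		ea += transfer_size;
		ls_offset += transfer_size;
	}
}

int main(void)
{
	/* A 40 KB request becomes 16 KB + 16 KB + 8 KB commands. */
	example_send_mfc_dma(0x100000UL, 0, 40960);
	return 0;
}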
// SPDX-License-Identifier: GPL-2.0 #define DEBUG #include <linux/wait.h> #include <linux/ptrace.h> #include <asm/spu.h> #include <asm/spu_priv1.h> #include <asm/io.h> #include <asm/unistd.h> #include "spufs.h" /* interrupt-level stop callback function. */ void spufs_stop_callback(struct spu *spu, int irq) { struct spu_context *ctx = spu->ctx; /* * It should be impossible to preempt a context while an exception * is being processed, since the context switch code is specially * coded to deal with interrupts ... But, just in case, sanity check * the context pointer. It is OK to return doing nothing since * the exception will be regenerated when the context is resumed. */ if (ctx) { /* Copy exception arguments into module specific structure */ switch(irq) { case 0 : ctx->csa.class_0_pending = spu->class_0_pending; ctx->csa.class_0_dar = spu->class_0_dar; break; case 1 : ctx->csa.class_1_dsisr = spu->class_1_dsisr; ctx->csa.class_1_dar = spu->class_1_dar; break; case 2 : break; } /* ensure that the exception status has hit memory before a * thread waiting on the context's stop queue is woken */ smp_wmb(); wake_up_all(&ctx->stop_wq); } } int spu_stopped(struct spu_context *ctx, u32 *stat) { u64 dsisr; u32 stopped; stopped = SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP; top: *stat = ctx->ops->status_read(ctx); if (*stat & stopped) { /* * If the spu hasn't finished stopping, we need to * re-read the register to get the stopped value. */ if (*stat & SPU_STATUS_RUNNING) goto top; return 1; } if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags)) return 1; dsisr = ctx->csa.class_1_dsisr; if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)) return 1; if (ctx->csa.class_0_pending) return 1; return 0; } static int spu_setup_isolated(struct spu_context *ctx) { int ret; u64 __iomem *mfc_cntl; u64 sr1; u32 status; unsigned long timeout; const u32 status_loading = SPU_STATUS_RUNNING | SPU_STATUS_ISOLATED_STATE | SPU_STATUS_ISOLATED_LOAD_STATUS; ret = -ENODEV; if (!isolated_loader) goto out; /* * We need to exclude userspace access to the context. * * To protect against memory access we invalidate all ptes * and make sure the pagefault handlers block on the mutex. 
*/ spu_unmap_mappings(ctx); mfc_cntl = &ctx->spu->priv2->mfc_control_RW; /* purge the MFC DMA queue to ensure no spurious accesses before we * enter kernel mode */ timeout = jiffies + HZ; out_be64(mfc_cntl, MFC_CNTL_PURGE_DMA_REQUEST); while ((in_be64(mfc_cntl) & MFC_CNTL_PURGE_DMA_STATUS_MASK) != MFC_CNTL_PURGE_DMA_COMPLETE) { if (time_after(jiffies, timeout)) { printk(KERN_ERR "%s: timeout flushing MFC DMA queue\n", __func__); ret = -EIO; goto out; } cond_resched(); } /* clear purge status */ out_be64(mfc_cntl, 0); /* put the SPE in kernel mode to allow access to the loader */ sr1 = spu_mfc_sr1_get(ctx->spu); sr1 &= ~MFC_STATE1_PROBLEM_STATE_MASK; spu_mfc_sr1_set(ctx->spu, sr1); /* start the loader */ ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32); ctx->ops->signal2_write(ctx, (unsigned long)isolated_loader & 0xffffffff); ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE); ret = 0; timeout = jiffies + HZ; while (((status = ctx->ops->status_read(ctx)) & status_loading) == status_loading) { if (time_after(jiffies, timeout)) { printk(KERN_ERR "%s: timeout waiting for loader\n", __func__); ret = -EIO; goto out_drop_priv; } cond_resched(); } if (!(status & SPU_STATUS_RUNNING)) { /* If isolated LOAD has failed: run SPU, we will get a stop-and * signal later. */ pr_debug("%s: isolated LOAD failed\n", __func__); ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE); ret = -EACCES; goto out_drop_priv; } if (!(status & SPU_STATUS_ISOLATED_STATE)) { /* This isn't allowed by the CBEA, but check anyway */ pr_debug("%s: SPU fell out of isolated mode?\n", __func__); ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP); ret = -EINVAL; goto out_drop_priv; } out_drop_priv: /* Finished accessing the loader. Drop kernel mode */ sr1 |= MFC_STATE1_PROBLEM_STATE_MASK; spu_mfc_sr1_set(ctx->spu, sr1); out: return ret; } static int spu_run_init(struct spu_context *ctx, u32 *npc) { unsigned long runcntl = SPU_RUNCNTL_RUNNABLE; int ret; spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); /* * NOSCHED is synchronous scheduling with respect to the caller. * The caller waits for the context to be loaded. */ if (ctx->flags & SPU_CREATE_NOSCHED) { if (ctx->state == SPU_STATE_SAVED) { ret = spu_activate(ctx, 0); if (ret) return ret; } } /* * Apply special setup as required. 
*/ if (ctx->flags & SPU_CREATE_ISOLATE) { if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) { ret = spu_setup_isolated(ctx); if (ret) return ret; } /* * If userspace has set the runcntrl register (eg, to * issue an isolated exit), we need to re-set it here */ runcntl = ctx->ops->runcntl_read(ctx) & (SPU_RUNCNTL_RUNNABLE | SPU_RUNCNTL_ISOLATE); if (runcntl == 0) runcntl = SPU_RUNCNTL_RUNNABLE; } else { unsigned long privcntl; if (test_thread_flag(TIF_SINGLESTEP)) privcntl = SPU_PRIVCNTL_MODE_SINGLE_STEP; else privcntl = SPU_PRIVCNTL_MODE_NORMAL; ctx->ops->privcntl_write(ctx, privcntl); ctx->ops->npc_write(ctx, *npc); } ctx->ops->runcntl_write(ctx, runcntl); if (ctx->flags & SPU_CREATE_NOSCHED) { spuctx_switch_state(ctx, SPU_UTIL_USER); } else { if (ctx->state == SPU_STATE_SAVED) { ret = spu_activate(ctx, 0); if (ret) return ret; } else { spuctx_switch_state(ctx, SPU_UTIL_USER); } } set_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags); return 0; } static int spu_run_fini(struct spu_context *ctx, u32 *npc, u32 *status) { int ret = 0; spu_del_from_rq(ctx); *status = ctx->ops->status_read(ctx); *npc = ctx->ops->npc_read(ctx); spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED); clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags); spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status); spu_release(ctx); if (signal_pending(current)) ret = -ERESTARTSYS; return ret; } /* * SPU syscall restarting is tricky because we violate the basic * assumption that the signal handler is running on the interrupted * thread. Here instead, the handler runs on PowerPC user space code, * while the syscall was called from the SPU. * This means we can only do a very rough approximation of POSIX * signal semantics. */ static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret, unsigned int *npc) { int ret; switch (*spu_ret) { case -ERESTARTSYS: case -ERESTARTNOINTR: /* * Enter the regular syscall restarting for * sys_spu_run, then restart the SPU syscall * callback. */ *npc -= 8; ret = -ERESTARTSYS; break; case -ERESTARTNOHAND: case -ERESTART_RESTARTBLOCK: /* * Restart block is too hard for now, just return -EINTR * to the SPU. * ERESTARTNOHAND comes from sys_pause, we also return * -EINTR from there. * Assume that we need to be restarted ourselves though. 
*/ *spu_ret = -EINTR; ret = -ERESTARTSYS; break; default: printk(KERN_WARNING "%s: unexpected return code %ld\n", __func__, *spu_ret); ret = 0; } return ret; } static int spu_process_callback(struct spu_context *ctx) { struct spu_syscall_block s; u32 ls_pointer, npc; void __iomem *ls; long spu_ret; int ret; /* get syscall block from local store */ npc = ctx->ops->npc_read(ctx) & ~3; ls = (void __iomem *)ctx->ops->get_ls(ctx); ls_pointer = in_be32(ls + npc); if (ls_pointer > (LS_SIZE - sizeof(s))) return -EFAULT; memcpy_fromio(&s, ls + ls_pointer, sizeof(s)); /* do actual syscall without pinning the spu */ ret = 0; spu_ret = -ENOSYS; npc += 4; if (s.nr_ret < NR_syscalls) { spu_release(ctx); /* do actual system call from here */ spu_ret = spu_sys_callback(&s); if (spu_ret <= -ERESTARTSYS) { ret = spu_handle_restartsys(ctx, &spu_ret, &npc); } mutex_lock(&ctx->state_mutex); if (ret == -ERESTARTSYS) return ret; } /* need to re-get the ls, as it may have changed when we released the * spu */ ls = (void __iomem *)ctx->ops->get_ls(ctx); /* write result, jump over indirect pointer */ memcpy_toio(ls + ls_pointer, &spu_ret, sizeof(spu_ret)); ctx->ops->npc_write(ctx, npc); ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE); return ret; } long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event) { int ret; u32 status; if (mutex_lock_interruptible(&ctx->run_mutex)) return -ERESTARTSYS; ctx->event_return = 0; ret = spu_acquire(ctx); if (ret) goto out_unlock; spu_enable_spu(ctx); spu_update_sched_info(ctx); ret = spu_run_init(ctx, npc); if (ret) { spu_release(ctx); goto out; } do { ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status)); if (unlikely(ret)) { /* * This is nasty: we need the state_mutex for all the * bookkeeping even if the syscall was interrupted by * a signal. ewww. */ mutex_lock(&ctx->state_mutex); break; } if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags))) { if (!(status & SPU_STATUS_STOPPED_BY_STOP)) continue; } spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); if ((status & SPU_STATUS_STOPPED_BY_STOP) && (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) { ret = spu_process_callback(ctx); if (ret) break; status &= ~SPU_STATUS_STOPPED_BY_STOP; } ret = spufs_handle_class1(ctx); if (ret) break; ret = spufs_handle_class0(ctx); if (ret) break; if (signal_pending(current)) ret = -ERESTARTSYS; } while (!ret && !(status & (SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_SINGLE_STEP))); spu_disable_spu(ctx); ret = spu_run_fini(ctx, npc, &status); spu_yield(ctx); if ((status & SPU_STATUS_STOPPED_BY_STOP) && (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100)) ctx->stats.libassist++; if ((ret == 0) || ((ret == -ERESTARTSYS) && ((status & SPU_STATUS_STOPPED_BY_HALT) || (status & SPU_STATUS_SINGLE_STEP) || ((status & SPU_STATUS_STOPPED_BY_STOP) && (status >> SPU_STOP_STATUS_SHIFT != 0x2104))))) ret = status; /* Note: we don't need to force_sig SIGTRAP on single-step * since we have TIF_SINGLESTEP set, thus the kernel will do * it upon return from the syscall anyway. */ if (unlikely(status & SPU_STATUS_SINGLE_STEP)) ret = -ERESTARTSYS; else if (unlikely((status & SPU_STATUS_STOPPED_BY_STOP) && (status >> SPU_STOP_STATUS_SHIFT) == 0x3fff)) { force_sig(SIGTRAP); ret = -ERESTARTSYS; } out: *event = ctx->event_return; out_unlock: mutex_unlock(&ctx->run_mutex); return ret; }
linux-master
arch/powerpc/platforms/cell/spufs/run.c
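/*
 * Illustrative sketch (not part of the kernel file above): spufs_run_spu()
 * in run.c keys several decisions off the stop-and-signal code held in the
 * upper half of SPU_Status (the "status >> SPU_STOP_STATUS_SHIFT" tests
 * for 0x2104, 0x2100..0x21ff and 0x3fff).  The helper below decodes a
 * status word the same way in plain userspace C.  The shift amount and the
 * STOPPED_BY_STOP bit are assumptions mirroring the kernel headers, which
 * are not reproduced in this file.
 */
#include <stdio.h>

#define EX_SPU_STOP_STATUS_SHIFT	16	/* assumed, per asm/spu.h */
#define EX_SPU_STATUS_STOPPED_BY_STOP	0x02	/* assumed bit position */

static const char *ex_decode_stop(unsigned int status)
{
	unsigned int code;

	if (!(status & EX_SPU_STATUS_STOPPED_BY_STOP))
		return "not stopped by stop-and-signal";

	code = status >> EX_SPU_STOP_STATUS_SHIFT;
	if (code == 0x2104)
		return "PPE-assisted syscall callback";
	if ((code & 0x3f00) == 0x2100)
		return "library-assisted call (libassist accounting)";
	if (code == 0x3fff)
		return "debugger breakpoint (SIGTRAP)";
	return "application stop code";
}

int main(void)
{
	unsigned int samples[] = { 0x21040002, 0x3fff0002, 0x12340002, 0x0 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%08x -> %s\n", samples[i], ex_decode_stop(samples[i]));
	return 0;
}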
// SPDX-License-Identifier: GPL-2.0-or-later /* * SPU file system * * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 * * Author: Arnd Bergmann <[email protected]> */ #include <linux/list.h> #include <linux/slab.h> #include "spufs.h" struct spu_gang *alloc_spu_gang(void) { struct spu_gang *gang; gang = kzalloc(sizeof *gang, GFP_KERNEL); if (!gang) goto out; kref_init(&gang->kref); mutex_init(&gang->mutex); mutex_init(&gang->aff_mutex); INIT_LIST_HEAD(&gang->list); INIT_LIST_HEAD(&gang->aff_list_head); out: return gang; } static void destroy_spu_gang(struct kref *kref) { struct spu_gang *gang; gang = container_of(kref, struct spu_gang, kref); WARN_ON(gang->contexts || !list_empty(&gang->list)); kfree(gang); } struct spu_gang *get_spu_gang(struct spu_gang *gang) { kref_get(&gang->kref); return gang; } int put_spu_gang(struct spu_gang *gang) { return kref_put(&gang->kref, &destroy_spu_gang); } void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx) { mutex_lock(&gang->mutex); ctx->gang = get_spu_gang(gang); list_add(&ctx->gang_list, &gang->list); gang->contexts++; mutex_unlock(&gang->mutex); } void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx) { mutex_lock(&gang->mutex); WARN_ON(ctx->gang != gang); if (!list_empty(&ctx->aff_list)) { list_del_init(&ctx->aff_list); gang->aff_flags &= ~AFF_OFFSETS_SET; } list_del_init(&ctx->gang_list); gang->contexts--; mutex_unlock(&gang->mutex); put_spu_gang(gang); }
linux-master
arch/powerpc/platforms/cell/spufs/gang.c
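/*
 * Illustrative sketch (not kernel code): gang.c above manages spu_gang
 * lifetime with a kref -- every context takes a reference when it joins,
 * and the gang is destroyed when the last put drops the count to zero.
 * The toy below shows the same get/put/release pattern in standalone C,
 * with a plain counter standing in for struct kref.  All names are
 * invented for the example; it is single-threaded and skips the mutex
 * protection the kernel version needs.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_gang {
	int refcount;
	int contexts;
};

static struct toy_gang *toy_gang_alloc(void)
{
	struct toy_gang *gang = calloc(1, sizeof(*gang));

	if (gang)
		gang->refcount = 1;	/* creator's reference */
	return gang;
}

static struct toy_gang *toy_gang_get(struct toy_gang *gang)
{
	gang->refcount++;
	return gang;
}

static void toy_gang_put(struct toy_gang *gang)
{
	if (--gang->refcount == 0) {
		printf("last reference dropped, freeing gang\n");
		free(gang);
	}
}

int main(void)
{
	struct toy_gang *gang = toy_gang_alloc();

	if (!gang)
		return 1;
	toy_gang_get(gang);	/* a context joins: ctx->gang = get(...) */
	gang->contexts++;
	gang->contexts--;	/* the context leaves ... */
	toy_gang_put(gang);	/* ... and drops its reference */
	toy_gang_put(gang);	/* creator's reference: triggers the free */
	return 0;
}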
// SPDX-License-Identifier: GPL-2.0 #include <linux/file.h> #include <linux/fs.h> #include <linux/export.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/slab.h> #include <linux/uaccess.h> #include "spufs.h" /** * sys_spu_run - run code loaded into an SPU * * @unpc: next program counter for the SPU * @ustatus: status of the SPU * * This system call transfers the control of execution of a * user space thread to an SPU. It will return when the * SPU has finished executing or when it hits an error * condition and it will be interrupted if a signal needs * to be delivered to a handler in user space. * * The next program counter is set to the passed value * before the SPU starts fetching code and the user space * pointer gets updated with the new value when returning * from kernel space. * * The status value returned from spu_run reflects the * value of the spu_status register after the SPU has stopped. * */ static long do_spu_run(struct file *filp, __u32 __user *unpc, __u32 __user *ustatus) { long ret; struct spufs_inode_info *i; u32 npc, status; ret = -EFAULT; if (get_user(npc, unpc)) goto out; /* check if this file was created by spu_create */ ret = -EINVAL; if (filp->f_op != &spufs_context_fops) goto out; i = SPUFS_I(file_inode(filp)); ret = spufs_run_spu(i->i_ctx, &npc, &status); if (put_user(npc, unpc)) ret = -EFAULT; if (ustatus && put_user(status, ustatus)) ret = -EFAULT; out: return ret; } static long do_spu_create(const char __user *pathname, unsigned int flags, umode_t mode, struct file *neighbor) { struct path path; struct dentry *dentry; int ret; dentry = user_path_create(AT_FDCWD, pathname, &path, LOOKUP_DIRECTORY); ret = PTR_ERR(dentry); if (!IS_ERR(dentry)) { ret = spufs_create(&path, dentry, flags, mode, neighbor); done_path_create(&path, dentry); } return ret; } struct spufs_calls spufs_calls = { .create_thread = do_spu_create, .spu_run = do_spu_run, .notify_spus_active = do_notify_spus_active, .owner = THIS_MODULE, #ifdef CONFIG_COREDUMP .coredump_extra_notes_size = spufs_coredump_extra_notes_size, .coredump_extra_notes_write = spufs_coredump_extra_notes_write, #endif };
linux-master
arch/powerpc/platforms/cell/spufs/syscalls.c
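/*
 * Illustrative sketch (not kernel code): do_spu_run() in syscalls.c above
 * treats the user's npc as an in/out parameter and the status pointer as
 * an optional out parameter -- it copies npc in, runs the context, then
 * writes the updated npc and final status back to userspace.  The
 * standalone helper below mirrors only that calling convention;
 * run_context() is a made-up stand-in for spufs_run_spu() and does no
 * real work, and plain pointer dereferences stand in for get_user() and
 * put_user().
 */
#include <stdio.h>

static long run_context(unsigned int *npc, unsigned int *status)
{
	/* Pretend the SPU executed one instruction and stopped cleanly. */
	*npc += 4;
	*status = 0;
	return 0;
}

static long example_spu_run(unsigned int *unpc, unsigned int *ustatus)
{
	unsigned int npc, status;
	long ret;

	npc = *unpc;			/* get_user() in the kernel */
	ret = run_context(&npc, &status);
	*unpc = npc;			/* put_user() in the kernel */
	if (ustatus)			/* the status pointer is optional */
		*ustatus = status;
	return ret;
}

int main(void)
{
	unsigned int npc = 0x100, status = 0xffffffff;
	long ret = example_spu_run(&npc, &status);

	printf("ret=%ld npc=0x%x status=0x%x\n", ret, npc, status);
	return 0;
}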
// SPDX-License-Identifier: GPL-2.0-or-later /* * SPU file system * * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 * * Author: Arnd Bergmann <[email protected]> */ #include <linux/file.h> #include <linux/fs.h> #include <linux/fs_context.h> #include <linux/fs_parser.h> #include <linux/fsnotify.h> #include <linux/backing-dev.h> #include <linux/init.h> #include <linux/ioctl.h> #include <linux/module.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/pagemap.h> #include <linux/poll.h> #include <linux/of.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <asm/spu.h> #include <asm/spu_priv1.h> #include <linux/uaccess.h> #include "spufs.h" struct spufs_sb_info { bool debug; }; static struct kmem_cache *spufs_inode_cache; char *isolated_loader; static int isolated_loader_size; static struct spufs_sb_info *spufs_get_sb_info(struct super_block *sb) { return sb->s_fs_info; } static struct inode * spufs_alloc_inode(struct super_block *sb) { struct spufs_inode_info *ei; ei = kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL); if (!ei) return NULL; ei->i_gang = NULL; ei->i_ctx = NULL; ei->i_openers = 0; return &ei->vfs_inode; } static void spufs_free_inode(struct inode *inode) { kmem_cache_free(spufs_inode_cache, SPUFS_I(inode)); } static void spufs_init_once(void *p) { struct spufs_inode_info *ei = p; inode_init_once(&ei->vfs_inode); } static struct inode * spufs_new_inode(struct super_block *sb, umode_t mode) { struct inode *inode; inode = new_inode(sb); if (!inode) goto out; inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode); out: return inode; } static int spufs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); if ((attr->ia_valid & ATTR_SIZE) && (attr->ia_size != inode->i_size)) return -EINVAL; setattr_copy(&nop_mnt_idmap, inode, attr); mark_inode_dirty(inode); return 0; } static int spufs_new_file(struct super_block *sb, struct dentry *dentry, const struct file_operations *fops, umode_t mode, size_t size, struct spu_context *ctx) { static const struct inode_operations spufs_file_iops = { .setattr = spufs_setattr, }; struct inode *inode; int ret; ret = -ENOSPC; inode = spufs_new_inode(sb, S_IFREG | mode); if (!inode) goto out; ret = 0; inode->i_op = &spufs_file_iops; inode->i_fop = fops; inode->i_size = size; inode->i_private = SPUFS_I(inode)->i_ctx = get_spu_context(ctx); d_add(dentry, inode); out: return ret; } static void spufs_evict_inode(struct inode *inode) { struct spufs_inode_info *ei = SPUFS_I(inode); clear_inode(inode); if (ei->i_ctx) put_spu_context(ei->i_ctx); if (ei->i_gang) put_spu_gang(ei->i_gang); } static void spufs_prune_dir(struct dentry *dir) { struct dentry *dentry, *tmp; inode_lock(d_inode(dir)); list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) { spin_lock(&dentry->d_lock); if (simple_positive(dentry)) { dget_dlock(dentry); __d_drop(dentry); spin_unlock(&dentry->d_lock); simple_unlink(d_inode(dir), dentry); /* XXX: what was dcache_lock protecting here? 
Other * filesystems (IB, configfs) release dcache_lock * before unlink */ dput(dentry); } else { spin_unlock(&dentry->d_lock); } } shrink_dcache_parent(dir); inode_unlock(d_inode(dir)); } /* Caller must hold parent->i_mutex */ static int spufs_rmdir(struct inode *parent, struct dentry *dir) { /* remove all entries */ int res; spufs_prune_dir(dir); d_drop(dir); res = simple_rmdir(parent, dir); /* We have to give up the mm_struct */ spu_forget(SPUFS_I(d_inode(dir))->i_ctx); return res; } static int spufs_fill_dir(struct dentry *dir, const struct spufs_tree_descr *files, umode_t mode, struct spu_context *ctx) { while (files->name && files->name[0]) { int ret; struct dentry *dentry = d_alloc_name(dir, files->name); if (!dentry) return -ENOMEM; ret = spufs_new_file(dir->d_sb, dentry, files->ops, files->mode & mode, files->size, ctx); if (ret) return ret; files++; } return 0; } static int spufs_dir_close(struct inode *inode, struct file *file) { struct inode *parent; struct dentry *dir; int ret; dir = file->f_path.dentry; parent = d_inode(dir->d_parent); inode_lock_nested(parent, I_MUTEX_PARENT); ret = spufs_rmdir(parent, dir); inode_unlock(parent); WARN_ON(ret); return dcache_dir_close(inode, file); } const struct file_operations spufs_context_fops = { .open = dcache_dir_open, .release = spufs_dir_close, .llseek = dcache_dir_lseek, .read = generic_read_dir, .iterate_shared = dcache_readdir, .fsync = noop_fsync, }; EXPORT_SYMBOL_GPL(spufs_context_fops); static int spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags, umode_t mode) { int ret; struct inode *inode; struct spu_context *ctx; inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR); if (!inode) return -ENOSPC; inode_init_owner(&nop_mnt_idmap, inode, dir, mode | S_IFDIR); ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */ SPUFS_I(inode)->i_ctx = ctx; if (!ctx) { iput(inode); return -ENOSPC; } ctx->flags = flags; inode->i_op = &simple_dir_inode_operations; inode->i_fop = &simple_dir_operations; inode_lock(inode); dget(dentry); inc_nlink(dir); inc_nlink(inode); d_instantiate(dentry, inode); if (flags & SPU_CREATE_NOSCHED) ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents, mode, ctx); else ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx); if (!ret && spufs_get_sb_info(dir->i_sb)->debug) ret = spufs_fill_dir(dentry, spufs_dir_debug_contents, mode, ctx); if (ret) spufs_rmdir(dir, dentry); inode_unlock(inode); return ret; } static int spufs_context_open(const struct path *path) { int ret; struct file *filp; ret = get_unused_fd_flags(0); if (ret < 0) return ret; filp = dentry_open(path, O_RDONLY, current_cred()); if (IS_ERR(filp)) { put_unused_fd(ret); return PTR_ERR(filp); } filp->f_op = &spufs_context_fops; fd_install(ret, filp); return ret; } static struct spu_context * spufs_assert_affinity(unsigned int flags, struct spu_gang *gang, struct file *filp) { struct spu_context *tmp, *neighbor, *err; int count, node; int aff_supp; aff_supp = !list_empty(&(list_entry(cbe_spu_info[0].spus.next, struct spu, cbe_list))->aff_list); if (!aff_supp) return ERR_PTR(-EINVAL); if (flags & SPU_CREATE_GANG) return ERR_PTR(-EINVAL); if (flags & SPU_CREATE_AFFINITY_MEM && gang->aff_ref_ctx && gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM) return ERR_PTR(-EEXIST); if (gang->aff_flags & AFF_MERGED) return ERR_PTR(-EBUSY); neighbor = NULL; if (flags & SPU_CREATE_AFFINITY_SPU) { if (!filp || filp->f_op != &spufs_context_fops) return ERR_PTR(-EINVAL); neighbor = get_spu_context( SPUFS_I(file_inode(filp))->i_ctx); if 
(!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) && !list_is_last(&neighbor->aff_list, &gang->aff_list_head) && !list_entry(neighbor->aff_list.next, struct spu_context, aff_list)->aff_head) { err = ERR_PTR(-EEXIST); goto out_put_neighbor; } if (gang != neighbor->gang) { err = ERR_PTR(-EINVAL); goto out_put_neighbor; } count = 1; list_for_each_entry(tmp, &gang->aff_list_head, aff_list) count++; if (list_empty(&neighbor->aff_list)) count++; for (node = 0; node < MAX_NUMNODES; node++) { if ((cbe_spu_info[node].n_spus - atomic_read( &cbe_spu_info[node].reserved_spus)) >= count) break; } if (node == MAX_NUMNODES) { err = ERR_PTR(-EEXIST); goto out_put_neighbor; } } return neighbor; out_put_neighbor: put_spu_context(neighbor); return err; } static void spufs_set_affinity(unsigned int flags, struct spu_context *ctx, struct spu_context *neighbor) { if (flags & SPU_CREATE_AFFINITY_MEM) ctx->gang->aff_ref_ctx = ctx; if (flags & SPU_CREATE_AFFINITY_SPU) { if (list_empty(&neighbor->aff_list)) { list_add_tail(&neighbor->aff_list, &ctx->gang->aff_list_head); neighbor->aff_head = 1; } if (list_is_last(&neighbor->aff_list, &ctx->gang->aff_list_head) || list_entry(neighbor->aff_list.next, struct spu_context, aff_list)->aff_head) { list_add(&ctx->aff_list, &neighbor->aff_list); } else { list_add_tail(&ctx->aff_list, &neighbor->aff_list); if (neighbor->aff_head) { neighbor->aff_head = 0; ctx->aff_head = 1; } } if (!ctx->gang->aff_ref_ctx) ctx->gang->aff_ref_ctx = ctx; } } static int spufs_create_context(struct inode *inode, struct dentry *dentry, struct vfsmount *mnt, int flags, umode_t mode, struct file *aff_filp) { int ret; int affinity; struct spu_gang *gang; struct spu_context *neighbor; struct path path = {.mnt = mnt, .dentry = dentry}; if ((flags & SPU_CREATE_NOSCHED) && !capable(CAP_SYS_NICE)) return -EPERM; if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE)) == SPU_CREATE_ISOLATE) return -EINVAL; if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader) return -ENODEV; gang = NULL; neighbor = NULL; affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU); if (affinity) { gang = SPUFS_I(inode)->i_gang; if (!gang) return -EINVAL; mutex_lock(&gang->aff_mutex); neighbor = spufs_assert_affinity(flags, gang, aff_filp); if (IS_ERR(neighbor)) { ret = PTR_ERR(neighbor); goto out_aff_unlock; } } ret = spufs_mkdir(inode, dentry, flags, mode & 0777); if (ret) goto out_aff_unlock; if (affinity) { spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx, neighbor); if (neighbor) put_spu_context(neighbor); } ret = spufs_context_open(&path); if (ret < 0) WARN_ON(spufs_rmdir(inode, dentry)); out_aff_unlock: if (affinity) mutex_unlock(&gang->aff_mutex); return ret; } static int spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode) { int ret; struct inode *inode; struct spu_gang *gang; ret = -ENOSPC; inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR); if (!inode) goto out; ret = 0; inode_init_owner(&nop_mnt_idmap, inode, dir, mode | S_IFDIR); gang = alloc_spu_gang(); SPUFS_I(inode)->i_ctx = NULL; SPUFS_I(inode)->i_gang = gang; if (!gang) { ret = -ENOMEM; goto out_iput; } inode->i_op = &simple_dir_inode_operations; inode->i_fop = &simple_dir_operations; d_instantiate(dentry, inode); inc_nlink(dir); inc_nlink(d_inode(dentry)); return ret; out_iput: iput(inode); out: return ret; } static int spufs_gang_open(const struct path *path) { int ret; struct file *filp; ret = get_unused_fd_flags(0); if (ret < 0) return ret; /* * get references for dget and mntget, will be released * in error 
path of *_open(). */ filp = dentry_open(path, O_RDONLY, current_cred()); if (IS_ERR(filp)) { put_unused_fd(ret); return PTR_ERR(filp); } filp->f_op = &simple_dir_operations; fd_install(ret, filp); return ret; } static int spufs_create_gang(struct inode *inode, struct dentry *dentry, struct vfsmount *mnt, umode_t mode) { struct path path = {.mnt = mnt, .dentry = dentry}; int ret; ret = spufs_mkgang(inode, dentry, mode & 0777); if (!ret) { ret = spufs_gang_open(&path); if (ret < 0) { int err = simple_rmdir(inode, dentry); WARN_ON(err); } } return ret; } static struct file_system_type spufs_type; long spufs_create(const struct path *path, struct dentry *dentry, unsigned int flags, umode_t mode, struct file *filp) { struct inode *dir = d_inode(path->dentry); int ret; /* check if we are on spufs */ if (path->dentry->d_sb->s_type != &spufs_type) return -EINVAL; /* don't accept undefined flags */ if (flags & (~SPU_CREATE_FLAG_ALL)) return -EINVAL; /* only threads can be underneath a gang */ if (path->dentry != path->dentry->d_sb->s_root) if ((flags & SPU_CREATE_GANG) || !SPUFS_I(dir)->i_gang) return -EINVAL; mode &= ~current_umask(); if (flags & SPU_CREATE_GANG) ret = spufs_create_gang(dir, dentry, path->mnt, mode); else ret = spufs_create_context(dir, dentry, path->mnt, flags, mode, filp); if (ret >= 0) fsnotify_mkdir(dir, dentry); return ret; } /* File system initialization */ struct spufs_fs_context { kuid_t uid; kgid_t gid; umode_t mode; }; enum { Opt_uid, Opt_gid, Opt_mode, Opt_debug, }; static const struct fs_parameter_spec spufs_fs_parameters[] = { fsparam_u32 ("gid", Opt_gid), fsparam_u32oct ("mode", Opt_mode), fsparam_u32 ("uid", Opt_uid), fsparam_flag ("debug", Opt_debug), {} }; static int spufs_show_options(struct seq_file *m, struct dentry *root) { struct spufs_sb_info *sbi = spufs_get_sb_info(root->d_sb); struct inode *inode = root->d_inode; if (!uid_eq(inode->i_uid, GLOBAL_ROOT_UID)) seq_printf(m, ",uid=%u", from_kuid_munged(&init_user_ns, inode->i_uid)); if (!gid_eq(inode->i_gid, GLOBAL_ROOT_GID)) seq_printf(m, ",gid=%u", from_kgid_munged(&init_user_ns, inode->i_gid)); if ((inode->i_mode & S_IALLUGO) != 0775) seq_printf(m, ",mode=%o", inode->i_mode); if (sbi->debug) seq_puts(m, ",debug"); return 0; } static int spufs_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct spufs_fs_context *ctx = fc->fs_private; struct spufs_sb_info *sbi = fc->s_fs_info; struct fs_parse_result result; kuid_t uid; kgid_t gid; int opt; opt = fs_parse(fc, spufs_fs_parameters, param, &result); if (opt < 0) return opt; switch (opt) { case Opt_uid: uid = make_kuid(current_user_ns(), result.uint_32); if (!uid_valid(uid)) return invalf(fc, "Unknown uid"); ctx->uid = uid; break; case Opt_gid: gid = make_kgid(current_user_ns(), result.uint_32); if (!gid_valid(gid)) return invalf(fc, "Unknown gid"); ctx->gid = gid; break; case Opt_mode: ctx->mode = result.uint_32 & S_IALLUGO; break; case Opt_debug: sbi->debug = true; break; } return 0; } static void spufs_exit_isolated_loader(void) { free_pages((unsigned long) isolated_loader, get_order(isolated_loader_size)); } static void __init spufs_init_isolated_loader(void) { struct device_node *dn; const char *loader; int size; dn = of_find_node_by_path("/spu-isolation"); if (!dn) return; loader = of_get_property(dn, "loader", &size); of_node_put(dn); if (!loader) return; /* the loader must be align on a 16 byte boundary */ isolated_loader = (char *)__get_free_pages(GFP_KERNEL, get_order(size)); if (!isolated_loader) return; isolated_loader_size = size; 
memcpy(isolated_loader, loader, size); printk(KERN_INFO "spufs: SPU isolation mode enabled\n"); } static int spufs_create_root(struct super_block *sb, struct fs_context *fc) { struct spufs_fs_context *ctx = fc->fs_private; struct inode *inode; if (!spu_management_ops) return -ENODEV; inode = spufs_new_inode(sb, S_IFDIR | ctx->mode); if (!inode) return -ENOMEM; inode->i_uid = ctx->uid; inode->i_gid = ctx->gid; inode->i_op = &simple_dir_inode_operations; inode->i_fop = &simple_dir_operations; SPUFS_I(inode)->i_ctx = NULL; inc_nlink(inode); sb->s_root = d_make_root(inode); if (!sb->s_root) return -ENOMEM; return 0; } static const struct super_operations spufs_ops = { .alloc_inode = spufs_alloc_inode, .free_inode = spufs_free_inode, .statfs = simple_statfs, .evict_inode = spufs_evict_inode, .show_options = spufs_show_options, }; static int spufs_fill_super(struct super_block *sb, struct fs_context *fc) { sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize = PAGE_SIZE; sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = SPUFS_MAGIC; sb->s_op = &spufs_ops; return spufs_create_root(sb, fc); } static int spufs_get_tree(struct fs_context *fc) { return get_tree_single(fc, spufs_fill_super); } static void spufs_free_fc(struct fs_context *fc) { kfree(fc->s_fs_info); } static const struct fs_context_operations spufs_context_ops = { .free = spufs_free_fc, .parse_param = spufs_parse_param, .get_tree = spufs_get_tree, }; static int spufs_init_fs_context(struct fs_context *fc) { struct spufs_fs_context *ctx; struct spufs_sb_info *sbi; ctx = kzalloc(sizeof(struct spufs_fs_context), GFP_KERNEL); if (!ctx) goto nomem; sbi = kzalloc(sizeof(struct spufs_sb_info), GFP_KERNEL); if (!sbi) goto nomem_ctx; ctx->uid = current_uid(); ctx->gid = current_gid(); ctx->mode = 0755; fc->fs_private = ctx; fc->s_fs_info = sbi; fc->ops = &spufs_context_ops; return 0; nomem_ctx: kfree(ctx); nomem: return -ENOMEM; } static struct file_system_type spufs_type = { .owner = THIS_MODULE, .name = "spufs", .init_fs_context = spufs_init_fs_context, .parameters = spufs_fs_parameters, .kill_sb = kill_litter_super, }; MODULE_ALIAS_FS("spufs"); static int __init spufs_init(void) { int ret; ret = -ENODEV; if (!spu_management_ops) goto out; ret = -ENOMEM; spufs_inode_cache = kmem_cache_create("spufs_inode_cache", sizeof(struct spufs_inode_info), 0, SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, spufs_init_once); if (!spufs_inode_cache) goto out; ret = spu_sched_init(); if (ret) goto out_cache; ret = register_spu_syscalls(&spufs_calls); if (ret) goto out_sched; ret = register_filesystem(&spufs_type); if (ret) goto out_syscalls; spufs_init_isolated_loader(); return 0; out_syscalls: unregister_spu_syscalls(&spufs_calls); out_sched: spu_sched_exit(); out_cache: kmem_cache_destroy(spufs_inode_cache); out: return ret; } module_init(spufs_init); static void __exit spufs_exit(void) { spu_sched_exit(); spufs_exit_isolated_loader(); unregister_spu_syscalls(&spufs_calls); unregister_filesystem(&spufs_type); kmem_cache_destroy(spufs_inode_cache); } module_exit(spufs_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arnd Bergmann <[email protected]>");
linux-master
arch/powerpc/platforms/cell/spufs/inode.c
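The inode.c code above implements spufs_create(), which userspace reaches through the spu_create(2) system call and then drives with spu_run(2). Below is a minimal, hypothetical userspace sketch of that flow; the /spu mount point, the syscall numbers taken from <sys/syscall.h> on powerpc, and the three-argument spu_create form are assumptions about the surrounding environment, not taken from the file above.

/*
 * Hedged userspace sketch (powerpc only, not part of the kernel file above).
 * It assumes spufs is mounted at /spu and that __NR_spu_create/__NR_spu_run
 * are provided by <sys/syscall.h>.
 */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/*
	 * Create a new SPU context directory under the assumed /spu mount
	 * point; spufs_create_context() above populates it with files.
	 */
	int ctx_fd = syscall(__NR_spu_create, "/spu/example", 0, 0755);

	if (ctx_fd < 0) {
		perror("spu_create");
		return 1;
	}

	/*
	 * A real user would first copy an SPU program into the context's
	 * "mem" file; that step is omitted in this sketch.  spu_run then
	 * transfers control to the SPU until it stops.
	 */
	unsigned int npc = 0;
	long status = syscall(__NR_spu_run, ctx_fd, &npc, NULL);

	printf("spu_run returned %ld, npc=0x%x\n", status, npc);
	close(ctx_fd);
	return 0;
}

The directory returned by the call is the context directory that spufs_fill_dir() populates with files such as mem, regs and the mailbox files.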
// SPDX-License-Identifier: GPL-2.0-or-later /* backing_ops.c - query/set operations on saved SPU context. * * Copyright (C) IBM 2005 * Author: Mark Nutter <[email protected]> * * These register operations allow SPUFS to operate on saved * SPU contexts rather than hardware. */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/poll.h> #include <asm/io.h> #include <asm/spu.h> #include <asm/spu_csa.h> #include <asm/spu_info.h> #include <asm/mmu_context.h> #include "spufs.h" /* * Reads/writes to various problem and priv2 registers require * state changes, i.e. generate SPU events, modify channel * counts, etc. */ static void gen_spu_event(struct spu_context *ctx, u32 event) { u64 ch0_cnt; u64 ch0_data; u64 ch1_data; ch0_cnt = ctx->csa.spu_chnlcnt_RW[0]; ch0_data = ctx->csa.spu_chnldata_RW[0]; ch1_data = ctx->csa.spu_chnldata_RW[1]; ctx->csa.spu_chnldata_RW[0] |= event; if ((ch0_cnt == 0) && !(ch0_data & event) && (ch1_data & event)) { ctx->csa.spu_chnlcnt_RW[0] = 1; } } static int spu_backing_mbox_read(struct spu_context *ctx, u32 * data) { u32 mbox_stat; int ret = 0; spin_lock(&ctx->csa.register_lock); mbox_stat = ctx->csa.prob.mb_stat_R; if (mbox_stat & 0x0000ff) { /* Read the first available word. * Implementation note: the depth * of pu_mb_R is currently 1. */ *data = ctx->csa.prob.pu_mb_R; ctx->csa.prob.mb_stat_R &= ~(0x0000ff); ctx->csa.spu_chnlcnt_RW[28] = 1; gen_spu_event(ctx, MFC_PU_MAILBOX_AVAILABLE_EVENT); ret = 4; } spin_unlock(&ctx->csa.register_lock); return ret; } static u32 spu_backing_mbox_stat_read(struct spu_context *ctx) { return ctx->csa.prob.mb_stat_R; } static __poll_t spu_backing_mbox_stat_poll(struct spu_context *ctx, __poll_t events) { __poll_t ret; u32 stat; ret = 0; spin_lock_irq(&ctx->csa.register_lock); stat = ctx->csa.prob.mb_stat_R; /* if the requested event is there, return the poll mask, otherwise enable the interrupt to get notified, but first mark any pending interrupts as done so we don't get woken up unnecessarily */ if (events & (EPOLLIN | EPOLLRDNORM)) { if (stat & 0xff0000) ret |= EPOLLIN | EPOLLRDNORM; else { ctx->csa.priv1.int_stat_class2_RW &= ~CLASS2_MAILBOX_INTR; ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_INTR; } } if (events & (EPOLLOUT | EPOLLWRNORM)) { if (stat & 0x00ff00) ret = EPOLLOUT | EPOLLWRNORM; else { ctx->csa.priv1.int_stat_class2_RW &= ~CLASS2_MAILBOX_THRESHOLD_INTR; ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR; } } spin_unlock_irq(&ctx->csa.register_lock); return ret; } static int spu_backing_ibox_read(struct spu_context *ctx, u32 * data) { int ret; spin_lock(&ctx->csa.register_lock); if (ctx->csa.prob.mb_stat_R & 0xff0000) { /* Read the first available word. * Implementation note: the depth * of puint_mb_R is currently 1. 
*/ *data = ctx->csa.priv2.puint_mb_R; ctx->csa.prob.mb_stat_R &= ~(0xff0000); ctx->csa.spu_chnlcnt_RW[30] = 1; gen_spu_event(ctx, MFC_PU_INT_MAILBOX_AVAILABLE_EVENT); ret = 4; } else { /* make sure we get woken up by the interrupt */ ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_INTR; ret = 0; } spin_unlock(&ctx->csa.register_lock); return ret; } static int spu_backing_wbox_write(struct spu_context *ctx, u32 data) { int ret; spin_lock(&ctx->csa.register_lock); if ((ctx->csa.prob.mb_stat_R) & 0x00ff00) { int slot = ctx->csa.spu_chnlcnt_RW[29]; int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8; /* We have space to write wbox_data. * Implementation note: the depth * of spu_mb_W is currently 4. */ BUG_ON(avail != (4 - slot)); ctx->csa.spu_mailbox_data[slot] = data; ctx->csa.spu_chnlcnt_RW[29] = ++slot; ctx->csa.prob.mb_stat_R &= ~(0x00ff00); ctx->csa.prob.mb_stat_R |= (((4 - slot) & 0xff) << 8); gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT); ret = 4; } else { /* make sure we get woken up by the interrupt when space becomes available */ ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR; ret = 0; } spin_unlock(&ctx->csa.register_lock); return ret; } static u32 spu_backing_signal1_read(struct spu_context *ctx) { return ctx->csa.spu_chnldata_RW[3]; } static void spu_backing_signal1_write(struct spu_context *ctx, u32 data) { spin_lock(&ctx->csa.register_lock); if (ctx->csa.priv2.spu_cfg_RW & 0x1) ctx->csa.spu_chnldata_RW[3] |= data; else ctx->csa.spu_chnldata_RW[3] = data; ctx->csa.spu_chnlcnt_RW[3] = 1; gen_spu_event(ctx, MFC_SIGNAL_1_EVENT); spin_unlock(&ctx->csa.register_lock); } static u32 spu_backing_signal2_read(struct spu_context *ctx) { return ctx->csa.spu_chnldata_RW[4]; } static void spu_backing_signal2_write(struct spu_context *ctx, u32 data) { spin_lock(&ctx->csa.register_lock); if (ctx->csa.priv2.spu_cfg_RW & 0x2) ctx->csa.spu_chnldata_RW[4] |= data; else ctx->csa.spu_chnldata_RW[4] = data; ctx->csa.spu_chnlcnt_RW[4] = 1; gen_spu_event(ctx, MFC_SIGNAL_2_EVENT); spin_unlock(&ctx->csa.register_lock); } static void spu_backing_signal1_type_set(struct spu_context *ctx, u64 val) { u64 tmp; spin_lock(&ctx->csa.register_lock); tmp = ctx->csa.priv2.spu_cfg_RW; if (val) tmp |= 1; else tmp &= ~1; ctx->csa.priv2.spu_cfg_RW = tmp; spin_unlock(&ctx->csa.register_lock); } static u64 spu_backing_signal1_type_get(struct spu_context *ctx) { return ((ctx->csa.priv2.spu_cfg_RW & 1) != 0); } static void spu_backing_signal2_type_set(struct spu_context *ctx, u64 val) { u64 tmp; spin_lock(&ctx->csa.register_lock); tmp = ctx->csa.priv2.spu_cfg_RW; if (val) tmp |= 2; else tmp &= ~2; ctx->csa.priv2.spu_cfg_RW = tmp; spin_unlock(&ctx->csa.register_lock); } static u64 spu_backing_signal2_type_get(struct spu_context *ctx) { return ((ctx->csa.priv2.spu_cfg_RW & 2) != 0); } static u32 spu_backing_npc_read(struct spu_context *ctx) { return ctx->csa.prob.spu_npc_RW; } static void spu_backing_npc_write(struct spu_context *ctx, u32 val) { ctx->csa.prob.spu_npc_RW = val; } static u32 spu_backing_status_read(struct spu_context *ctx) { return ctx->csa.prob.spu_status_R; } static char *spu_backing_get_ls(struct spu_context *ctx) { return ctx->csa.lscsa->ls; } static void spu_backing_privcntl_write(struct spu_context *ctx, u64 val) { ctx->csa.priv2.spu_privcntl_RW = val; } static u32 spu_backing_runcntl_read(struct spu_context *ctx) { return ctx->csa.prob.spu_runcntl_RW; } static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val) { spin_lock(&ctx->csa.register_lock); 
ctx->csa.prob.spu_runcntl_RW = val; if (val & SPU_RUNCNTL_RUNNABLE) { ctx->csa.prob.spu_status_R &= ~SPU_STATUS_STOPPED_BY_STOP & ~SPU_STATUS_STOPPED_BY_HALT & ~SPU_STATUS_SINGLE_STEP & ~SPU_STATUS_INVALID_INSTR & ~SPU_STATUS_INVALID_CH; ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING; } else { ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING; } spin_unlock(&ctx->csa.register_lock); } static void spu_backing_runcntl_stop(struct spu_context *ctx) { spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP); } static void spu_backing_master_start(struct spu_context *ctx) { struct spu_state *csa = &ctx->csa; u64 sr1; spin_lock(&csa->register_lock); sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK; csa->priv1.mfc_sr1_RW = sr1; spin_unlock(&csa->register_lock); } static void spu_backing_master_stop(struct spu_context *ctx) { struct spu_state *csa = &ctx->csa; u64 sr1; spin_lock(&csa->register_lock); sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK; csa->priv1.mfc_sr1_RW = sr1; spin_unlock(&csa->register_lock); } static int spu_backing_set_mfc_query(struct spu_context * ctx, u32 mask, u32 mode) { struct spu_problem_collapsed *prob = &ctx->csa.prob; int ret; spin_lock(&ctx->csa.register_lock); ret = -EAGAIN; if (prob->dma_querytype_RW) goto out; ret = 0; /* FIXME: what are the side-effects of this? */ prob->dma_querymask_RW = mask; prob->dma_querytype_RW = mode; /* In the current implementation, the SPU context is always * acquired in runnable state when new bits are added to the * mask (tagwait), so it's sufficient just to mask * dma_tagstatus_R with the 'mask' parameter here. */ ctx->csa.prob.dma_tagstatus_R &= mask; out: spin_unlock(&ctx->csa.register_lock); return ret; } static u32 spu_backing_read_mfc_tagstatus(struct spu_context * ctx) { return ctx->csa.prob.dma_tagstatus_R; } static u32 spu_backing_get_mfc_free_elements(struct spu_context *ctx) { return ctx->csa.prob.dma_qstatus_R; } static int spu_backing_send_mfc_command(struct spu_context *ctx, struct mfc_dma_command *cmd) { int ret; spin_lock(&ctx->csa.register_lock); ret = -EAGAIN; /* FIXME: set up priv2->puq */ spin_unlock(&ctx->csa.register_lock); return ret; } static void spu_backing_restart_dma(struct spu_context *ctx) { ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND; } struct spu_context_ops spu_backing_ops = { .mbox_read = spu_backing_mbox_read, .mbox_stat_read = spu_backing_mbox_stat_read, .mbox_stat_poll = spu_backing_mbox_stat_poll, .ibox_read = spu_backing_ibox_read, .wbox_write = spu_backing_wbox_write, .signal1_read = spu_backing_signal1_read, .signal1_write = spu_backing_signal1_write, .signal2_read = spu_backing_signal2_read, .signal2_write = spu_backing_signal2_write, .signal1_type_set = spu_backing_signal1_type_set, .signal1_type_get = spu_backing_signal1_type_get, .signal2_type_set = spu_backing_signal2_type_set, .signal2_type_get = spu_backing_signal2_type_get, .npc_read = spu_backing_npc_read, .npc_write = spu_backing_npc_write, .status_read = spu_backing_status_read, .get_ls = spu_backing_get_ls, .privcntl_write = spu_backing_privcntl_write, .runcntl_read = spu_backing_runcntl_read, .runcntl_write = spu_backing_runcntl_write, .runcntl_stop = spu_backing_runcntl_stop, .master_start = spu_backing_master_start, .master_stop = spu_backing_master_stop, .set_mfc_query = spu_backing_set_mfc_query, .read_mfc_tagstatus = spu_backing_read_mfc_tagstatus, .get_mfc_free_elements = spu_backing_get_mfc_free_elements, .send_mfc_command = spu_backing_send_mfc_command, .restart_dma = 
spu_backing_restart_dma, };
linux-master
arch/powerpc/platforms/cell/spufs/backing_ops.c
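backing_ops.c above decodes the saved mb_stat_R word with the masks 0x0000ff, 0x00ff00 and 0xff0000. The helper below is a standalone sketch, assuming the bit layout those masks imply (PU mailbox count, SPU write-mailbox free space, interrupt-mailbox count); it is illustrative only and not part of the kernel file.

/*
 * Standalone sketch (not part of the kernel file above).  It assumes the
 * mb_stat_R layout implied by the masks used in backing_ops.c:
 *   bits  0..7  - words waiting in the PU (read) mailbox
 *   bits  8..15 - free slots in the SPU write mailbox (depth 4)
 *   bits 16..23 - words waiting in the PU interrupt mailbox
 */
#include <stdint.h>
#include <stdio.h>

struct mb_stat {
	unsigned int pu_mbox_count;	/* mask 0x0000ff in the kernel code */
	unsigned int spu_wbox_space;	/* mask 0x00ff00 */
	unsigned int pu_intr_count;	/* mask 0xff0000 */
};

static struct mb_stat decode_mb_stat(uint32_t mb_stat_R)
{
	struct mb_stat s = {
		.pu_mbox_count = mb_stat_R & 0xff,
		.spu_wbox_space = (mb_stat_R >> 8) & 0xff,
		.pu_intr_count = (mb_stat_R >> 16) & 0xff,
	};
	return s;
}

int main(void)
{
	/* Example: one word readable, four write slots free, no interrupt mail. */
	struct mb_stat s = decode_mb_stat(0x000401);

	printf("mbox=%u wbox space=%u ibox=%u\n",
	       s.pu_mbox_count, s.spu_wbox_space, s.pu_intr_count);
	return 0;
}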
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SPU file system -- SPU context management
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <[email protected]>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include "spufs.h"
#include "sputrace.h"

atomic_t nr_spu_contexts = ATOMIC_INIT(0);

struct spu_context *alloc_spu_context(struct spu_gang *gang)
{
	struct spu_context *ctx;

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx)
		goto out;
	/* Binding to physical processor deferred
	 * until spu_activate().
	 */
	if (spu_init_csa(&ctx->csa))
		goto out_free;
	spin_lock_init(&ctx->mmio_lock);
	mutex_init(&ctx->mapping_lock);
	kref_init(&ctx->kref);
	mutex_init(&ctx->state_mutex);
	mutex_init(&ctx->run_mutex);
	init_waitqueue_head(&ctx->ibox_wq);
	init_waitqueue_head(&ctx->wbox_wq);
	init_waitqueue_head(&ctx->stop_wq);
	init_waitqueue_head(&ctx->mfc_wq);
	init_waitqueue_head(&ctx->run_wq);
	ctx->state = SPU_STATE_SAVED;
	ctx->ops = &spu_backing_ops;
	ctx->owner = get_task_mm(current);
	INIT_LIST_HEAD(&ctx->rq);
	INIT_LIST_HEAD(&ctx->aff_list);
	if (gang)
		spu_gang_add_ctx(gang, ctx);

	__spu_update_sched_info(ctx);
	spu_set_timeslice(ctx);
	ctx->stats.util_state = SPU_UTIL_IDLE_LOADED;
	ctx->stats.tstamp = ktime_get_ns();

	atomic_inc(&nr_spu_contexts);
	goto out;
out_free:
	kfree(ctx);
	ctx = NULL;
out:
	return ctx;
}

void destroy_spu_context(struct kref *kref)
{
	struct spu_context *ctx;

	ctx = container_of(kref, struct spu_context, kref);
	spu_context_nospu_trace(destroy_spu_context__enter, ctx);
	mutex_lock(&ctx->state_mutex);
	spu_deactivate(ctx);
	mutex_unlock(&ctx->state_mutex);
	spu_fini_csa(&ctx->csa);
	if (ctx->gang)
		spu_gang_remove_ctx(ctx->gang, ctx);
	if (ctx->prof_priv_kref)
		kref_put(ctx->prof_priv_kref, ctx->prof_priv_release);
	BUG_ON(!list_empty(&ctx->rq));
	atomic_dec(&nr_spu_contexts);
	kfree(ctx->switch_log);
	kfree(ctx);
}

struct spu_context *get_spu_context(struct spu_context *ctx)
{
	kref_get(&ctx->kref);
	return ctx;
}

int put_spu_context(struct spu_context *ctx)
{
	return kref_put(&ctx->kref, &destroy_spu_context);
}

/* give up the mm reference when the context is about to be destroyed */
void spu_forget(struct spu_context *ctx)
{
	struct mm_struct *mm;

	/*
	 * This is basically an open-coded spu_acquire_saved, except that
	 * we don't acquire the state mutex interruptible, and we don't
	 * want this context to be rescheduled on release.
	 */
	mutex_lock(&ctx->state_mutex);
	if (ctx->state != SPU_STATE_SAVED)
		spu_deactivate(ctx);

	mm = ctx->owner;
	ctx->owner = NULL;
	mmput(mm);
	spu_release(ctx);
}

void spu_unmap_mappings(struct spu_context *ctx)
{
	mutex_lock(&ctx->mapping_lock);
	if (ctx->local_store)
		unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1);
	if (ctx->mfc)
		unmap_mapping_range(ctx->mfc, 0, SPUFS_MFC_MAP_SIZE, 1);
	if (ctx->cntl)
		unmap_mapping_range(ctx->cntl, 0, SPUFS_CNTL_MAP_SIZE, 1);
	if (ctx->signal1)
		unmap_mapping_range(ctx->signal1, 0, SPUFS_SIGNAL_MAP_SIZE, 1);
	if (ctx->signal2)
		unmap_mapping_range(ctx->signal2, 0, SPUFS_SIGNAL_MAP_SIZE, 1);
	if (ctx->mss)
		unmap_mapping_range(ctx->mss, 0, SPUFS_MSS_MAP_SIZE, 1);
	if (ctx->psmap)
		unmap_mapping_range(ctx->psmap, 0, SPUFS_PS_MAP_SIZE, 1);
	mutex_unlock(&ctx->mapping_lock);
}

/**
 * spu_acquire_saved - lock spu context and make sure it is in saved state
 * @ctx: spu context to lock
 */
int spu_acquire_saved(struct spu_context *ctx)
{
	int ret;

	spu_context_nospu_trace(spu_acquire_saved__enter, ctx);

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	if (ctx->state != SPU_STATE_SAVED) {
		set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
		spu_deactivate(ctx);
	}

	return 0;
}

/**
 * spu_release_saved - unlock spu context and return it to the runqueue
 * @ctx: context to unlock
 */
void spu_release_saved(struct spu_context *ctx)
{
	BUG_ON(ctx->state != SPU_STATE_SAVED);

	if (test_and_clear_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags) &&
			test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
		spu_activate(ctx, 0);

	spu_release(ctx);
}
linux-master
arch/powerpc/platforms/cell/spufs/context.c
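context.c above ties the context lifetime to a kref: alloc_spu_context() starts the count at one via kref_init(), get_spu_context()/put_spu_context() take and drop references, and destroy_spu_context() runs when the last reference is dropped. The sketch below reproduces that ownership pattern in isolation with user-space stand-ins; the demo_* names are hypothetical and the atomics replace the kernel's struct kref.

/*
 * Minimal user-space sketch of the refcount pattern used by
 * get_spu_context()/put_spu_context().  The types and atomics here are
 * stand-ins; the kernel file uses struct kref and kref_get()/kref_put().
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_ctx {
	atomic_int refcount;
	/* ... per-context state would live here ... */
};

static struct demo_ctx *demo_alloc(void)
{
	struct demo_ctx *ctx = calloc(1, sizeof(*ctx));

	if (ctx)
		atomic_init(&ctx->refcount, 1);	/* like kref_init() */
	return ctx;
}

static struct demo_ctx *demo_get(struct demo_ctx *ctx)
{
	atomic_fetch_add(&ctx->refcount, 1);	/* like get_spu_context() */
	return ctx;
}

static void demo_put(struct demo_ctx *ctx)
{
	/* Last reference dropped: destroy, like destroy_spu_context(). */
	if (atomic_fetch_sub(&ctx->refcount, 1) == 1) {
		printf("destroying context\n");
		free(ctx);
	}
}

int main(void)
{
	struct demo_ctx *ctx = demo_alloc();
	struct demo_ctx *ref = demo_get(ctx);	/* e.g. a fault handler */

	demo_put(ctx);	/* creator drops its reference */
	demo_put(ref);	/* last user drops -> context is destroyed */
	return 0;
}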
// SPDX-License-Identifier: GPL-2.0-or-later /* * SPU file system -- file contents * * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 * * Author: Arnd Bergmann <[email protected]> */ #undef DEBUG #include <linux/coredump.h> #include <linux/fs.h> #include <linux/ioctl.h> #include <linux/export.h> #include <linux/pagemap.h> #include <linux/poll.h> #include <linux/ptrace.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/time.h> #include <asm/spu.h> #include <asm/spu_info.h> #include <linux/uaccess.h> #include "spufs.h" #include "sputrace.h" #define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000) /* Simple attribute files */ struct spufs_attr { int (*get)(void *, u64 *); int (*set)(void *, u64); char get_buf[24]; /* enough to store a u64 and "\n\0" */ char set_buf[24]; void *data; const char *fmt; /* format for read operation */ struct mutex mutex; /* protects access to these buffers */ }; static int spufs_attr_open(struct inode *inode, struct file *file, int (*get)(void *, u64 *), int (*set)(void *, u64), const char *fmt) { struct spufs_attr *attr; attr = kmalloc(sizeof(*attr), GFP_KERNEL); if (!attr) return -ENOMEM; attr->get = get; attr->set = set; attr->data = inode->i_private; attr->fmt = fmt; mutex_init(&attr->mutex); file->private_data = attr; return nonseekable_open(inode, file); } static int spufs_attr_release(struct inode *inode, struct file *file) { kfree(file->private_data); return 0; } static ssize_t spufs_attr_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { struct spufs_attr *attr; size_t size; ssize_t ret; attr = file->private_data; if (!attr->get) return -EACCES; ret = mutex_lock_interruptible(&attr->mutex); if (ret) return ret; if (*ppos) { /* continued read */ size = strlen(attr->get_buf); } else { /* first read */ u64 val; ret = attr->get(attr->data, &val); if (ret) goto out; size = scnprintf(attr->get_buf, sizeof(attr->get_buf), attr->fmt, (unsigned long long)val); } ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size); out: mutex_unlock(&attr->mutex); return ret; } static ssize_t spufs_attr_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { struct spufs_attr *attr; u64 val; size_t size; ssize_t ret; attr = file->private_data; if (!attr->set) return -EACCES; ret = mutex_lock_interruptible(&attr->mutex); if (ret) return ret; ret = -EFAULT; size = min(sizeof(attr->set_buf) - 1, len); if (copy_from_user(attr->set_buf, buf, size)) goto out; ret = len; /* claim we got the whole input */ attr->set_buf[size] = '\0'; val = simple_strtol(attr->set_buf, NULL, 0); attr->set(attr->data, val); out: mutex_unlock(&attr->mutex); return ret; } static ssize_t spufs_dump_emit(struct coredump_params *cprm, void *buf, size_t size) { if (!dump_emit(cprm, buf, size)) return -EIO; return size; } #define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \ static int __fops ## _open(struct inode *inode, struct file *file) \ { \ __simple_attr_check_format(__fmt, 0ull); \ return spufs_attr_open(inode, file, __get, __set, __fmt); \ } \ static const struct file_operations __fops = { \ .open = __fops ## _open, \ .release = spufs_attr_release, \ .read = spufs_attr_read, \ .write = spufs_attr_write, \ .llseek = generic_file_llseek, \ }; static int spufs_mem_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); file->private_data = ctx; if (!i->i_openers++) ctx->local_store = inode->i_mapping; 
mutex_unlock(&ctx->mapping_lock); return 0; } static int spufs_mem_release(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) ctx->local_store = NULL; mutex_unlock(&ctx->mapping_lock); return 0; } static ssize_t spufs_mem_dump(struct spu_context *ctx, struct coredump_params *cprm) { return spufs_dump_emit(cprm, ctx->ops->get_ls(ctx), LS_SIZE); } static ssize_t spufs_mem_read(struct file *file, char __user *buffer, size_t size, loff_t *pos) { struct spu_context *ctx = file->private_data; ssize_t ret; ret = spu_acquire(ctx); if (ret) return ret; ret = simple_read_from_buffer(buffer, size, pos, ctx->ops->get_ls(ctx), LS_SIZE); spu_release(ctx); return ret; } static ssize_t spufs_mem_write(struct file *file, const char __user *buffer, size_t size, loff_t *ppos) { struct spu_context *ctx = file->private_data; char *local_store; loff_t pos = *ppos; int ret; if (pos > LS_SIZE) return -EFBIG; ret = spu_acquire(ctx); if (ret) return ret; local_store = ctx->ops->get_ls(ctx); size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size); spu_release(ctx); return size; } static vm_fault_t spufs_mem_mmap_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct spu_context *ctx = vma->vm_file->private_data; unsigned long pfn, offset; vm_fault_t ret; offset = vmf->pgoff << PAGE_SHIFT; if (offset >= LS_SIZE) return VM_FAULT_SIGBUS; pr_debug("spufs_mem_mmap_fault address=0x%lx, offset=0x%lx\n", vmf->address, offset); if (spu_acquire(ctx)) return VM_FAULT_NOPAGE; if (ctx->state == SPU_STATE_SAVED) { vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset); } else { vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot); pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT; } ret = vmf_insert_pfn(vma, vmf->address, pfn); spu_release(ctx); return ret; } static int spufs_mem_mmap_access(struct vm_area_struct *vma, unsigned long address, void *buf, int len, int write) { struct spu_context *ctx = vma->vm_file->private_data; unsigned long offset = address - vma->vm_start; char *local_store; if (write && !(vma->vm_flags & VM_WRITE)) return -EACCES; if (spu_acquire(ctx)) return -EINTR; if ((offset + len) > vma->vm_end) len = vma->vm_end - offset; local_store = ctx->ops->get_ls(ctx); if (write) memcpy_toio(local_store + offset, buf, len); else memcpy_fromio(buf, local_store + offset, len); spu_release(ctx); return len; } static const struct vm_operations_struct spufs_mem_mmap_vmops = { .fault = spufs_mem_mmap_fault, .access = spufs_mem_mmap_access, }; static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma) { if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; vm_flags_set(vma, VM_IO | VM_PFNMAP); vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot); vma->vm_ops = &spufs_mem_mmap_vmops; return 0; } static const struct file_operations spufs_mem_fops = { .open = spufs_mem_open, .release = spufs_mem_release, .read = spufs_mem_read, .write = spufs_mem_write, .llseek = generic_file_llseek, .mmap = spufs_mem_mmap, }; static vm_fault_t spufs_ps_fault(struct vm_fault *vmf, unsigned long ps_offs, unsigned long ps_size) { struct spu_context *ctx = vmf->vma->vm_file->private_data; unsigned long area, offset = vmf->pgoff << PAGE_SHIFT; int err = 0; vm_fault_t ret = VM_FAULT_NOPAGE; spu_context_nospu_trace(spufs_ps_fault__enter, ctx); if (offset >= ps_size) return VM_FAULT_SIGBUS; if 
(fatal_signal_pending(current)) return VM_FAULT_SIGBUS; /* * Because we release the mmap_lock, the context may be destroyed while * we're in spu_wait. Grab an extra reference so it isn't destroyed * in the meantime. */ get_spu_context(ctx); /* * We have to wait for context to be loaded before we have * pages to hand out to the user, but we don't want to wait * with the mmap_lock held. * It is possible to drop the mmap_lock here, but then we need * to return VM_FAULT_NOPAGE because the mappings may have * hanged. */ if (spu_acquire(ctx)) goto refault; if (ctx->state == SPU_STATE_SAVED) { mmap_read_unlock(current->mm); spu_context_nospu_trace(spufs_ps_fault__sleep, ctx); err = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE); spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu); mmap_read_lock(current->mm); } else { area = ctx->spu->problem_phys + ps_offs; ret = vmf_insert_pfn(vmf->vma, vmf->address, (area + offset) >> PAGE_SHIFT); spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu); } if (!err) spu_release(ctx); refault: put_spu_context(ctx); return ret; } #if SPUFS_MMAP_4K static vm_fault_t spufs_cntl_mmap_fault(struct vm_fault *vmf) { return spufs_ps_fault(vmf, 0x4000, SPUFS_CNTL_MAP_SIZE); } static const struct vm_operations_struct spufs_cntl_mmap_vmops = { .fault = spufs_cntl_mmap_fault, }; /* * mmap support for problem state control area [0x4000 - 0x4fff]. */ static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma) { if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; vm_flags_set(vma, VM_IO | VM_PFNMAP); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_cntl_mmap_vmops; return 0; } #else /* SPUFS_MMAP_4K */ #define spufs_cntl_mmap NULL #endif /* !SPUFS_MMAP_4K */ static int spufs_cntl_get(void *data, u64 *val) { struct spu_context *ctx = data; int ret; ret = spu_acquire(ctx); if (ret) return ret; *val = ctx->ops->status_read(ctx); spu_release(ctx); return 0; } static int spufs_cntl_set(void *data, u64 val) { struct spu_context *ctx = data; int ret; ret = spu_acquire(ctx); if (ret) return ret; ctx->ops->runcntl_write(ctx, val); spu_release(ctx); return 0; } static int spufs_cntl_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); file->private_data = ctx; if (!i->i_openers++) ctx->cntl = inode->i_mapping; mutex_unlock(&ctx->mapping_lock); return simple_attr_open(inode, file, spufs_cntl_get, spufs_cntl_set, "0x%08lx"); } static int spufs_cntl_release(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; simple_attr_release(inode, file); mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) ctx->cntl = NULL; mutex_unlock(&ctx->mapping_lock); return 0; } static const struct file_operations spufs_cntl_fops = { .open = spufs_cntl_open, .release = spufs_cntl_release, .read = simple_attr_read, .write = simple_attr_write, .llseek = no_llseek, .mmap = spufs_cntl_mmap, }; static int spufs_regs_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); file->private_data = i->i_ctx; return 0; } static ssize_t spufs_regs_dump(struct spu_context *ctx, struct coredump_params *cprm) { return spufs_dump_emit(cprm, ctx->csa.lscsa->gprs, sizeof(ctx->csa.lscsa->gprs)); } static ssize_t spufs_regs_read(struct file *file, char __user *buffer, size_t size, loff_t *pos) { int ret; struct spu_context *ctx = file->private_data; /* pre-check for 
file position: if we'd return EOF, there's no point * causing a deschedule */ if (*pos >= sizeof(ctx->csa.lscsa->gprs)) return 0; ret = spu_acquire_saved(ctx); if (ret) return ret; ret = simple_read_from_buffer(buffer, size, pos, ctx->csa.lscsa->gprs, sizeof(ctx->csa.lscsa->gprs)); spu_release_saved(ctx); return ret; } static ssize_t spufs_regs_write(struct file *file, const char __user *buffer, size_t size, loff_t *pos) { struct spu_context *ctx = file->private_data; struct spu_lscsa *lscsa = ctx->csa.lscsa; int ret; if (*pos >= sizeof(lscsa->gprs)) return -EFBIG; ret = spu_acquire_saved(ctx); if (ret) return ret; size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos, buffer, size); spu_release_saved(ctx); return size; } static const struct file_operations spufs_regs_fops = { .open = spufs_regs_open, .read = spufs_regs_read, .write = spufs_regs_write, .llseek = generic_file_llseek, }; static ssize_t spufs_fpcr_dump(struct spu_context *ctx, struct coredump_params *cprm) { return spufs_dump_emit(cprm, &ctx->csa.lscsa->fpcr, sizeof(ctx->csa.lscsa->fpcr)); } static ssize_t spufs_fpcr_read(struct file *file, char __user * buffer, size_t size, loff_t * pos) { int ret; struct spu_context *ctx = file->private_data; ret = spu_acquire_saved(ctx); if (ret) return ret; ret = simple_read_from_buffer(buffer, size, pos, &ctx->csa.lscsa->fpcr, sizeof(ctx->csa.lscsa->fpcr)); spu_release_saved(ctx); return ret; } static ssize_t spufs_fpcr_write(struct file *file, const char __user * buffer, size_t size, loff_t * pos) { struct spu_context *ctx = file->private_data; struct spu_lscsa *lscsa = ctx->csa.lscsa; int ret; if (*pos >= sizeof(lscsa->fpcr)) return -EFBIG; ret = spu_acquire_saved(ctx); if (ret) return ret; size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos, buffer, size); spu_release_saved(ctx); return size; } static const struct file_operations spufs_fpcr_fops = { .open = spufs_regs_open, .read = spufs_fpcr_read, .write = spufs_fpcr_write, .llseek = generic_file_llseek, }; /* generic open function for all pipe-like files */ static int spufs_pipe_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); file->private_data = i->i_ctx; return stream_open(inode, file); } /* * Read as many bytes from the mailbox as possible, until * one of the conditions becomes true: * * - no more data available in the mailbox * - end of the user provided buffer * - end of the mapped area */ static ssize_t spufs_mbox_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; u32 mbox_data, __user *udata = (void __user *)buf; ssize_t count; if (len < 4) return -EINVAL; count = spu_acquire(ctx); if (count) return count; for (count = 0; (count + 4) <= len; count += 4, udata++) { int ret; ret = ctx->ops->mbox_read(ctx, &mbox_data); if (ret == 0) break; /* * at the end of the mapped area, we can fault * but still need to return the data we have * read successfully so far. 
*/ ret = put_user(mbox_data, udata); if (ret) { if (!count) count = -EFAULT; break; } } spu_release(ctx); if (!count) count = -EAGAIN; return count; } static const struct file_operations spufs_mbox_fops = { .open = spufs_pipe_open, .read = spufs_mbox_read, .llseek = no_llseek, }; static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; ssize_t ret; u32 mbox_stat; if (len < 4) return -EINVAL; ret = spu_acquire(ctx); if (ret) return ret; mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff; spu_release(ctx); if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat)) return -EFAULT; return 4; } static const struct file_operations spufs_mbox_stat_fops = { .open = spufs_pipe_open, .read = spufs_mbox_stat_read, .llseek = no_llseek, }; /* low-level ibox access function */ size_t spu_ibox_read(struct spu_context *ctx, u32 *data) { return ctx->ops->ibox_read(ctx, data); } /* interrupt-level ibox callback function. */ void spufs_ibox_callback(struct spu *spu) { struct spu_context *ctx = spu->ctx; if (ctx) wake_up_all(&ctx->ibox_wq); } /* * Read as many bytes from the interrupt mailbox as possible, until * one of the conditions becomes true: * * - no more data available in the mailbox * - end of the user provided buffer * - end of the mapped area * * If the file is opened without O_NONBLOCK, we wait here until * any data is available, but return when we have been able to * read something. */ static ssize_t spufs_ibox_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; u32 ibox_data, __user *udata = (void __user *)buf; ssize_t count; if (len < 4) return -EINVAL; count = spu_acquire(ctx); if (count) goto out; /* wait only for the first element */ count = 0; if (file->f_flags & O_NONBLOCK) { if (!spu_ibox_read(ctx, &ibox_data)) { count = -EAGAIN; goto out_unlock; } } else { count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data)); if (count) goto out; } /* if we can't write at all, return -EFAULT */ count = put_user(ibox_data, udata); if (count) goto out_unlock; for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) { int ret; ret = ctx->ops->ibox_read(ctx, &ibox_data); if (ret == 0) break; /* * at the end of the mapped area, we can fault * but still need to return the data we have * read successfully so far. */ ret = put_user(ibox_data, udata); if (ret) break; } out_unlock: spu_release(ctx); out: return count; } static __poll_t spufs_ibox_poll(struct file *file, poll_table *wait) { struct spu_context *ctx = file->private_data; __poll_t mask; poll_wait(file, &ctx->ibox_wq, wait); /* * For now keep this uninterruptible and also ignore the rule * that poll should not sleep. Will be fixed later. 
*/ mutex_lock(&ctx->state_mutex); mask = ctx->ops->mbox_stat_poll(ctx, EPOLLIN | EPOLLRDNORM); spu_release(ctx); return mask; } static const struct file_operations spufs_ibox_fops = { .open = spufs_pipe_open, .read = spufs_ibox_read, .poll = spufs_ibox_poll, .llseek = no_llseek, }; static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; ssize_t ret; u32 ibox_stat; if (len < 4) return -EINVAL; ret = spu_acquire(ctx); if (ret) return ret; ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff; spu_release(ctx); if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat)) return -EFAULT; return 4; } static const struct file_operations spufs_ibox_stat_fops = { .open = spufs_pipe_open, .read = spufs_ibox_stat_read, .llseek = no_llseek, }; /* low-level mailbox write */ size_t spu_wbox_write(struct spu_context *ctx, u32 data) { return ctx->ops->wbox_write(ctx, data); } /* interrupt-level wbox callback function. */ void spufs_wbox_callback(struct spu *spu) { struct spu_context *ctx = spu->ctx; if (ctx) wake_up_all(&ctx->wbox_wq); } /* * Write as many bytes to the interrupt mailbox as possible, until * one of the conditions becomes true: * * - the mailbox is full * - end of the user provided buffer * - end of the mapped area * * If the file is opened without O_NONBLOCK, we wait here until * space is available, but return when we have been able to * write something. */ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; u32 wbox_data, __user *udata = (void __user *)buf; ssize_t count; if (len < 4) return -EINVAL; if (get_user(wbox_data, udata)) return -EFAULT; count = spu_acquire(ctx); if (count) goto out; /* * make sure we can at least write one element, by waiting * in case of !O_NONBLOCK */ count = 0; if (file->f_flags & O_NONBLOCK) { if (!spu_wbox_write(ctx, wbox_data)) { count = -EAGAIN; goto out_unlock; } } else { count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data)); if (count) goto out; } /* write as much as possible */ for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) { int ret; ret = get_user(wbox_data, udata); if (ret) break; ret = spu_wbox_write(ctx, wbox_data); if (ret == 0) break; } out_unlock: spu_release(ctx); out: return count; } static __poll_t spufs_wbox_poll(struct file *file, poll_table *wait) { struct spu_context *ctx = file->private_data; __poll_t mask; poll_wait(file, &ctx->wbox_wq, wait); /* * For now keep this uninterruptible and also ignore the rule * that poll should not sleep. Will be fixed later. 
*/ mutex_lock(&ctx->state_mutex); mask = ctx->ops->mbox_stat_poll(ctx, EPOLLOUT | EPOLLWRNORM); spu_release(ctx); return mask; } static const struct file_operations spufs_wbox_fops = { .open = spufs_pipe_open, .write = spufs_wbox_write, .poll = spufs_wbox_poll, .llseek = no_llseek, }; static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; ssize_t ret; u32 wbox_stat; if (len < 4) return -EINVAL; ret = spu_acquire(ctx); if (ret) return ret; wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff; spu_release(ctx); if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat)) return -EFAULT; return 4; } static const struct file_operations spufs_wbox_stat_fops = { .open = spufs_pipe_open, .read = spufs_wbox_stat_read, .llseek = no_llseek, }; static int spufs_signal1_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); file->private_data = ctx; if (!i->i_openers++) ctx->signal1 = inode->i_mapping; mutex_unlock(&ctx->mapping_lock); return nonseekable_open(inode, file); } static int spufs_signal1_release(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) ctx->signal1 = NULL; mutex_unlock(&ctx->mapping_lock); return 0; } static ssize_t spufs_signal1_dump(struct spu_context *ctx, struct coredump_params *cprm) { if (!ctx->csa.spu_chnlcnt_RW[3]) return 0; return spufs_dump_emit(cprm, &ctx->csa.spu_chnldata_RW[3], sizeof(ctx->csa.spu_chnldata_RW[3])); } static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf, size_t len) { if (len < sizeof(ctx->csa.spu_chnldata_RW[3])) return -EINVAL; if (!ctx->csa.spu_chnlcnt_RW[3]) return 0; if (copy_to_user(buf, &ctx->csa.spu_chnldata_RW[3], sizeof(ctx->csa.spu_chnldata_RW[3]))) return -EFAULT; return sizeof(ctx->csa.spu_chnldata_RW[3]); } static ssize_t spufs_signal1_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { int ret; struct spu_context *ctx = file->private_data; ret = spu_acquire_saved(ctx); if (ret) return ret; ret = __spufs_signal1_read(ctx, buf, len); spu_release_saved(ctx); return ret; } static ssize_t spufs_signal1_write(struct file *file, const char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx; ssize_t ret; u32 data; ctx = file->private_data; if (len < 4) return -EINVAL; if (copy_from_user(&data, buf, 4)) return -EFAULT; ret = spu_acquire(ctx); if (ret) return ret; ctx->ops->signal1_write(ctx, data); spu_release(ctx); return 4; } static vm_fault_t spufs_signal1_mmap_fault(struct vm_fault *vmf) { #if SPUFS_SIGNAL_MAP_SIZE == 0x1000 return spufs_ps_fault(vmf, 0x14000, SPUFS_SIGNAL_MAP_SIZE); #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole * signal 1 and 2 area */ return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE); #else #error unsupported page size #endif } static const struct vm_operations_struct spufs_signal1_mmap_vmops = { .fault = spufs_signal1_mmap_fault, }; static int spufs_signal1_mmap(struct file *file, struct vm_area_struct *vma) { if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; vm_flags_set(vma, VM_IO | VM_PFNMAP); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_signal1_mmap_vmops; return 0; } static const struct file_operations spufs_signal1_fops = { .open = 
spufs_signal1_open, .release = spufs_signal1_release, .read = spufs_signal1_read, .write = spufs_signal1_write, .mmap = spufs_signal1_mmap, .llseek = no_llseek, }; static const struct file_operations spufs_signal1_nosched_fops = { .open = spufs_signal1_open, .release = spufs_signal1_release, .write = spufs_signal1_write, .mmap = spufs_signal1_mmap, .llseek = no_llseek, }; static int spufs_signal2_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); file->private_data = ctx; if (!i->i_openers++) ctx->signal2 = inode->i_mapping; mutex_unlock(&ctx->mapping_lock); return nonseekable_open(inode, file); } static int spufs_signal2_release(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) ctx->signal2 = NULL; mutex_unlock(&ctx->mapping_lock); return 0; } static ssize_t spufs_signal2_dump(struct spu_context *ctx, struct coredump_params *cprm) { if (!ctx->csa.spu_chnlcnt_RW[4]) return 0; return spufs_dump_emit(cprm, &ctx->csa.spu_chnldata_RW[4], sizeof(ctx->csa.spu_chnldata_RW[4])); } static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf, size_t len) { if (len < sizeof(ctx->csa.spu_chnldata_RW[4])) return -EINVAL; if (!ctx->csa.spu_chnlcnt_RW[4]) return 0; if (copy_to_user(buf, &ctx->csa.spu_chnldata_RW[4], sizeof(ctx->csa.spu_chnldata_RW[4]))) return -EFAULT; return sizeof(ctx->csa.spu_chnldata_RW[4]); } static ssize_t spufs_signal2_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; int ret; ret = spu_acquire_saved(ctx); if (ret) return ret; ret = __spufs_signal2_read(ctx, buf, len); spu_release_saved(ctx); return ret; } static ssize_t spufs_signal2_write(struct file *file, const char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx; ssize_t ret; u32 data; ctx = file->private_data; if (len < 4) return -EINVAL; if (copy_from_user(&data, buf, 4)) return -EFAULT; ret = spu_acquire(ctx); if (ret) return ret; ctx->ops->signal2_write(ctx, data); spu_release(ctx); return 4; } #if SPUFS_MMAP_4K static vm_fault_t spufs_signal2_mmap_fault(struct vm_fault *vmf) { #if SPUFS_SIGNAL_MAP_SIZE == 0x1000 return spufs_ps_fault(vmf, 0x1c000, SPUFS_SIGNAL_MAP_SIZE); #elif SPUFS_SIGNAL_MAP_SIZE == 0x10000 /* For 64k pages, both signal1 and signal2 can be used to mmap the whole * signal 1 and 2 area */ return spufs_ps_fault(vmf, 0x10000, SPUFS_SIGNAL_MAP_SIZE); #else #error unsupported page size #endif } static const struct vm_operations_struct spufs_signal2_mmap_vmops = { .fault = spufs_signal2_mmap_fault, }; static int spufs_signal2_mmap(struct file *file, struct vm_area_struct *vma) { if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; vm_flags_set(vma, VM_IO | VM_PFNMAP); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_signal2_mmap_vmops; return 0; } #else /* SPUFS_MMAP_4K */ #define spufs_signal2_mmap NULL #endif /* !SPUFS_MMAP_4K */ static const struct file_operations spufs_signal2_fops = { .open = spufs_signal2_open, .release = spufs_signal2_release, .read = spufs_signal2_read, .write = spufs_signal2_write, .mmap = spufs_signal2_mmap, .llseek = no_llseek, }; static const struct file_operations spufs_signal2_nosched_fops = { .open = spufs_signal2_open, .release = spufs_signal2_release, .write = spufs_signal2_write, .mmap = spufs_signal2_mmap, .llseek = 
no_llseek, }; /* * This is a wrapper around DEFINE_SIMPLE_ATTRIBUTE which does the * work of acquiring (or not) the SPU context before calling through * to the actual get routine. The set routine is called directly. */ #define SPU_ATTR_NOACQUIRE 0 #define SPU_ATTR_ACQUIRE 1 #define SPU_ATTR_ACQUIRE_SAVED 2 #define DEFINE_SPUFS_ATTRIBUTE(__name, __get, __set, __fmt, __acquire) \ static int __##__get(void *data, u64 *val) \ { \ struct spu_context *ctx = data; \ int ret = 0; \ \ if (__acquire == SPU_ATTR_ACQUIRE) { \ ret = spu_acquire(ctx); \ if (ret) \ return ret; \ *val = __get(ctx); \ spu_release(ctx); \ } else if (__acquire == SPU_ATTR_ACQUIRE_SAVED) { \ ret = spu_acquire_saved(ctx); \ if (ret) \ return ret; \ *val = __get(ctx); \ spu_release_saved(ctx); \ } else \ *val = __get(ctx); \ \ return 0; \ } \ DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__name, __##__get, __set, __fmt); static int spufs_signal1_type_set(void *data, u64 val) { struct spu_context *ctx = data; int ret; ret = spu_acquire(ctx); if (ret) return ret; ctx->ops->signal1_type_set(ctx, val); spu_release(ctx); return 0; } static u64 spufs_signal1_type_get(struct spu_context *ctx) { return ctx->ops->signal1_type_get(ctx); } DEFINE_SPUFS_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get, spufs_signal1_type_set, "%llu\n", SPU_ATTR_ACQUIRE); static int spufs_signal2_type_set(void *data, u64 val) { struct spu_context *ctx = data; int ret; ret = spu_acquire(ctx); if (ret) return ret; ctx->ops->signal2_type_set(ctx, val); spu_release(ctx); return 0; } static u64 spufs_signal2_type_get(struct spu_context *ctx) { return ctx->ops->signal2_type_get(ctx); } DEFINE_SPUFS_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get, spufs_signal2_type_set, "%llu\n", SPU_ATTR_ACQUIRE); #if SPUFS_MMAP_4K static vm_fault_t spufs_mss_mmap_fault(struct vm_fault *vmf) { return spufs_ps_fault(vmf, 0x0000, SPUFS_MSS_MAP_SIZE); } static const struct vm_operations_struct spufs_mss_mmap_vmops = { .fault = spufs_mss_mmap_fault, }; /* * mmap support for problem state MFC DMA area [0x0000 - 0x0fff]. */ static int spufs_mss_mmap(struct file *file, struct vm_area_struct *vma) { if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; vm_flags_set(vma, VM_IO | VM_PFNMAP); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_mss_mmap_vmops; return 0; } #else /* SPUFS_MMAP_4K */ #define spufs_mss_mmap NULL #endif /* !SPUFS_MMAP_4K */ static int spufs_mss_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; file->private_data = i->i_ctx; mutex_lock(&ctx->mapping_lock); if (!i->i_openers++) ctx->mss = inode->i_mapping; mutex_unlock(&ctx->mapping_lock); return nonseekable_open(inode, file); } static int spufs_mss_release(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) ctx->mss = NULL; mutex_unlock(&ctx->mapping_lock); return 0; } static const struct file_operations spufs_mss_fops = { .open = spufs_mss_open, .release = spufs_mss_release, .mmap = spufs_mss_mmap, .llseek = no_llseek, }; static vm_fault_t spufs_psmap_mmap_fault(struct vm_fault *vmf) { return spufs_ps_fault(vmf, 0x0000, SPUFS_PS_MAP_SIZE); } static const struct vm_operations_struct spufs_psmap_mmap_vmops = { .fault = spufs_psmap_mmap_fault, }; /* * mmap support for full problem state area [0x00000 - 0x1ffff]. 
*/ static int spufs_psmap_mmap(struct file *file, struct vm_area_struct *vma) { if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; vm_flags_set(vma, VM_IO | VM_PFNMAP); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_psmap_mmap_vmops; return 0; } static int spufs_psmap_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); file->private_data = i->i_ctx; if (!i->i_openers++) ctx->psmap = inode->i_mapping; mutex_unlock(&ctx->mapping_lock); return nonseekable_open(inode, file); } static int spufs_psmap_release(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) ctx->psmap = NULL; mutex_unlock(&ctx->mapping_lock); return 0; } static const struct file_operations spufs_psmap_fops = { .open = spufs_psmap_open, .release = spufs_psmap_release, .mmap = spufs_psmap_mmap, .llseek = no_llseek, }; #if SPUFS_MMAP_4K static vm_fault_t spufs_mfc_mmap_fault(struct vm_fault *vmf) { return spufs_ps_fault(vmf, 0x3000, SPUFS_MFC_MAP_SIZE); } static const struct vm_operations_struct spufs_mfc_mmap_vmops = { .fault = spufs_mfc_mmap_fault, }; /* * mmap support for problem state MFC DMA area [0x0000 - 0x0fff]. */ static int spufs_mfc_mmap(struct file *file, struct vm_area_struct *vma) { if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; vm_flags_set(vma, VM_IO | VM_PFNMAP); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vma->vm_ops = &spufs_mfc_mmap_vmops; return 0; } #else /* SPUFS_MMAP_4K */ #define spufs_mfc_mmap NULL #endif /* !SPUFS_MMAP_4K */ static int spufs_mfc_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; /* we don't want to deal with DMA into other processes */ if (ctx->owner != current->mm) return -EINVAL; if (atomic_read(&inode->i_count) != 1) return -EBUSY; mutex_lock(&ctx->mapping_lock); file->private_data = ctx; if (!i->i_openers++) ctx->mfc = inode->i_mapping; mutex_unlock(&ctx->mapping_lock); return nonseekable_open(inode, file); } static int spufs_mfc_release(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) ctx->mfc = NULL; mutex_unlock(&ctx->mapping_lock); return 0; } /* interrupt-level mfc callback function. */ void spufs_mfc_callback(struct spu *spu) { struct spu_context *ctx = spu->ctx; if (ctx) wake_up_all(&ctx->mfc_wq); } static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status) { /* See if there is one tag group is complete */ /* FIXME we need locking around tagwait */ *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait; ctx->tagwait &= ~*status; if (*status) return 1; /* enable interrupt waiting for any tag group, may silently fail if interrupts are already enabled */ ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1); return 0; } static ssize_t spufs_mfc_read(struct file *file, char __user *buffer, size_t size, loff_t *pos) { struct spu_context *ctx = file->private_data; int ret = -EINVAL; u32 status; if (size != 4) goto out; ret = spu_acquire(ctx); if (ret) return ret; ret = -EINVAL; if (file->f_flags & O_NONBLOCK) { status = ctx->ops->read_mfc_tagstatus(ctx); if (!(status & ctx->tagwait)) ret = -EAGAIN; else /* XXX(hch): shouldn't we clear ret here? 
*/ ctx->tagwait &= ~status; } else { ret = spufs_wait(ctx->mfc_wq, spufs_read_mfc_tagstatus(ctx, &status)); if (ret) goto out; } spu_release(ctx); ret = 4; if (copy_to_user(buffer, &status, 4)) ret = -EFAULT; out: return ret; } static int spufs_check_valid_dma(struct mfc_dma_command *cmd) { pr_debug("queueing DMA %x %llx %x %x %x\n", cmd->lsa, cmd->ea, cmd->size, cmd->tag, cmd->cmd); switch (cmd->cmd) { case MFC_PUT_CMD: case MFC_PUTF_CMD: case MFC_PUTB_CMD: case MFC_GET_CMD: case MFC_GETF_CMD: case MFC_GETB_CMD: break; default: pr_debug("invalid DMA opcode %x\n", cmd->cmd); return -EIO; } if ((cmd->lsa & 0xf) != (cmd->ea &0xf)) { pr_debug("invalid DMA alignment, ea %llx lsa %x\n", cmd->ea, cmd->lsa); return -EIO; } switch (cmd->size & 0xf) { case 1: break; case 2: if (cmd->lsa & 1) goto error; break; case 4: if (cmd->lsa & 3) goto error; break; case 8: if (cmd->lsa & 7) goto error; break; case 0: if (cmd->lsa & 15) goto error; break; error: default: pr_debug("invalid DMA alignment %x for size %x\n", cmd->lsa & 0xf, cmd->size); return -EIO; } if (cmd->size > 16 * 1024) { pr_debug("invalid DMA size %x\n", cmd->size); return -EIO; } if (cmd->tag & 0xfff0) { /* we reserve the higher tag numbers for kernel use */ pr_debug("invalid DMA tag\n"); return -EIO; } if (cmd->class) { /* not supported in this version */ pr_debug("invalid DMA class\n"); return -EIO; } return 0; } static int spu_send_mfc_command(struct spu_context *ctx, struct mfc_dma_command cmd, int *error) { *error = ctx->ops->send_mfc_command(ctx, &cmd); if (*error == -EAGAIN) { /* wait for any tag group to complete so we have space for the new command */ ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1); /* try again, because the queue might be empty again */ *error = ctx->ops->send_mfc_command(ctx, &cmd); if (*error == -EAGAIN) return 0; } return 1; } static ssize_t spufs_mfc_write(struct file *file, const char __user *buffer, size_t size, loff_t *pos) { struct spu_context *ctx = file->private_data; struct mfc_dma_command cmd; int ret = -EINVAL; if (size != sizeof cmd) goto out; ret = -EFAULT; if (copy_from_user(&cmd, buffer, sizeof cmd)) goto out; ret = spufs_check_valid_dma(&cmd); if (ret) goto out; ret = spu_acquire(ctx); if (ret) goto out; ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE); if (ret) goto out; if (file->f_flags & O_NONBLOCK) { ret = ctx->ops->send_mfc_command(ctx, &cmd); } else { int status; ret = spufs_wait(ctx->mfc_wq, spu_send_mfc_command(ctx, cmd, &status)); if (ret) goto out; if (status) ret = status; } if (ret) goto out_unlock; ctx->tagwait |= 1 << cmd.tag; ret = size; out_unlock: spu_release(ctx); out: return ret; } static __poll_t spufs_mfc_poll(struct file *file,poll_table *wait) { struct spu_context *ctx = file->private_data; u32 free_elements, tagstatus; __poll_t mask; poll_wait(file, &ctx->mfc_wq, wait); /* * For now keep this uninterruptible and also ignore the rule * that poll should not sleep. Will be fixed later. 
*/ mutex_lock(&ctx->state_mutex); ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2); free_elements = ctx->ops->get_mfc_free_elements(ctx); tagstatus = ctx->ops->read_mfc_tagstatus(ctx); spu_release(ctx); mask = 0; if (free_elements & 0xffff) mask |= EPOLLOUT | EPOLLWRNORM; if (tagstatus & ctx->tagwait) mask |= EPOLLIN | EPOLLRDNORM; pr_debug("%s: free %d tagstatus %d tagwait %d\n", __func__, free_elements, tagstatus, ctx->tagwait); return mask; } static int spufs_mfc_flush(struct file *file, fl_owner_t id) { struct spu_context *ctx = file->private_data; int ret; ret = spu_acquire(ctx); if (ret) goto out; #if 0 /* this currently hangs */ ret = spufs_wait(ctx->mfc_wq, ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2)); if (ret) goto out; ret = spufs_wait(ctx->mfc_wq, ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait); if (ret) goto out; #else ret = 0; #endif spu_release(ctx); out: return ret; } static int spufs_mfc_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file_inode(file); int err = file_write_and_wait_range(file, start, end); if (!err) { inode_lock(inode); err = spufs_mfc_flush(file, NULL); inode_unlock(inode); } return err; } static const struct file_operations spufs_mfc_fops = { .open = spufs_mfc_open, .release = spufs_mfc_release, .read = spufs_mfc_read, .write = spufs_mfc_write, .poll = spufs_mfc_poll, .flush = spufs_mfc_flush, .fsync = spufs_mfc_fsync, .mmap = spufs_mfc_mmap, .llseek = no_llseek, }; static int spufs_npc_set(void *data, u64 val) { struct spu_context *ctx = data; int ret; ret = spu_acquire(ctx); if (ret) return ret; ctx->ops->npc_write(ctx, val); spu_release(ctx); return 0; } static u64 spufs_npc_get(struct spu_context *ctx) { return ctx->ops->npc_read(ctx); } DEFINE_SPUFS_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, "0x%llx\n", SPU_ATTR_ACQUIRE); static int spufs_decr_set(void *data, u64 val) { struct spu_context *ctx = data; struct spu_lscsa *lscsa = ctx->csa.lscsa; int ret; ret = spu_acquire_saved(ctx); if (ret) return ret; lscsa->decr.slot[0] = (u32) val; spu_release_saved(ctx); return 0; } static u64 spufs_decr_get(struct spu_context *ctx) { struct spu_lscsa *lscsa = ctx->csa.lscsa; return lscsa->decr.slot[0]; } DEFINE_SPUFS_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED); static int spufs_decr_status_set(void *data, u64 val) { struct spu_context *ctx = data; int ret; ret = spu_acquire_saved(ctx); if (ret) return ret; if (val) ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING; else ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING; spu_release_saved(ctx); return 0; } static u64 spufs_decr_status_get(struct spu_context *ctx) { if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) return SPU_DECR_STATUS_RUNNING; else return 0; } DEFINE_SPUFS_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get, spufs_decr_status_set, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED); static int spufs_event_mask_set(void *data, u64 val) { struct spu_context *ctx = data; struct spu_lscsa *lscsa = ctx->csa.lscsa; int ret; ret = spu_acquire_saved(ctx); if (ret) return ret; lscsa->event_mask.slot[0] = (u32) val; spu_release_saved(ctx); return 0; } static u64 spufs_event_mask_get(struct spu_context *ctx) { struct spu_lscsa *lscsa = ctx->csa.lscsa; return lscsa->event_mask.slot[0]; } DEFINE_SPUFS_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get, spufs_event_mask_set, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED); static u64 spufs_event_status_get(struct spu_context *ctx) { 
struct spu_state *state = &ctx->csa; u64 stat; stat = state->spu_chnlcnt_RW[0]; if (stat) return state->spu_chnldata_RW[0]; return 0; } DEFINE_SPUFS_ATTRIBUTE(spufs_event_status_ops, spufs_event_status_get, NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED) static int spufs_srr0_set(void *data, u64 val) { struct spu_context *ctx = data; struct spu_lscsa *lscsa = ctx->csa.lscsa; int ret; ret = spu_acquire_saved(ctx); if (ret) return ret; lscsa->srr0.slot[0] = (u32) val; spu_release_saved(ctx); return 0; } static u64 spufs_srr0_get(struct spu_context *ctx) { struct spu_lscsa *lscsa = ctx->csa.lscsa; return lscsa->srr0.slot[0]; } DEFINE_SPUFS_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED) static u64 spufs_id_get(struct spu_context *ctx) { u64 num; if (ctx->state == SPU_STATE_RUNNABLE) num = ctx->spu->number; else num = (unsigned int)-1; return num; } DEFINE_SPUFS_ATTRIBUTE(spufs_id_ops, spufs_id_get, NULL, "0x%llx\n", SPU_ATTR_ACQUIRE) static u64 spufs_object_id_get(struct spu_context *ctx) { /* FIXME: Should there really be no locking here? */ return ctx->object_id; } static int spufs_object_id_set(void *data, u64 id) { struct spu_context *ctx = data; ctx->object_id = id; return 0; } DEFINE_SPUFS_ATTRIBUTE(spufs_object_id_ops, spufs_object_id_get, spufs_object_id_set, "0x%llx\n", SPU_ATTR_NOACQUIRE); static u64 spufs_lslr_get(struct spu_context *ctx) { return ctx->csa.priv2.spu_lslr_RW; } DEFINE_SPUFS_ATTRIBUTE(spufs_lslr_ops, spufs_lslr_get, NULL, "0x%llx\n", SPU_ATTR_ACQUIRE_SAVED); static int spufs_info_open(struct inode *inode, struct file *file) { struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; file->private_data = ctx; return 0; } static int spufs_caps_show(struct seq_file *s, void *private) { struct spu_context *ctx = s->private; if (!(ctx->flags & SPU_CREATE_NOSCHED)) seq_puts(s, "sched\n"); if (!(ctx->flags & SPU_CREATE_ISOLATE)) seq_puts(s, "step\n"); return 0; } static int spufs_caps_open(struct inode *inode, struct file *file) { return single_open(file, spufs_caps_show, SPUFS_I(inode)->i_ctx); } static const struct file_operations spufs_caps_fops = { .open = spufs_caps_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static ssize_t spufs_mbox_info_dump(struct spu_context *ctx, struct coredump_params *cprm) { if (!(ctx->csa.prob.mb_stat_R & 0x0000ff)) return 0; return spufs_dump_emit(cprm, &ctx->csa.prob.pu_mb_R, sizeof(ctx->csa.prob.pu_mb_R)); } static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; u32 stat, data; int ret; ret = spu_acquire_saved(ctx); if (ret) return ret; spin_lock(&ctx->csa.register_lock); stat = ctx->csa.prob.mb_stat_R; data = ctx->csa.prob.pu_mb_R; spin_unlock(&ctx->csa.register_lock); spu_release_saved(ctx); /* EOF if there's no entry in the mbox */ if (!(stat & 0x0000ff)) return 0; return simple_read_from_buffer(buf, len, pos, &data, sizeof(data)); } static const struct file_operations spufs_mbox_info_fops = { .open = spufs_info_open, .read = spufs_mbox_info_read, .llseek = generic_file_llseek, }; static ssize_t spufs_ibox_info_dump(struct spu_context *ctx, struct coredump_params *cprm) { if (!(ctx->csa.prob.mb_stat_R & 0xff0000)) return 0; return spufs_dump_emit(cprm, &ctx->csa.priv2.puint_mb_R, sizeof(ctx->csa.priv2.puint_mb_R)); } static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = 
file->private_data; u32 stat, data; int ret; ret = spu_acquire_saved(ctx); if (ret) return ret; spin_lock(&ctx->csa.register_lock); stat = ctx->csa.prob.mb_stat_R; data = ctx->csa.priv2.puint_mb_R; spin_unlock(&ctx->csa.register_lock); spu_release_saved(ctx); /* EOF if there's no entry in the ibox */ if (!(stat & 0xff0000)) return 0; return simple_read_from_buffer(buf, len, pos, &data, sizeof(data)); } static const struct file_operations spufs_ibox_info_fops = { .open = spufs_info_open, .read = spufs_ibox_info_read, .llseek = generic_file_llseek, }; static size_t spufs_wbox_info_cnt(struct spu_context *ctx) { return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32); } static ssize_t spufs_wbox_info_dump(struct spu_context *ctx, struct coredump_params *cprm) { return spufs_dump_emit(cprm, &ctx->csa.spu_mailbox_data, spufs_wbox_info_cnt(ctx)); } static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)]; int ret, count; ret = spu_acquire_saved(ctx); if (ret) return ret; spin_lock(&ctx->csa.register_lock); count = spufs_wbox_info_cnt(ctx); memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data)); spin_unlock(&ctx->csa.register_lock); spu_release_saved(ctx); return simple_read_from_buffer(buf, len, pos, &data, count * sizeof(u32)); } static const struct file_operations spufs_wbox_info_fops = { .open = spufs_info_open, .read = spufs_wbox_info_read, .llseek = generic_file_llseek, }; static void spufs_get_dma_info(struct spu_context *ctx, struct spu_dma_info *info) { int i; info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW; info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0]; info->dma_info_status = ctx->csa.spu_chnldata_RW[24]; info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25]; info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27]; for (i = 0; i < 16; i++) { struct mfc_cq_sr *qp = &info->dma_info_command_data[i]; struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i]; qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW; qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW; qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW; qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW; } } static ssize_t spufs_dma_info_dump(struct spu_context *ctx, struct coredump_params *cprm) { struct spu_dma_info info; spufs_get_dma_info(ctx, &info); return spufs_dump_emit(cprm, &info, sizeof(info)); } static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; struct spu_dma_info info; int ret; ret = spu_acquire_saved(ctx); if (ret) return ret; spin_lock(&ctx->csa.register_lock); spufs_get_dma_info(ctx, &info); spin_unlock(&ctx->csa.register_lock); spu_release_saved(ctx); return simple_read_from_buffer(buf, len, pos, &info, sizeof(info)); } static const struct file_operations spufs_dma_info_fops = { .open = spufs_info_open, .read = spufs_dma_info_read, .llseek = no_llseek, }; static void spufs_get_proxydma_info(struct spu_context *ctx, struct spu_proxydma_info *info) { int i; info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW; info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW; info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R; for (i = 0; i < 8; i++) { struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i]; struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i]; qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW; qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW; 
qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW; qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW; } } static ssize_t spufs_proxydma_info_dump(struct spu_context *ctx, struct coredump_params *cprm) { struct spu_proxydma_info info; spufs_get_proxydma_info(ctx, &info); return spufs_dump_emit(cprm, &info, sizeof(info)); } static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf, size_t len, loff_t *pos) { struct spu_context *ctx = file->private_data; struct spu_proxydma_info info; int ret; if (len < sizeof(info)) return -EINVAL; ret = spu_acquire_saved(ctx); if (ret) return ret; spin_lock(&ctx->csa.register_lock); spufs_get_proxydma_info(ctx, &info); spin_unlock(&ctx->csa.register_lock); spu_release_saved(ctx); return simple_read_from_buffer(buf, len, pos, &info, sizeof(info)); } static const struct file_operations spufs_proxydma_info_fops = { .open = spufs_info_open, .read = spufs_proxydma_info_read, .llseek = no_llseek, }; static int spufs_show_tid(struct seq_file *s, void *private) { struct spu_context *ctx = s->private; seq_printf(s, "%d\n", ctx->tid); return 0; } static int spufs_tid_open(struct inode *inode, struct file *file) { return single_open(file, spufs_show_tid, SPUFS_I(inode)->i_ctx); } static const struct file_operations spufs_tid_fops = { .open = spufs_tid_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static const char *ctx_state_names[] = { "user", "system", "iowait", "loaded" }; static unsigned long long spufs_acct_time(struct spu_context *ctx, enum spu_utilization_state state) { unsigned long long time = ctx->stats.times[state]; /* * In general, utilization statistics are updated by the controlling * thread as the spu context moves through various well defined * state transitions, but if the context is lazily loaded its * utilization statistics are not updated as the controlling thread * is not tightly coupled with the execution of the spu context. We * calculate and apply the time delta from the last recorded state * of the spu context. 
*/ if (ctx->spu && ctx->stats.util_state == state) { time += ktime_get_ns() - ctx->stats.tstamp; } return time / NSEC_PER_MSEC; } static unsigned long long spufs_slb_flts(struct spu_context *ctx) { unsigned long long slb_flts = ctx->stats.slb_flt; if (ctx->state == SPU_STATE_RUNNABLE) { slb_flts += (ctx->spu->stats.slb_flt - ctx->stats.slb_flt_base); } return slb_flts; } static unsigned long long spufs_class2_intrs(struct spu_context *ctx) { unsigned long long class2_intrs = ctx->stats.class2_intr; if (ctx->state == SPU_STATE_RUNNABLE) { class2_intrs += (ctx->spu->stats.class2_intr - ctx->stats.class2_intr_base); } return class2_intrs; } static int spufs_show_stat(struct seq_file *s, void *private) { struct spu_context *ctx = s->private; int ret; ret = spu_acquire(ctx); if (ret) return ret; seq_printf(s, "%s %llu %llu %llu %llu " "%llu %llu %llu %llu %llu %llu %llu %llu\n", ctx_state_names[ctx->stats.util_state], spufs_acct_time(ctx, SPU_UTIL_USER), spufs_acct_time(ctx, SPU_UTIL_SYSTEM), spufs_acct_time(ctx, SPU_UTIL_IOWAIT), spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED), ctx->stats.vol_ctx_switch, ctx->stats.invol_ctx_switch, spufs_slb_flts(ctx), ctx->stats.hash_flt, ctx->stats.min_flt, ctx->stats.maj_flt, spufs_class2_intrs(ctx), ctx->stats.libassist); spu_release(ctx); return 0; } static int spufs_stat_open(struct inode *inode, struct file *file) { return single_open(file, spufs_show_stat, SPUFS_I(inode)->i_ctx); } static const struct file_operations spufs_stat_fops = { .open = spufs_stat_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static inline int spufs_switch_log_used(struct spu_context *ctx) { return (ctx->switch_log->head - ctx->switch_log->tail) % SWITCH_LOG_BUFSIZE; } static inline int spufs_switch_log_avail(struct spu_context *ctx) { return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx); } static int spufs_switch_log_open(struct inode *inode, struct file *file) { struct spu_context *ctx = SPUFS_I(inode)->i_ctx; int rc; rc = spu_acquire(ctx); if (rc) return rc; if (ctx->switch_log) { rc = -EBUSY; goto out; } ctx->switch_log = kmalloc(struct_size(ctx->switch_log, log, SWITCH_LOG_BUFSIZE), GFP_KERNEL); if (!ctx->switch_log) { rc = -ENOMEM; goto out; } ctx->switch_log->head = ctx->switch_log->tail = 0; init_waitqueue_head(&ctx->switch_log->wait); rc = 0; out: spu_release(ctx); return rc; } static int spufs_switch_log_release(struct inode *inode, struct file *file) { struct spu_context *ctx = SPUFS_I(inode)->i_ctx; int rc; rc = spu_acquire(ctx); if (rc) return rc; kfree(ctx->switch_log); ctx->switch_log = NULL; spu_release(ctx); return 0; } static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n) { struct switch_log_entry *p; p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE; return snprintf(tbuf, n, "%llu.%09u %d %u %u %llu\n", (unsigned long long) p->tstamp.tv_sec, (unsigned int) p->tstamp.tv_nsec, p->spu_id, (unsigned int) p->type, (unsigned int) p->val, (unsigned long long) p->timebase); } static ssize_t spufs_switch_log_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { struct inode *inode = file_inode(file); struct spu_context *ctx = SPUFS_I(inode)->i_ctx; int error = 0, cnt = 0; if (!buf) return -EINVAL; error = spu_acquire(ctx); if (error) return error; while (cnt < len) { char tbuf[128]; int width; if (spufs_switch_log_used(ctx) == 0) { if (cnt > 0) { /* If there's data ready to go, we can * just return straight away */ break; } else if (file->f_flags & O_NONBLOCK) { error = -EAGAIN; break; } 
else { /* spufs_wait will drop the mutex and * re-acquire, but since we're in read(), the * file cannot be _released (and so * ctx->switch_log is stable). */ error = spufs_wait(ctx->switch_log->wait, spufs_switch_log_used(ctx) > 0); /* On error, spufs_wait returns without the * state mutex held */ if (error) return error; /* We may have had entries read from underneath * us while we dropped the mutex in spufs_wait, * so re-check */ if (spufs_switch_log_used(ctx) == 0) continue; } } width = switch_log_sprint(ctx, tbuf, sizeof(tbuf)); if (width < len) ctx->switch_log->tail = (ctx->switch_log->tail + 1) % SWITCH_LOG_BUFSIZE; else /* If the record is greater than space available return * partial buffer (so far) */ break; error = copy_to_user(buf + cnt, tbuf, width); if (error) break; cnt += width; } spu_release(ctx); return cnt == 0 ? error : cnt; } static __poll_t spufs_switch_log_poll(struct file *file, poll_table *wait) { struct inode *inode = file_inode(file); struct spu_context *ctx = SPUFS_I(inode)->i_ctx; __poll_t mask = 0; int rc; poll_wait(file, &ctx->switch_log->wait, wait); rc = spu_acquire(ctx); if (rc) return rc; if (spufs_switch_log_used(ctx) > 0) mask |= EPOLLIN; spu_release(ctx); return mask; } static const struct file_operations spufs_switch_log_fops = { .open = spufs_switch_log_open, .read = spufs_switch_log_read, .poll = spufs_switch_log_poll, .release = spufs_switch_log_release, .llseek = no_llseek, }; /** * Log a context switch event to a switch log reader. * * Must be called with ctx->state_mutex held. */ void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx, u32 type, u32 val) { if (!ctx->switch_log) return; if (spufs_switch_log_avail(ctx) > 1) { struct switch_log_entry *p; p = ctx->switch_log->log + ctx->switch_log->head; ktime_get_ts64(&p->tstamp); p->timebase = get_tb(); p->spu_id = spu ? spu->number : -1; p->type = type; p->val = val; ctx->switch_log->head = (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE; } wake_up(&ctx->switch_log->wait); } static int spufs_show_ctx(struct seq_file *s, void *private) { struct spu_context *ctx = s->private; u64 mfc_control_RW; mutex_lock(&ctx->state_mutex); if (ctx->spu) { struct spu *spu = ctx->spu; struct spu_priv2 __iomem *priv2 = spu->priv2; spin_lock_irq(&spu->register_lock); mfc_control_RW = in_be64(&priv2->mfc_control_RW); spin_unlock_irq(&spu->register_lock); } else { struct spu_state *csa = &ctx->csa; mfc_control_RW = csa->priv2.mfc_control_RW; } seq_printf(s, "%c flgs(%lx) sflgs(%lx) pri(%d) ts(%d) spu(%02d)" " %c %llx %llx %llx %llx %x %x\n", ctx->state == SPU_STATE_SAVED ? 'S' : 'R', ctx->flags, ctx->sched_flags, ctx->prio, ctx->time_slice, ctx->spu ? ctx->spu->number : -1, !list_empty(&ctx->rq) ? 
'q' : ' ', ctx->csa.class_0_pending, ctx->csa.class_0_dar, ctx->csa.class_1_dsisr, mfc_control_RW, ctx->ops->runcntl_read(ctx), ctx->ops->status_read(ctx)); mutex_unlock(&ctx->state_mutex); return 0; } static int spufs_ctx_open(struct inode *inode, struct file *file) { return single_open(file, spufs_show_ctx, SPUFS_I(inode)->i_ctx); } static const struct file_operations spufs_ctx_fops = { .open = spufs_ctx_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; const struct spufs_tree_descr spufs_dir_contents[] = { { "capabilities", &spufs_caps_fops, 0444, }, { "mem", &spufs_mem_fops, 0666, LS_SIZE, }, { "regs", &spufs_regs_fops, 0666, sizeof(struct spu_reg128[128]), }, { "mbox", &spufs_mbox_fops, 0444, }, { "ibox", &spufs_ibox_fops, 0444, }, { "wbox", &spufs_wbox_fops, 0222, }, { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), }, { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), }, { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), }, { "signal1", &spufs_signal1_fops, 0666, }, { "signal2", &spufs_signal2_fops, 0666, }, { "signal1_type", &spufs_signal1_type, 0666, }, { "signal2_type", &spufs_signal2_type, 0666, }, { "cntl", &spufs_cntl_fops, 0666, }, { "fpcr", &spufs_fpcr_fops, 0666, sizeof(struct spu_reg128), }, { "lslr", &spufs_lslr_ops, 0444, }, { "mfc", &spufs_mfc_fops, 0666, }, { "mss", &spufs_mss_fops, 0666, }, { "npc", &spufs_npc_ops, 0666, }, { "srr0", &spufs_srr0_ops, 0666, }, { "decr", &spufs_decr_ops, 0666, }, { "decr_status", &spufs_decr_status_ops, 0666, }, { "event_mask", &spufs_event_mask_ops, 0666, }, { "event_status", &spufs_event_status_ops, 0444, }, { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, }, { "phys-id", &spufs_id_ops, 0666, }, { "object-id", &spufs_object_id_ops, 0666, }, { "mbox_info", &spufs_mbox_info_fops, 0444, sizeof(u32), }, { "ibox_info", &spufs_ibox_info_fops, 0444, sizeof(u32), }, { "wbox_info", &spufs_wbox_info_fops, 0444, sizeof(u32), }, { "dma_info", &spufs_dma_info_fops, 0444, sizeof(struct spu_dma_info), }, { "proxydma_info", &spufs_proxydma_info_fops, 0444, sizeof(struct spu_proxydma_info)}, { "tid", &spufs_tid_fops, 0444, }, { "stat", &spufs_stat_fops, 0444, }, { "switch_log", &spufs_switch_log_fops, 0444 }, {}, }; const struct spufs_tree_descr spufs_dir_nosched_contents[] = { { "capabilities", &spufs_caps_fops, 0444, }, { "mem", &spufs_mem_fops, 0666, LS_SIZE, }, { "mbox", &spufs_mbox_fops, 0444, }, { "ibox", &spufs_ibox_fops, 0444, }, { "wbox", &spufs_wbox_fops, 0222, }, { "mbox_stat", &spufs_mbox_stat_fops, 0444, sizeof(u32), }, { "ibox_stat", &spufs_ibox_stat_fops, 0444, sizeof(u32), }, { "wbox_stat", &spufs_wbox_stat_fops, 0444, sizeof(u32), }, { "signal1", &spufs_signal1_nosched_fops, 0222, }, { "signal2", &spufs_signal2_nosched_fops, 0222, }, { "signal1_type", &spufs_signal1_type, 0666, }, { "signal2_type", &spufs_signal2_type, 0666, }, { "mss", &spufs_mss_fops, 0666, }, { "mfc", &spufs_mfc_fops, 0666, }, { "cntl", &spufs_cntl_fops, 0666, }, { "npc", &spufs_npc_ops, 0666, }, { "psmap", &spufs_psmap_fops, 0666, SPUFS_PS_MAP_SIZE, }, { "phys-id", &spufs_id_ops, 0666, }, { "object-id", &spufs_object_id_ops, 0666, }, { "tid", &spufs_tid_fops, 0444, }, { "stat", &spufs_stat_fops, 0444, }, {}, }; const struct spufs_tree_descr spufs_dir_debug_contents[] = { { ".ctx", &spufs_ctx_fops, 0444, }, {}, }; const struct spufs_coredump_reader spufs_coredump_read[] = { { "regs", spufs_regs_dump, NULL, sizeof(struct spu_reg128[128])}, { "fpcr", spufs_fpcr_dump, NULL, sizeof(struct spu_reg128) }, { "lslr", NULL, 
spufs_lslr_get, 19 }, { "decr", NULL, spufs_decr_get, 19 }, { "decr_status", NULL, spufs_decr_status_get, 19 }, { "mem", spufs_mem_dump, NULL, LS_SIZE, }, { "signal1", spufs_signal1_dump, NULL, sizeof(u32) }, { "signal1_type", NULL, spufs_signal1_type_get, 19 }, { "signal2", spufs_signal2_dump, NULL, sizeof(u32) }, { "signal2_type", NULL, spufs_signal2_type_get, 19 }, { "event_mask", NULL, spufs_event_mask_get, 19 }, { "event_status", NULL, spufs_event_status_get, 19 }, { "mbox_info", spufs_mbox_info_dump, NULL, sizeof(u32) }, { "ibox_info", spufs_ibox_info_dump, NULL, sizeof(u32) }, { "wbox_info", spufs_wbox_info_dump, NULL, 4 * sizeof(u32)}, { "dma_info", spufs_dma_info_dump, NULL, sizeof(struct spu_dma_info)}, { "proxydma_info", spufs_proxydma_info_dump, NULL, sizeof(struct spu_proxydma_info)}, { "object-id", NULL, spufs_object_id_get, 19 }, { "npc", NULL, spufs_npc_get, 19 }, { NULL }, };
linux-master
arch/powerpc/platforms/cell/spufs/file.c
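The spufs "mfc" file above accepts raw struct mfc_dma_command writes and returns completed tag-group masks on 4-byte reads. The sketch below shows how a user-space client might queue a single MFC put and wait for it, honouring the checks in spufs_check_valid_dma() (lsa/ea low nibbles must match, size at most 16 KiB and naturally aligned, tag below 16, class zero). The struct layout and the MFC_PUT_CMD value are copied by hand from asm/spu.h, which is not an exported uapi header, so treat them as assumptions of this sketch rather than a stable ABI; queue_put() and its parameters are made-up names.

#include <stdint.h>
#include <unistd.h>

/* Layout assumed from asm/spu.h; verify against the running kernel. */
struct mfc_dma_command {
	int32_t  pad;		/* reserved */
	uint32_t lsa;		/* local store address */
	uint64_t ea;		/* effective address */
	uint16_t size;		/* transfer size in bytes, <= 16 KiB */
	uint16_t tag;		/* tag group, 0..15 for user commands */
	uint16_t class;		/* class ID, must be 0 */
	uint16_t cmd;		/* MFC opcode */
};

#define MFC_PUT_CMD 0x20	/* assumed value, see asm/spu.h */

/* mfc_fd is an open "mfc" file of a spufs context owned by this process. */
static int queue_put(int mfc_fd, uint32_t lsa, uint64_t ea, uint16_t size)
{
	struct mfc_dma_command cmd = {
		.lsa = lsa, .ea = ea, .size = size,
		.tag = 1, .class = 0, .cmd = MFC_PUT_CMD,
	};
	uint32_t done;

	/* write() validates and queues the command (spufs_mfc_write) */
	if (write(mfc_fd, &cmd, sizeof(cmd)) != sizeof(cmd))
		return -1;
	/* a blocking 4-byte read() returns the completed tag-group mask */
	if (read(mfc_fd, &done, sizeof(done)) != sizeof(done))
		return -1;
	return (done & (1u << cmd.tag)) ? 0 : -1;
}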
// SPDX-License-Identifier: GPL-2.0-or-later /* * SPU local store allocation routines * * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. */ #undef DEBUG #include <linux/kernel.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <asm/spu.h> #include <asm/spu_csa.h> #include <asm/mmu.h> #include "spufs.h" int spu_alloc_lscsa(struct spu_state *csa) { struct spu_lscsa *lscsa; unsigned char *p; lscsa = vzalloc(sizeof(*lscsa)); if (!lscsa) return -ENOMEM; csa->lscsa = lscsa; /* Set LS pages reserved to allow for user-space mapping. */ for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE) SetPageReserved(vmalloc_to_page(p)); return 0; } void spu_free_lscsa(struct spu_state *csa) { /* Clear reserved bit before vfree. */ unsigned char *p; if (csa->lscsa == NULL) return; for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE) ClearPageReserved(vmalloc_to_page(p)); vfree(csa->lscsa); }
linux-master
arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Low-level SPU handling * * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 * * Author: Arnd Bergmann <[email protected]> */ #include <linux/sched/signal.h> #include <linux/mm.h> #include <asm/spu.h> #include <asm/spu_csa.h> #include "spufs.h" /** * Handle an SPE event, depending on context SPU_CREATE_EVENTS_ENABLED flag. * * If the context was created with events, we just set the return event. * Otherwise, send an appropriate signal to the process. */ static void spufs_handle_event(struct spu_context *ctx, unsigned long ea, int type) { if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) { ctx->event_return |= type; wake_up_all(&ctx->stop_wq); return; } switch (type) { case SPE_EVENT_INVALID_DMA: force_sig_fault(SIGBUS, BUS_OBJERR, NULL); break; case SPE_EVENT_SPE_DATA_STORAGE: ctx->ops->restart_dma(ctx); force_sig_fault(SIGSEGV, SEGV_ACCERR, (void __user *)ea); break; case SPE_EVENT_DMA_ALIGNMENT: /* DAR isn't set for an alignment fault :( */ force_sig_fault(SIGBUS, BUS_ADRALN, NULL); break; case SPE_EVENT_SPE_ERROR: force_sig_fault( SIGILL, ILL_ILLOPC, (void __user *)(unsigned long) ctx->ops->npc_read(ctx) - 4); break; } } int spufs_handle_class0(struct spu_context *ctx) { unsigned long stat = ctx->csa.class_0_pending & CLASS0_INTR_MASK; if (likely(!stat)) return 0; if (stat & CLASS0_DMA_ALIGNMENT_INTR) spufs_handle_event(ctx, ctx->csa.class_0_dar, SPE_EVENT_DMA_ALIGNMENT); if (stat & CLASS0_INVALID_DMA_COMMAND_INTR) spufs_handle_event(ctx, ctx->csa.class_0_dar, SPE_EVENT_INVALID_DMA); if (stat & CLASS0_SPU_ERROR_INTR) spufs_handle_event(ctx, ctx->csa.class_0_dar, SPE_EVENT_SPE_ERROR); ctx->csa.class_0_pending = 0; return -EIO; } /* * bottom half handler for page faults, we can't do this from * interrupt context, since we might need to sleep. * we also need to give up the mutex so we can get scheduled * out while waiting for the backing store. * * TODO: try calling hash_page from the interrupt handler first * in order to speed up the easy case. */ int spufs_handle_class1(struct spu_context *ctx) { u64 ea, dsisr, access; unsigned long flags; vm_fault_t flt = 0; int ret; /* * dar and dsisr get passed from the registers * to the spu_context, to this function, but not * back to the spu if it gets scheduled again. * * if we don't handle the fault for a saved context * in time, we can still expect to get the same fault * the immediately after the context restore. */ ea = ctx->csa.class_1_dar; dsisr = ctx->csa.class_1_dsisr; if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))) return 0; spuctx_switch_state(ctx, SPU_UTIL_IOWAIT); pr_debug("ctx %p: ea %016llx, dsisr %016llx state %d\n", ctx, ea, dsisr, ctx->state); ctx->stats.hash_flt++; if (ctx->state == SPU_STATE_RUNNABLE) ctx->spu->stats.hash_flt++; /* we must not hold the lock when entering copro_handle_mm_fault */ spu_release(ctx); access = (_PAGE_PRESENT | _PAGE_READ); access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_WRITE : 0UL; local_irq_save(flags); ret = hash_page(ea, access, 0x300, dsisr); local_irq_restore(flags); /* hashing failed, so try the actual fault handler */ if (ret) ret = copro_handle_mm_fault(current->mm, ea, dsisr, &flt); /* * This is nasty: we need the state_mutex for all the bookkeeping even * if the syscall was interrupted by a signal. ewww. */ mutex_lock(&ctx->state_mutex); /* * Clear dsisr under ctxt lock after handling the fault, so that * time slicing will not preempt the context while the page fault * handler is running. 
Context switch code removes mappings. */ ctx->csa.class_1_dar = ctx->csa.class_1_dsisr = 0; /* * If we handled the fault successfully and are in runnable * state, restart the DMA. * In case of unhandled error report the problem to user space. */ if (!ret) { if (flt & VM_FAULT_MAJOR) ctx->stats.maj_flt++; else ctx->stats.min_flt++; if (ctx->state == SPU_STATE_RUNNABLE) { if (flt & VM_FAULT_MAJOR) ctx->spu->stats.maj_flt++; else ctx->spu->stats.min_flt++; } if (ctx->spu) ctx->ops->restart_dma(ctx); } else spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE); spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); return ret; }
linux-master
arch/powerpc/platforms/cell/spufs/fault.c
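spufs_handle_event() above either raises a signal or, for contexts created with SPU_CREATE_EVENTS_ENABLED, accumulates an SPE_EVENT_* mask that spufs hands back through the third argument of the spu_run system call. Below is a minimal user-space sketch of that events-enabled path, assuming a powerpc toolchain where __NR_spu_run is available from the kernel headers; run_spu_and_report() is a made-up name and the interpretation of the returned stop status is deliberately left out.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>	/* __NR_spu_run on powerpc */

/* ctx_fd is a spufs context created with SPU_CREATE_EVENTS_ENABLED. */
static long run_spu_and_report(int ctx_fd)
{
	uint32_t npc = 0;	/* entry point in local store, updated on return */
	uint32_t event = 0;	/* receives the accumulated SPE_EVENT_* mask */
	long status;

	status = syscall(__NR_spu_run, ctx_fd, &npc, &event);
	if (status < 0) {
		perror("spu_run");
		return status;
	}
	if (event)	/* e.g. SPE_EVENT_SPE_DATA_STORAGE from a failed DMA */
		fprintf(stderr, "SPU event mask 0x%x, npc 0x%x\n", event, npc);
	return status;	/* SPU stop-and-signal status word */
}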
// SPDX-License-Identifier: GPL-2.0-or-later /* * MPC85xx RDB Board Setup * * Copyright 2009,2012-2013 Freescale Semiconductor Inc. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/fsl/guts.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <soc/fsl/qe/qe.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "smp.h" #include "mpc85xx.h" static void __init mpc85xx_rdb_pic_init(void) { struct mpic *mpic; int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU; if (of_machine_is_compatible("fsl,MPC85XXRDB-CAMP")) flags |= MPIC_NO_RESET; mpic = mpic_alloc(NULL, 0, flags, 0, 256, " OpenPIC "); if (WARN_ON(!mpic)) return; mpic_init(mpic); } /* * Setup the architecture */ static void __init mpc85xx_rdb_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("mpc85xx_rdb_setup_arch()", 0); mpc85xx_smp_init(); fsl_pci_assign_primary(); mpc85xx_qe_par_io_init(); #if defined(CONFIG_UCC_GETH) || defined(CONFIG_SERIAL_QE) if (machine_is(p1025_rdb)) { struct device_node *np; struct ccsr_guts __iomem *guts; np = of_find_node_by_name(NULL, "global-utilities"); if (np) { guts = of_iomap(np, 0); if (!guts) { pr_err("mpc85xx-rdb: could not map global utilities register\n"); } else { /* P1025 has pins muxed for QE and other functions. To * enable QE UEC mode, we need to set bit QE0 for UCC1 * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9 * and QE12 for QE MII management singals in PMUXCR * register. */ setbits32(&guts->pmuxcr, MPC85xx_PMUXCR_QE(0) | MPC85xx_PMUXCR_QE(3) | MPC85xx_PMUXCR_QE(9) | MPC85xx_PMUXCR_QE(12)); iounmap(guts); } of_node_put(np); } } #endif pr_info("MPC85xx RDB board from Freescale Semiconductor\n"); } machine_arch_initcall(p1020_mbg_pc, mpc85xx_common_publish_devices); machine_arch_initcall(p1020_rdb, mpc85xx_common_publish_devices); machine_arch_initcall(p1020_rdb_pc, mpc85xx_common_publish_devices); machine_arch_initcall(p1020_rdb_pd, mpc85xx_common_publish_devices); machine_arch_initcall(p1020_utm_pc, mpc85xx_common_publish_devices); machine_arch_initcall(p1021_rdb_pc, mpc85xx_common_publish_devices); machine_arch_initcall(p1025_rdb, mpc85xx_common_publish_devices); machine_arch_initcall(p1024_rdb, mpc85xx_common_publish_devices); define_machine(p1020_rdb) { .name = "P1020 RDB", .compatible = "fsl,P1020RDB", .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, }; define_machine(p1021_rdb_pc) { .name = "P1021 RDB-PC", .compatible = "fsl,P1021RDB-PC", .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, }; define_machine(p1025_rdb) { .name = "P1025 RDB", .compatible = "fsl,P1025RDB", .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, }; define_machine(p1020_mbg_pc) { .name = "P1020 MBG-PC", .compatible = "fsl,P1020MBG-PC", 
.setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, }; define_machine(p1020_utm_pc) { .name = "P1020 UTM-PC", .compatible = "fsl,P1020UTM-PC", .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, }; define_machine(p1020_rdb_pc) { .name = "P1020RDB-PC", .compatible = "fsl,P1020RDB-PC", .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, }; define_machine(p1020_rdb_pd) { .name = "P1020RDB-PD", .compatible = "fsl,P1020RDB-PD", .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, }; define_machine(p1024_rdb) { .name = "P1024 RDB", .compatible = "fsl,P1024RDB", .setup_arch = mpc85xx_rdb_setup_arch, .init_IRQ = mpc85xx_rdb_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/85xx/mpc85xx_rdb.c
// SPDX-License-Identifier: GPL-2.0-only /* * Routines common to most mpc85xx-based boards. */ #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <asm/fsl_pm.h> #include <soc/fsl/qe/qe.h> #include <sysdev/cpm2_pic.h> #include "mpc85xx.h" const struct fsl_pm_ops *qoriq_pm_ops; static const struct of_device_id mpc85xx_common_ids[] __initconst = { { .type = "soc", }, { .compatible = "soc", }, { .compatible = "simple-bus", }, { .name = "cpm", }, { .name = "localbus", }, { .compatible = "gianfar", }, { .compatible = "fsl,qe", }, { .compatible = "fsl,cpm2", }, { .compatible = "fsl,srio", }, /* So that the DMA channel nodes can be probed individually: */ { .compatible = "fsl,eloplus-dma", }, /* For the PMC driver */ { .compatible = "fsl,mpc8548-guts", }, /* Probably unnecessary? */ { .compatible = "gpio-leds", }, /* For all PCI controllers */ { .compatible = "fsl,mpc8540-pci", }, { .compatible = "fsl,mpc8548-pcie", }, { .compatible = "fsl,p1022-pcie", }, { .compatible = "fsl,p1010-pcie", }, { .compatible = "fsl,p1023-pcie", }, { .compatible = "fsl,p4080-pcie", }, { .compatible = "fsl,qoriq-pcie-v2.4", }, { .compatible = "fsl,qoriq-pcie-v2.3", }, { .compatible = "fsl,qoriq-pcie-v2.2", }, { .compatible = "fsl,fman", }, {}, }; int __init mpc85xx_common_publish_devices(void) { return of_platform_bus_probe(NULL, mpc85xx_common_ids, NULL); } #ifdef CONFIG_CPM2 static void cpm2_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); int cascade_irq; while ((cascade_irq = cpm2_get_irq()) >= 0) generic_handle_irq(cascade_irq); chip->irq_eoi(&desc->irq_data); } void __init mpc85xx_cpm2_pic_init(void) { struct device_node *np; int irq; /* Setup CPM2 PIC */ np = of_find_compatible_node(NULL, NULL, "fsl,cpm2-pic"); if (np == NULL) { printk(KERN_ERR "PIC init: can not find fsl,cpm2-pic node\n"); return; } irq = irq_of_parse_and_map(np, 0); if (!irq) { of_node_put(np); printk(KERN_ERR "PIC init: got no IRQ for cpm cascade\n"); return; } cpm2_pic_init(np); of_node_put(np); irq_set_chained_handler(irq, cpm2_cascade); } #endif #ifdef CONFIG_QUICC_ENGINE void __init mpc85xx_qe_par_io_init(void) { struct device_node *np; np = of_find_node_by_name(NULL, "par_io"); if (np) { struct device_node *ucc; par_io_init(np); of_node_put(np); for_each_node_by_name(ucc, "ucc") par_io_of_config(ucc); } } #endif
linux-master
arch/powerpc/platforms/85xx/common.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * ppa8548 setup and early boot code. * * Copyright 2009 Prodrive B.V.. * * By Stef van Os (see MAINTAINERS for contact information) * * Based on the SBC8548 support - Copyright 2007 Wind River Systems Inc. * Based on the MPC8548CDS support - Copyright 2005 Freescale Inc. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/reboot.h> #include <linux/seq_file.h> #include <linux/of_fdt.h> #include <linux/of_platform.h> #include <asm/machdep.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <sysdev/fsl_soc.h> static void __init ppa8548_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); } /* * Setup the architecture */ static void __init ppa8548_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("ppa8548_setup_arch()", 0); } static void ppa8548_show_cpuinfo(struct seq_file *m) { uint32_t svid, phid1; svid = mfspr(SPRN_SVR); seq_printf(m, "Vendor\t\t: Prodrive B.V.\n"); seq_printf(m, "SVR\t\t: 0x%x\n", svid); /* Display cpu Pll setting */ phid1 = mfspr(SPRN_HID1); seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); } static const struct of_device_id of_bus_ids[] __initconst = { { .name = "soc", }, { .type = "soc", }, { .compatible = "simple-bus", }, { .compatible = "gianfar", }, { .compatible = "fsl,srio", }, {}, }; static int __init declare_of_platform_devices(void) { of_platform_bus_probe(NULL, of_bus_ids, NULL); return 0; } machine_device_initcall(ppa8548, declare_of_platform_devices); define_machine(ppa8548) { .name = "ppa8548", .compatible = "ppa8548", .setup_arch = ppa8548_setup_arch, .init_IRQ = ppa8548_pic_init, .show_cpuinfo = ppa8548_show_cpuinfo, .get_irq = mpic_get_irq, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/85xx/ppa8548.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * T1042 platform DIU operation * * Copyright 2014 Freescale Semiconductor Inc. */ #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <sysdev/fsl_soc.h> /*DIU Pixel ClockCR offset in scfg*/ #define CCSR_SCFG_PIXCLKCR 0x28 /* DIU Pixel Clock bits of the PIXCLKCR */ #define PIXCLKCR_PXCKEN 0x80000000 #define PIXCLKCR_PXCKINV 0x40000000 #define PIXCLKCR_PXCKDLY 0x0000FF00 #define PIXCLKCR_PXCLK_MASK 0x00FF0000 /* Some CPLD register definitions */ #define CPLD_DIUCSR 0x16 #define CPLD_DIUCSR_DVIEN 0x80 #define CPLD_DIUCSR_BACKLIGHT 0x0f struct device_node *cpld_node; /** * t1042rdb_set_monitor_port: switch the output to a different monitor port */ static void t1042rdb_set_monitor_port(enum fsl_diu_monitor_port port) { void __iomem *cpld_base; cpld_base = of_iomap(cpld_node, 0); if (!cpld_base) { pr_err("%s: Could not map cpld registers\n", __func__); goto exit; } switch (port) { case FSL_DIU_PORT_DVI: /* Enable the DVI(HDMI) port, disable the DFP and * the backlight */ clrbits8(cpld_base + CPLD_DIUCSR, CPLD_DIUCSR_DVIEN); break; case FSL_DIU_PORT_LVDS: /* * LVDS also needs backlight enabled, otherwise the display * will be blank. */ /* Enable the DFP port, disable the DVI*/ setbits8(cpld_base + CPLD_DIUCSR, 0x01 << 8); setbits8(cpld_base + CPLD_DIUCSR, 0x01 << 4); setbits8(cpld_base + CPLD_DIUCSR, CPLD_DIUCSR_BACKLIGHT); break; default: pr_err("%s: Unsupported monitor port %i\n", __func__, port); } iounmap(cpld_base); exit: of_node_put(cpld_node); } /** * t1042rdb_set_pixel_clock: program the DIU's clock * @pixclock: pixel clock in ps (pico seconds) */ static void t1042rdb_set_pixel_clock(unsigned int pixclock) { struct device_node *scfg_np; void __iomem *scfg; unsigned long freq; u64 temp; u32 pxclk; scfg_np = of_find_compatible_node(NULL, NULL, "fsl,t1040-scfg"); if (!scfg_np) { pr_err("%s: Missing scfg node. Can not display video.\n", __func__); return; } scfg = of_iomap(scfg_np, 0); of_node_put(scfg_np); if (!scfg) { pr_err("%s: Could not map device. Can not display video.\n", __func__); return; } /* Convert pixclock into frequency */ temp = 1000000000000ULL; do_div(temp, pixclock); freq = temp; /* * 'pxclk' is the ratio of the platform clock to the pixel clock. * This number is programmed into the PIXCLKCR register, and the valid * range of values is 2-255. */ pxclk = DIV_ROUND_CLOSEST(fsl_get_sys_freq(), freq); pxclk = clamp_t(u32, pxclk, 2, 255); /* Disable the pixel clock, and set it to non-inverted and no delay */ clrbits32(scfg + CCSR_SCFG_PIXCLKCR, PIXCLKCR_PXCKEN | PIXCLKCR_PXCKDLY | PIXCLKCR_PXCLK_MASK); /* Enable the clock and set the pxclk */ setbits32(scfg + CCSR_SCFG_PIXCLKCR, PIXCLKCR_PXCKEN | (pxclk << 16)); iounmap(scfg); } /** * t1042rdb_valid_monitor_port: set the monitor port for sysfs */ static enum fsl_diu_monitor_port t1042rdb_valid_monitor_port(enum fsl_diu_monitor_port port) { switch (port) { case FSL_DIU_PORT_DVI: case FSL_DIU_PORT_LVDS: return port; default: return FSL_DIU_PORT_DVI; /* Dual-link LVDS is not supported */ } } static int __init t1042rdb_diu_init(void) { cpld_node = of_find_compatible_node(NULL, NULL, "fsl,t1042rdb-cpld"); if (!cpld_node) return 0; diu_ops.set_monitor_port = t1042rdb_set_monitor_port; diu_ops.set_pixel_clock = t1042rdb_set_pixel_clock; diu_ops.valid_monitor_port = t1042rdb_valid_monitor_port; return 0; } early_initcall(t1042rdb_diu_init); MODULE_LICENSE("GPL");
linux-master
arch/powerpc/platforms/85xx/t1042rdb_diu.c
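t1042rdb_set_pixel_clock() converts the pixel period in picoseconds to a frequency and then programs the platform-clock/pixel-clock ratio into PIXCLKCR. The stand-alone sketch below redoes that arithmetic with a worked example; the 400 MHz platform clock is an invented figure (on hardware it comes from fsl_get_sys_freq()), and pixclkcr_ratio() is a made-up helper name.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the PIXCLKCR ratio computation, clamped to the valid 2..255. */
static uint32_t pixclkcr_ratio(uint64_t sys_freq_hz, uint32_t pixclock_ps)
{
	uint64_t pix_hz = 1000000000000ULL / pixclock_ps;	/* ps -> Hz */
	uint64_t ratio = (sys_freq_hz + pix_hz / 2) / pix_hz;	/* round to nearest */

	if (ratio < 2)
		ratio = 2;
	if (ratio > 255)
		ratio = 255;
	return (uint32_t)ratio;
}

int main(void)
{
	/* 15384 ps ~ 65 MHz (1024x768@60Hz); assume a 400 MHz platform clock */
	uint32_t pxclk = pixclkcr_ratio(400000000ULL, 15384);

	/* prints 6; the driver would then write PIXCLKCR_PXCKEN | (6 << 16) */
	printf("pxclk = %u\n", pxclk);
	return 0;
}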
// SPDX-License-Identifier: GPL-2.0-or-later /* * MPC85xx DS Board Setup * * Author Xianghua Xiao ([email protected]) * Roy Zang <[email protected]> * - Add PCI/PCI Exprees support * Copyright 2007 Freescale Semiconductor Inc. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/of.h> #include <linux/of_irq.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <asm/i8259.h> #include <asm/swiotlb.h> #include <asm/ppc-pci.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "smp.h" #include "mpc85xx.h" static void __init mpc85xx_ds_pic_init(void) { struct mpic *mpic; int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU; if (of_machine_is_compatible("fsl,MPC8572DS-CAMP")) flags |= MPIC_NO_RESET; mpic = mpic_alloc(NULL, 0, flags, 0, 256, " OpenPIC "); if (WARN_ON(!mpic)) return; mpic_init(mpic); mpc85xx_8259_init(); } /* * Setup the architecture */ static void __init mpc85xx_ds_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("mpc85xx_ds_setup_arch()", 0); swiotlb_detect_4g(); fsl_pci_assign_primary(); uli_init(); mpc85xx_smp_init(); pr_info("MPC85xx DS board from Freescale Semiconductor\n"); } machine_arch_initcall(mpc8544_ds, mpc85xx_common_publish_devices); machine_arch_initcall(mpc8572_ds, mpc85xx_common_publish_devices); define_machine(mpc8544_ds) { .name = "MPC8544 DS", .compatible = "MPC8544DS", .setup_arch = mpc85xx_ds_setup_arch, .init_IRQ = mpc85xx_ds_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, }; define_machine(mpc8572_ds) { .name = "MPC8572 DS", .compatible = "fsl,MPC8572DS", .setup_arch = mpc85xx_ds_setup_arch, .init_IRQ = mpc85xx_ds_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/85xx/mpc85xx_ds.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Servergy CTS-1000 Setup * * Maintained by Ben Collins <[email protected]> * * Copyright 2012 by Servergy, Inc. */ #define pr_fmt(fmt) "gpio-halt: " fmt #include <linux/err.h> #include <linux/platform_device.h> #include <linux/device.h> #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/of_irq.h> #include <linux/workqueue.h> #include <linux/reboot.h> #include <linux/interrupt.h> #include <asm/machdep.h> static struct gpio_desc *halt_gpio; static int halt_irq; static const struct of_device_id child_match[] = { { .compatible = "sgy,gpio-halt", }, {}, }; static void gpio_halt_wfn(struct work_struct *work) { /* Likely wont return */ orderly_poweroff(true); } static DECLARE_WORK(gpio_halt_wq, gpio_halt_wfn); static void __noreturn gpio_halt_cb(void) { pr_info("triggering GPIO.\n"); /* Probably wont return */ gpiod_set_value(halt_gpio, 1); panic("Halt failed\n"); } /* This IRQ means someone pressed the power button and it is waiting for us * to handle the shutdown/poweroff. */ static irqreturn_t gpio_halt_irq(int irq, void *__data) { struct platform_device *pdev = __data; dev_info(&pdev->dev, "scheduling shutdown due to power button IRQ\n"); schedule_work(&gpio_halt_wq); return IRQ_HANDLED; }; static int __gpio_halt_probe(struct platform_device *pdev, struct device_node *halt_node) { int err; halt_gpio = fwnode_gpiod_get_index(of_fwnode_handle(halt_node), NULL, 0, GPIOD_OUT_LOW, "gpio-halt"); err = PTR_ERR_OR_ZERO(halt_gpio); if (err) { dev_err(&pdev->dev, "failed to request halt GPIO: %d\n", err); return err; } /* Now get the IRQ which tells us when the power button is hit */ halt_irq = irq_of_parse_and_map(halt_node, 0); err = request_irq(halt_irq, gpio_halt_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, "gpio-halt", pdev); if (err) { dev_err(&pdev->dev, "failed to request IRQ %d: %d\n", halt_irq, err); gpiod_put(halt_gpio); halt_gpio = NULL; return err; } /* Register our halt function */ ppc_md.halt = gpio_halt_cb; pm_power_off = gpio_halt_cb; dev_info(&pdev->dev, "registered halt GPIO, irq: %d\n", halt_irq); return 0; } static int gpio_halt_probe(struct platform_device *pdev) { struct device_node *halt_node; int ret; if (!pdev->dev.of_node) return -ENODEV; /* If there's no matching child, this isn't really an error */ halt_node = of_find_matching_node(pdev->dev.of_node, child_match); if (!halt_node) return -ENODEV; ret = __gpio_halt_probe(pdev, halt_node); of_node_put(halt_node); return ret; } static int gpio_halt_remove(struct platform_device *pdev) { free_irq(halt_irq, pdev); cancel_work_sync(&gpio_halt_wq); ppc_md.halt = NULL; pm_power_off = NULL; gpiod_put(halt_gpio); halt_gpio = NULL; return 0; } static const struct of_device_id gpio_halt_match[] = { /* We match on the gpio bus itself and scan the children since they * wont be matched against us. We know the bus wont match until it * has been registered too. */ { .compatible = "fsl,qoriq-gpio", }, {}, }; MODULE_DEVICE_TABLE(of, gpio_halt_match); static struct platform_driver gpio_halt_driver = { .driver = { .name = "gpio-halt", .of_match_table = gpio_halt_match, }, .probe = gpio_halt_probe, .remove = gpio_halt_remove, }; module_platform_driver(gpio_halt_driver); MODULE_DESCRIPTION("Driver to support GPIO triggered system halt for Servergy CTS-1000 Systems."); MODULE_VERSION("1.0"); MODULE_AUTHOR("Ben Collins <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
arch/powerpc/platforms/85xx/sgy_cts1000.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Paravirt target for a generic QEMU e500 machine * * This is intended to be a flexible device-tree-driven platform, not fixed * to a particular piece of hardware or a particular spec of virtual hardware, * beyond the assumption of an e500-family CPU. Some things are still hardcoded * here, such as MPIC, but this is a limitation of the current code rather than * an interface contract with QEMU. * * Copyright 2012 Freescale Semiconductor Inc. */ #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_fdt.h> #include <linux/pgtable.h> #include <asm/machdep.h> #include <asm/time.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <asm/swiotlb.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "smp.h" #include "mpc85xx.h" static void __init qemu_e500_pic_init(void) { struct mpic *mpic; unsigned int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU | MPIC_ENABLE_COREINT; mpic = mpic_alloc(NULL, 0, flags, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); } static void __init qemu_e500_setup_arch(void) { ppc_md.progress("qemu_e500_setup_arch()", 0); fsl_pci_assign_primary(); swiotlb_detect_4g(); mpc85xx_smp_init(); } machine_arch_initcall(qemu_e500, mpc85xx_common_publish_devices); define_machine(qemu_e500) { .name = "QEMU e500", .compatible = "fsl,qemu-e500", .setup_arch = qemu_e500_setup_arch, .init_IRQ = qemu_e500_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_coreint_irq, .progress = udbg_progress, .power_save = e500_idle, };
linux-master
arch/powerpc/platforms/85xx/qemu_e500.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * P1010RDB Board Setup * * Copyright 2011 Freescale Semiconductor Inc. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/of.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc85xx.h" void __init p1010_rdb_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); } /* * Setup the architecture */ static void __init p1010_rdb_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("p1010_rdb_setup_arch()", 0); fsl_pci_assign_primary(); printk(KERN_INFO "P1010 RDB board from Freescale Semiconductor\n"); } machine_arch_initcall(p1010_rdb, mpc85xx_common_publish_devices); /* * Called very early, device-tree isn't unflattened */ static int __init p1010_rdb_probe(void) { if (of_machine_is_compatible("fsl,P1010RDB")) return 1; if (of_machine_is_compatible("fsl,P1010RDB-PB")) return 1; return 0; } define_machine(p1010_rdb) { .name = "P1010 RDB", .probe = p1010_rdb_probe, .setup_arch = p1010_rdb_setup_arch, .init_IRQ = p1010_rdb_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/85xx/p1010rdb.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2010-2011, 2013 Freescale Semiconductor, Inc. * * Author: Michael Johnston <[email protected]> * * Description: * TWR-P102x Board Setup */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/fsl/guts.h> #include <linux/pci.h> #include <linux/of.h> #include <linux/of_address.h> #include <asm/pci-bridge.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <soc/fsl/qe/qe.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "smp.h" #include "mpc85xx.h" static void __init twr_p1025_pic_init(void) { struct mpic *mpic; mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); } /* ************************************************************************ * * Setup the architecture * */ static void __init twr_p1025_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("twr_p1025_setup_arch()", 0); mpc85xx_smp_init(); fsl_pci_assign_primary(); #ifdef CONFIG_QUICC_ENGINE mpc85xx_qe_par_io_init(); #if IS_ENABLED(CONFIG_UCC_GETH) || IS_ENABLED(CONFIG_SERIAL_QE) if (machine_is(twr_p1025)) { struct ccsr_guts __iomem *guts; struct device_node *np; np = of_find_compatible_node(NULL, NULL, "fsl,p1021-guts"); if (np) { guts = of_iomap(np, 0); if (!guts) pr_err("twr_p1025: could not map global utilities register\n"); else { /* P1025 has pins muxed for QE and other functions. To * enable QE UEC mode, we need to set bit QE0 for UCC1 * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9 * and QE12 for QE MII management signals in PMUXCR * register. * Set QE mux bits in PMUXCR */ setbits32(&guts->pmuxcr, MPC85xx_PMUXCR_QE(0) | MPC85xx_PMUXCR_QE(3) | MPC85xx_PMUXCR_QE(9) | MPC85xx_PMUXCR_QE(12)); iounmap(guts); #if IS_ENABLED(CONFIG_SERIAL_QE) /* On P1025TWR board, the UCC7 acted as UART port. * However, The UCC7's CTS pin is low level in default, * it will impact the transmission in full duplex * communication. So disable the Flow control pin PA18. * The UCC7 UART just can use RXD and TXD pins. */ par_io_config_pin(0, 18, 0, 0, 0, 0); #endif /* Drive PB29 to CPLD low - CPLD will then change * muxing from LBC to QE */ par_io_config_pin(1, 29, 1, 0, 0, 0); par_io_data_set(1, 29, 0); } of_node_put(np); } } #endif #endif /* CONFIG_QUICC_ENGINE */ pr_info("TWR-P1025 board from Freescale Semiconductor\n"); } machine_arch_initcall(twr_p1025, mpc85xx_common_publish_devices); define_machine(twr_p1025) { .name = "TWR-P1025", .compatible = "fsl,TWR-P1025", .setup_arch = twr_p1025_setup_arch, .init_IRQ = twr_p1025_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/85xx/twr_p102x.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * MPC85xx 8259 functions for DS Board Setup * * Author Xianghua Xiao ([email protected]) * Roy Zang <[email protected]> * - Add PCI/PCI Express support * Copyright 2007 Freescale Semiconductor Inc. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <asm/mpic.h> #include <asm/i8259.h> #include "mpc85xx.h" static void mpc85xx_8259_cascade(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq = i8259_irq(); if (cascade_irq) generic_handle_irq(cascade_irq); chip->irq_eoi(&desc->irq_data); } void __init mpc85xx_8259_init(void) { struct device_node *np; struct device_node *cascade_node = NULL; int cascade_irq; /* Initialize the i8259 controller */ for_each_node_by_type(np, "interrupt-controller") { if (of_device_is_compatible(np, "chrp,iic")) { cascade_node = np; break; } } if (cascade_node == NULL) { pr_debug("i8259: Could not find i8259 PIC\n"); return; } cascade_irq = irq_of_parse_and_map(cascade_node, 0); if (!cascade_irq) { pr_err("i8259: Failed to map cascade interrupt\n"); return; } pr_debug("i8259: cascade mapped to irq %d\n", cascade_irq); i8259_init(cascade_node, 0); of_node_put(cascade_node); irq_set_chained_handler(cascade_irq, mpc85xx_8259_cascade); }
linux-master
arch/powerpc/platforms/85xx/mpc85xx_8259.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Board setup routines for the Emerson/Artesyn MVME2500 * * Copyright 2014 Elettra-Sincrotrone Trieste S.C.p.A. * * Based on earlier code by: * * Xianghua Xiao ([email protected]) * Tom Armistead ([email protected]) * Copyright 2012 Emerson * * Author Alessio Igor Bogani <[email protected]> */ #include <linux/pci.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc85xx.h" void __init mvme2500_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); } /* * Setup the architecture */ static void __init mvme2500_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("mvme2500_setup_arch()", 0); fsl_pci_assign_primary(); pr_info("MVME2500 board from Artesyn\n"); } machine_arch_initcall(mvme2500, mpc85xx_common_publish_devices); define_machine(mvme2500) { .name = "MVME2500", .compatible = "artesyn,MVME2500", .setup_arch = mvme2500_setup_arch, .init_IRQ = mvme2500_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/85xx/mvme2500.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Corenet based SoC DS Setup * * Maintained by Kumar Gala (see MAINTAINERS for contact information) * * Copyright 2009-2011 Freescale Semiconductor Inc. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/pgtable.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/ppc-pci.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <asm/ehv_pic.h> #include <asm/swiotlb.h> #include <linux/of_platform.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "smp.h" #include "mpc85xx.h" static void __init corenet_gen_pic_init(void) { struct mpic *mpic; unsigned int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU | MPIC_NO_RESET; if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) && !IS_ENABLED(CONFIG_KEXEC_CORE)) flags |= MPIC_ENABLE_COREINT; mpic = mpic_alloc(NULL, 0, flags, 0, 512, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); } /* * Setup the architecture */ static void __init corenet_gen_setup_arch(void) { mpc85xx_smp_init(); swiotlb_detect_4g(); pr_info("%s board\n", ppc_md.name); } static const struct of_device_id of_device_ids[] = { { .compatible = "simple-bus" }, { .compatible = "mdio-mux-gpio" }, { .compatible = "fsl,fpga-ngpixis" }, { .compatible = "fsl,fpga-qixis" }, { .compatible = "fsl,srio", }, { .compatible = "fsl,p4080-pcie", }, { .compatible = "fsl,qoriq-pcie-v2.2", }, { .compatible = "fsl,qoriq-pcie-v2.3", }, { .compatible = "fsl,qoriq-pcie-v2.4", }, { .compatible = "fsl,qoriq-pcie-v3.0", }, { .compatible = "fsl,qe", }, /* The following two are for the Freescale hypervisor */ { .name = "hypervisor", }, { .name = "handles", }, {} }; static int __init corenet_gen_publish_devices(void) { return of_platform_bus_probe(NULL, of_device_ids, NULL); } machine_arch_initcall(corenet_generic, corenet_gen_publish_devices); static const char * const boards[] __initconst = { "fsl,P2041RDB", "fsl,P3041DS", "fsl,OCA4080", "fsl,P4080DS", "fsl,P5020DS", "fsl,P5040DS", "fsl,T2080QDS", "fsl,T2080RDB", "fsl,T2081QDS", "fsl,T4240QDS", "fsl,T4240RDB", "fsl,B4860QDS", "fsl,B4420QDS", "fsl,B4220QDS", "fsl,T1023RDB", "fsl,T1024QDS", "fsl,T1024RDB", "fsl,T1040D4RDB", "fsl,T1042D4RDB", "fsl,T1040QDS", "fsl,T1042QDS", "fsl,T1040RDB", "fsl,T1042RDB", "fsl,T1042RDB_PI", "keymile,kmcent2", "keymile,kmcoge4", "varisys,CYRUS", NULL }; /* * Called very early, device-tree isn't unflattened */ static int __init corenet_generic_probe(void) { char hv_compat[24]; int i; #ifdef CONFIG_SMP extern struct smp_ops_t smp_85xx_ops; #endif if (of_device_compatible_match(of_root, boards)) return 1; /* Check if we're running under the Freescale hypervisor */ for (i = 0; boards[i]; i++) { snprintf(hv_compat, sizeof(hv_compat), "%s-hv", boards[i]); if (of_machine_is_compatible(hv_compat)) { ppc_md.init_IRQ = ehv_pic_init; ppc_md.get_irq = ehv_pic_get_irq; ppc_md.restart = fsl_hv_restart; pm_power_off = fsl_hv_halt; ppc_md.halt = fsl_hv_halt; #ifdef CONFIG_SMP /* * Disable the timebase sync operations because we * can't write to the timebase registers under the * hypervisor. 
*/ smp_85xx_ops.give_timebase = NULL; smp_85xx_ops.take_timebase = NULL; #endif return 1; } } return 0; } define_machine(corenet_generic) { .name = "CoreNet Generic", .probe = corenet_generic_probe, .setup_arch = corenet_gen_setup_arch, .init_IRQ = corenet_gen_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif /* * Core reset may cause issues if using the proxy mode of MPIC. * So, use the mixed mode of MPIC if enabling CPU hotplug. * * Likewise, problems have been seen with kexec when coreint is enabled. */ #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC_CORE) .get_irq = mpic_get_irq, #else .get_irq = mpic_get_coreint_irq, #endif .progress = udbg_progress, .power_save = e500_idle, };
linux-master
arch/powerpc/platforms/85xx/corenet_generic.c
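The standalone sketch below (not kernel code) illustrates the "-hv" compatible-string check that corenet_generic_probe() above uses to detect a board running under the Freescale hypervisor; the machine string and the shortened board list are assumptions chosen only for illustration.

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char * const boards[] = { "fsl,P4080DS", "fsl,T4240QDS", NULL };
	const char *machine = "fsl,P4080DS-hv";	/* assumed root "compatible" value */
	char hv_compat[24];
	int i;

	for (i = 0; boards[i]; i++) {
		/* Append "-hv" to each known board, as corenet_generic_probe() does */
		snprintf(hv_compat, sizeof(hv_compat), "%s-hv", boards[i]);
		if (strcmp(hv_compat, machine) == 0) {
			printf("hypervisor variant of %s detected\n", boards[i]);
			return 0;
		}
	}
	printf("not a hypervisor guest (or unknown board)\n");
	return 0;
}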
/* * P1022DS board specific routines * * Authors: Travis Wheatley <[email protected]> * Dave Liu <[email protected]> * Timur Tabi <[email protected]> * * Copyright 2010 Freescale Semiconductor, Inc. * * This file is taken from the Freescale P1022DS BSP, with modifications: * 2) No AMP support * 3) No PCI endpoint support * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/fsl/guts.h> #include <linux/pci.h> #include <linux/of.h> #include <linux/of_address.h> #include <asm/div64.h> #include <asm/mpic.h> #include <asm/swiotlb.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include <asm/udbg.h> #include <asm/fsl_lbc.h> #include "smp.h" #include "mpc85xx.h" #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) #define PMUXCR_ELBCDIU_MASK 0xc0000000 #define PMUXCR_ELBCDIU_NOR16 0x80000000 #define PMUXCR_ELBCDIU_DIU 0x40000000 /* * Board-specific initialization of the DIU. This code should probably be * executed when the DIU is opened, rather than in arch code, but the DIU * driver does not have a mechanism for this (yet). * * This is especially problematic on the P1022DS because the local bus (eLBC) * and the DIU video signals share the same pins, which means that enabling the * DIU will disable access to NOR flash. */ /* DIU Pixel Clock bits of the CLKDVDR Global Utilities register */ #define CLKDVDR_PXCKEN 0x80000000 #define CLKDVDR_PXCKINV 0x10000000 #define CLKDVDR_PXCKDLY 0x06000000 #define CLKDVDR_PXCLK_MASK 0x00FF0000 /* Some ngPIXIS register definitions */ #define PX_CTL 3 #define PX_BRDCFG0 8 #define PX_BRDCFG1 9 #define PX_BRDCFG0_ELBC_SPI_MASK 0xc0 #define PX_BRDCFG0_ELBC_SPI_ELBC 0x00 #define PX_BRDCFG0_ELBC_SPI_NULL 0xc0 #define PX_BRDCFG0_ELBC_DIU 0x02 #define PX_BRDCFG1_DVIEN 0x80 #define PX_BRDCFG1_DFPEN 0x40 #define PX_BRDCFG1_BACKLIGHT 0x20 #define PX_BRDCFG1_DDCEN 0x10 #define PX_CTL_ALTACC 0x80 /* * DIU Area Descriptor * * Note that we need to byte-swap the value before it's written to the AD * register. So even though the registers don't look like they're in the same * bit positions as they are on the MPC8610, the same value is written to the * AD register on the MPC8610 and on the P1022. 
*/ #define AD_BYTE_F 0x10000000 #define AD_ALPHA_C_MASK 0x0E000000 #define AD_ALPHA_C_SHIFT 25 #define AD_BLUE_C_MASK 0x01800000 #define AD_BLUE_C_SHIFT 23 #define AD_GREEN_C_MASK 0x00600000 #define AD_GREEN_C_SHIFT 21 #define AD_RED_C_MASK 0x00180000 #define AD_RED_C_SHIFT 19 #define AD_PALETTE 0x00040000 #define AD_PIXEL_S_MASK 0x00030000 #define AD_PIXEL_S_SHIFT 16 #define AD_COMP_3_MASK 0x0000F000 #define AD_COMP_3_SHIFT 12 #define AD_COMP_2_MASK 0x00000F00 #define AD_COMP_2_SHIFT 8 #define AD_COMP_1_MASK 0x000000F0 #define AD_COMP_1_SHIFT 4 #define AD_COMP_0_MASK 0x0000000F #define AD_COMP_0_SHIFT 0 #define MAKE_AD(alpha, red, blue, green, size, c0, c1, c2, c3) \ cpu_to_le32(AD_BYTE_F | (alpha << AD_ALPHA_C_SHIFT) | \ (blue << AD_BLUE_C_SHIFT) | (green << AD_GREEN_C_SHIFT) | \ (red << AD_RED_C_SHIFT) | (c3 << AD_COMP_3_SHIFT) | \ (c2 << AD_COMP_2_SHIFT) | (c1 << AD_COMP_1_SHIFT) | \ (c0 << AD_COMP_0_SHIFT) | (size << AD_PIXEL_S_SHIFT)) struct fsl_law { u32 lawbar; u32 reserved1; u32 lawar; u32 reserved[5]; }; #define LAWBAR_MASK 0x00F00000 #define LAWBAR_SHIFT 12 #define LAWAR_EN 0x80000000 #define LAWAR_TGT_MASK 0x01F00000 #define LAW_TRGT_IF_LBC (0x04 << 20) #define LAWAR_MASK (LAWAR_EN | LAWAR_TGT_MASK) #define LAWAR_MATCH (LAWAR_EN | LAW_TRGT_IF_LBC) #define BR_BA 0xFFFF8000 /* * Map a BRx value to a physical address * * The localbus BRx registers only store the lower 32 bits of the address. To * obtain the upper four bits, we need to scan the LAW table. The entry which * maps to the localbus will contain the upper four bits. */ static phys_addr_t lbc_br_to_phys(const void *ecm, unsigned int count, u32 br) { #ifndef CONFIG_PHYS_64BIT /* * If we only have 32-bit addressing, then the BRx address *is* the * physical address. */ return br & BR_BA; #else const struct fsl_law *law = ecm + 0xc08; unsigned int i; for (i = 0; i < count; i++) { u64 lawbar = in_be32(&law[i].lawbar); u32 lawar = in_be32(&law[i].lawar); if ((lawar & LAWAR_MASK) == LAWAR_MATCH) /* Extract the upper four bits */ return (br & BR_BA) | ((lawbar & LAWBAR_MASK) << 12); } return 0; #endif } /** * p1022ds_set_monitor_port: switch the output to a different monitor port */ static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port) { struct device_node *guts_node; struct device_node *lbc_node = NULL; struct device_node *law_node = NULL; struct ccsr_guts __iomem *guts; struct fsl_lbc_regs *lbc = NULL; void *ecm = NULL; u8 __iomem *lbc_lcs0_ba = NULL; u8 __iomem *lbc_lcs1_ba = NULL; phys_addr_t cs0_addr, cs1_addr; u32 br0, or0, br1, or1; const __be32 *iprop; unsigned int num_laws; u8 b; /* Map the global utilities registers. 
*/ guts_node = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts"); if (!guts_node) { pr_err("p1022ds: missing global utilities device node\n"); return; } guts = of_iomap(guts_node, 0); if (!guts) { pr_err("p1022ds: could not map global utilities device\n"); goto exit; } lbc_node = of_find_compatible_node(NULL, NULL, "fsl,p1022-elbc"); if (!lbc_node) { pr_err("p1022ds: missing localbus node\n"); goto exit; } lbc = of_iomap(lbc_node, 0); if (!lbc) { pr_err("p1022ds: could not map localbus node\n"); goto exit; } law_node = of_find_compatible_node(NULL, NULL, "fsl,ecm-law"); if (!law_node) { pr_err("p1022ds: missing local access window node\n"); goto exit; } ecm = of_iomap(law_node, 0); if (!ecm) { pr_err("p1022ds: could not map local access window node\n"); goto exit; } iprop = of_get_property(law_node, "fsl,num-laws", NULL); if (!iprop) { pr_err("p1022ds: LAW node is missing fsl,num-laws property\n"); goto exit; } num_laws = be32_to_cpup(iprop); /* * Indirect mode requires both BR0 and BR1 to be set to "GPCM", * otherwise writes to these addresses won't actually appear on the * local bus, and so the PIXIS won't see them. * * In FCM mode, writes go to the NAND controller, which does not pass * them to the localbus directly. So we force BR0 and BR1 into GPCM * mode, since we don't care about what's behind the localbus any * more. */ br0 = in_be32(&lbc->bank[0].br); br1 = in_be32(&lbc->bank[1].br); or0 = in_be32(&lbc->bank[0].or); or1 = in_be32(&lbc->bank[1].or); /* Make sure CS0 and CS1 are programmed */ if (!(br0 & BR_V) || !(br1 & BR_V)) { pr_err("p1022ds: CS0 and/or CS1 is not programmed\n"); goto exit; } /* * Use the existing BRx/ORx values if it's already GPCM. Otherwise, * force the values to simple 32KB GPCM windows with the most * conservative timing. */ if ((br0 & BR_MSEL) != BR_MS_GPCM) { br0 = (br0 & BR_BA) | BR_V; or0 = 0xFFFF8000 | 0xFF7; out_be32(&lbc->bank[0].br, br0); out_be32(&lbc->bank[0].or, or0); } if ((br1 & BR_MSEL) != BR_MS_GPCM) { br1 = (br1 & BR_BA) | BR_V; or1 = 0xFFFF8000 | 0xFF7; out_be32(&lbc->bank[1].br, br1); out_be32(&lbc->bank[1].or, or1); } cs0_addr = lbc_br_to_phys(ecm, num_laws, br0); if (!cs0_addr) { pr_err("p1022ds: could not determine physical address for CS0" " (BR0=%08x)\n", br0); goto exit; } cs1_addr = lbc_br_to_phys(ecm, num_laws, br1); if (!cs1_addr) { pr_err("p1022ds: could not determine physical address for CS1" " (BR1=%08x)\n", br1); goto exit; } lbc_lcs0_ba = ioremap(cs0_addr, 1); if (!lbc_lcs0_ba) { pr_err("p1022ds: could not ioremap CS0 address %llx\n", (unsigned long long)cs0_addr); goto exit; } lbc_lcs1_ba = ioremap(cs1_addr, 1); if (!lbc_lcs1_ba) { pr_err("p1022ds: could not ioremap CS1 address %llx\n", (unsigned long long)cs1_addr); goto exit; } /* Make sure we're in indirect mode first. */ if ((in_be32(&guts->pmuxcr) & PMUXCR_ELBCDIU_MASK) != PMUXCR_ELBCDIU_DIU) { struct device_node *pixis_node; void __iomem *pixis; pixis_node = of_find_compatible_node(NULL, NULL, "fsl,p1022ds-fpga"); if (!pixis_node) { pr_err("p1022ds: missing pixis node\n"); goto exit; } pixis = of_iomap(pixis_node, 0); of_node_put(pixis_node); if (!pixis) { pr_err("p1022ds: could not map pixis registers\n"); goto exit; } /* Enable indirect PIXIS mode. */ setbits8(pixis + PX_CTL, PX_CTL_ALTACC); iounmap(pixis); /* Switch the board mux to the DIU */ out_8(lbc_lcs0_ba, PX_BRDCFG0); /* BRDCFG0 */ b = in_8(lbc_lcs1_ba); b |= PX_BRDCFG0_ELBC_DIU; out_8(lbc_lcs1_ba, b); /* Set the chip mux to DIU mode. 
*/ clrsetbits_be32(&guts->pmuxcr, PMUXCR_ELBCDIU_MASK, PMUXCR_ELBCDIU_DIU); in_be32(&guts->pmuxcr); } switch (port) { case FSL_DIU_PORT_DVI: /* Enable the DVI port, disable the DFP and the backlight */ out_8(lbc_lcs0_ba, PX_BRDCFG1); b = in_8(lbc_lcs1_ba); b &= ~(PX_BRDCFG1_DFPEN | PX_BRDCFG1_BACKLIGHT); b |= PX_BRDCFG1_DVIEN; out_8(lbc_lcs1_ba, b); break; case FSL_DIU_PORT_LVDS: /* * LVDS also needs backlight enabled, otherwise the display * will be blank. */ /* Enable the DFP port, disable the DVI and the backlight */ out_8(lbc_lcs0_ba, PX_BRDCFG1); b = in_8(lbc_lcs1_ba); b &= ~PX_BRDCFG1_DVIEN; b |= PX_BRDCFG1_DFPEN | PX_BRDCFG1_BACKLIGHT; out_8(lbc_lcs1_ba, b); break; default: pr_err("p1022ds: unsupported monitor port %i\n", port); } exit: if (lbc_lcs1_ba) iounmap(lbc_lcs1_ba); if (lbc_lcs0_ba) iounmap(lbc_lcs0_ba); if (lbc) iounmap(lbc); if (ecm) iounmap(ecm); if (guts) iounmap(guts); of_node_put(law_node); of_node_put(lbc_node); of_node_put(guts_node); } /** * p1022ds_set_pixel_clock: program the DIU's clock * * @pixclock: the wavelength, in picoseconds, of the clock */ void p1022ds_set_pixel_clock(unsigned int pixclock) { struct device_node *guts_np = NULL; struct ccsr_guts __iomem *guts; unsigned long freq; u64 temp; u32 pxclk; /* Map the global utilities registers. */ guts_np = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts"); if (!guts_np) { pr_err("p1022ds: missing global utilities device node\n"); return; } guts = of_iomap(guts_np, 0); of_node_put(guts_np); if (!guts) { pr_err("p1022ds: could not map global utilities device\n"); return; } /* Convert pixclock from a wavelength to a frequency */ temp = 1000000000000ULL; do_div(temp, pixclock); freq = temp; /* * 'pxclk' is the ratio of the platform clock to the pixel clock. * This number is programmed into the CLKDVDR register, and the valid * range of values is 2-255. */ pxclk = DIV_ROUND_CLOSEST(fsl_get_sys_freq(), freq); pxclk = clamp_t(u32, pxclk, 2, 255); /* Disable the pixel clock, and set it to non-inverted and no delay */ clrbits32(&guts->clkdvdr, CLKDVDR_PXCKEN | CLKDVDR_PXCKDLY | CLKDVDR_PXCLK_MASK); /* Enable the clock and set the pxclk */ setbits32(&guts->clkdvdr, CLKDVDR_PXCKEN | (pxclk << 16)); iounmap(guts); } /** * p1022ds_valid_monitor_port: set the monitor port for sysfs */ enum fsl_diu_monitor_port p1022ds_valid_monitor_port(enum fsl_diu_monitor_port port) { switch (port) { case FSL_DIU_PORT_DVI: case FSL_DIU_PORT_LVDS: return port; default: return FSL_DIU_PORT_DVI; /* Dual-link LVDS is not supported */ } } #endif void __init p1022_ds_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); } #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) /* TRUE if there is a "video=fslfb" command-line parameter. */ static bool fslfb; /* * Search for a "video=fslfb" command-line parameter, and set 'fslfb' to * true if we find it. * * We need to use early_param() instead of __setup() because the normal * __setup() gets called to late. However, early_param() gets called very * early, before the device tree is unflattened, so all we can do now is set a * global variable. Later on, p1022_ds_setup_arch() will use that variable * to determine if we need to update the device tree. 
*/ static int __init early_video_setup(char *options) { fslfb = (strncmp(options, "fslfb:", 6) == 0); return 0; } early_param("video", early_video_setup); #endif /* * Setup the architecture */ static void __init p1022_ds_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("p1022_ds_setup_arch()", 0); #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) diu_ops.set_monitor_port = p1022ds_set_monitor_port; diu_ops.set_pixel_clock = p1022ds_set_pixel_clock; diu_ops.valid_monitor_port = p1022ds_valid_monitor_port; /* * Disable the NOR and NAND flash nodes if there is video=fslfb... * command-line parameter. When the DIU is active, the localbus is * unavailable, so we have to disable these nodes before the MTD * driver loads. */ if (fslfb) { struct device_node *np = of_find_compatible_node(NULL, NULL, "fsl,p1022-elbc"); if (np) { struct device_node *np2; of_node_get(np); np2 = of_find_compatible_node(np, NULL, "cfi-flash"); if (np2) { static struct property nor_status = { .name = "status", .value = "disabled", .length = sizeof("disabled"), }; /* * of_update_property() is called before * kmalloc() is available, so the 'new' object * should be allocated in the global area. * The easiest way is to do that is to * allocate one static local variable for each * call to this function. */ pr_info("p1022ds: disabling %pOF node", np2); of_update_property(np2, &nor_status); of_node_put(np2); } of_node_get(np); np2 = of_find_compatible_node(np, NULL, "fsl,elbc-fcm-nand"); if (np2) { static struct property nand_status = { .name = "status", .value = "disabled", .length = sizeof("disabled"), }; pr_info("p1022ds: disabling %pOF node", np2); of_update_property(np2, &nand_status); of_node_put(np2); } of_node_put(np); } } #endif mpc85xx_smp_init(); fsl_pci_assign_primary(); swiotlb_detect_4g(); pr_info("Freescale P1022 DS reference board\n"); } machine_arch_initcall(p1022_ds, mpc85xx_common_publish_devices); define_machine(p1022_ds) { .name = "P1022 DS", .compatible = "fsl,p1022ds", .setup_arch = p1022_ds_setup_arch, .init_IRQ = p1022_ds_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/85xx/p1022_ds.c
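The standalone sketch below (not kernel code) works through the CLKDVDR divider arithmetic used by p1022ds_set_pixel_clock() above: a framebuffer-style pixel clock period in picoseconds is converted to a frequency, divided into the platform clock, and clamped to the valid 2-255 range. The 400 MHz platform clock and the sample period are assumptions for illustration only; the kernel obtains the real value via fsl_get_sys_freq().

#include <stdio.h>
#include <stdint.h>

#define CLKDVDR_PXCKEN		0x80000000u
#define CLKDVDR_PXCLK_SHIFT	16

static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	uint64_t sys_freq = 400000000ULL;	/* assumed 400 MHz platform clock */
	uint64_t pixclock = 15384;		/* period in ps, roughly a 65 MHz pixel clock */
	uint64_t freq, pxclk;
	uint32_t clkdvdr;

	/* Convert the framebuffer-style period (picoseconds) to a frequency in Hz */
	freq = 1000000000000ULL / pixclock;

	/* Divider is the platform-to-pixel clock ratio, rounded to nearest */
	pxclk = (sys_freq + freq / 2) / freq;
	pxclk = clamp_u32((uint32_t)pxclk, 2, 255);

	/* The divider occupies bits 23:16 of CLKDVDR, alongside the enable bit */
	clkdvdr = CLKDVDR_PXCKEN | ((uint32_t)pxclk << CLKDVDR_PXCLK_SHIFT);

	printf("pixel clock %llu Hz, divider %llu, CLKDVDR %#010x\n",
	       (unsigned long long)freq, (unsigned long long)pxclk,
	       (unsigned int)clkdvdr);
	return 0;
}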
// SPDX-License-Identifier: GPL-2.0-or-later /* * GE IMP3A Board Setup * * Author Martyn Welch <[email protected]> * * Copyright 2010 GE Intelligent Platforms Embedded Systems, Inc. * * Based on: mpc85xx_ds.c (MPC85xx DS Board Setup) * Copyright 2007 Freescale Semiconductor Inc. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/of.h> #include <linux/of_address.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <asm/swiotlb.h> #include <asm/nvram.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "smp.h" #include "mpc85xx.h" #include <sysdev/ge/ge_pic.h> void __iomem *imp3a_regs; void __init ge_imp3a_pic_init(void) { struct mpic *mpic; struct device_node *np; struct device_node *cascade_node = NULL; if (of_machine_is_compatible("fsl,MPC8572DS-CAMP")) { mpic = mpic_alloc(NULL, 0, MPIC_NO_RESET | MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); } else { mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); } BUG_ON(mpic == NULL); mpic_init(mpic); /* * There is a simple interrupt handler in the main FPGA, this needs * to be cascaded into the MPIC */ for_each_node_by_type(np, "interrupt-controller") if (of_device_is_compatible(np, "gef,fpga-pic-1.00")) { cascade_node = np; break; } if (cascade_node == NULL) { printk(KERN_WARNING "IMP3A: No FPGA PIC\n"); return; } gef_pic_init(cascade_node); of_node_put(cascade_node); } static void __init ge_imp3a_pci_assign_primary(void) { #ifdef CONFIG_PCI struct device_node *np; struct resource rsrc; for_each_node_by_type(np, "pci") { if (of_device_is_compatible(np, "fsl,mpc8540-pci") || of_device_is_compatible(np, "fsl,mpc8548-pcie") || of_device_is_compatible(np, "fsl,p2020-pcie")) { of_address_to_resource(np, 0, &rsrc); if ((rsrc.start & 0xfffff) == 0x9000) { of_node_put(fsl_pci_primary); fsl_pci_primary = of_node_get(np); } } } #endif } /* * Setup the architecture */ static void __init ge_imp3a_setup_arch(void) { struct device_node *regs; if (ppc_md.progress) ppc_md.progress("ge_imp3a_setup_arch()", 0); mpc85xx_smp_init(); ge_imp3a_pci_assign_primary(); swiotlb_detect_4g(); /* Remap basic board registers */ regs = of_find_compatible_node(NULL, NULL, "ge,imp3a-fpga-regs"); if (regs) { imp3a_regs = of_iomap(regs, 0); if (imp3a_regs == NULL) printk(KERN_WARNING "Unable to map board registers\n"); of_node_put(regs); } #if defined(CONFIG_MMIO_NVRAM) mmio_nvram_init(); #endif printk(KERN_INFO "GE Intelligent Platforms IMP3A 3U cPCI SBC\n"); } /* Return the PCB revision */ static unsigned int ge_imp3a_get_pcb_rev(void) { unsigned int reg; reg = ioread16(imp3a_regs); return (reg >> 8) & 0xff; } /* Return the board (software) revision */ static unsigned int ge_imp3a_get_board_rev(void) { unsigned int reg; reg = ioread16(imp3a_regs + 0x2); return reg & 0xff; } /* Return the FPGA revision */ static unsigned int ge_imp3a_get_fpga_rev(void) { unsigned int reg; reg = ioread16(imp3a_regs + 0x2); return (reg >> 8) & 0xff; } /* Return compactPCI Geographical Address */ static unsigned int ge_imp3a_get_cpci_geo_addr(void) { unsigned int reg; reg = ioread16(imp3a_regs + 0x6); return (reg & 0x0f00) >> 8; } /* Return compactPCI System Controller Status */ static unsigned int ge_imp3a_get_cpci_is_syscon(void) { unsigned int reg; reg = ioread16(imp3a_regs + 
0x6); return reg & (1 << 12); } static void ge_imp3a_show_cpuinfo(struct seq_file *m) { seq_printf(m, "Vendor\t\t: GE Intelligent Platforms\n"); seq_printf(m, "Revision\t: %u%c\n", ge_imp3a_get_pcb_rev(), ('A' + ge_imp3a_get_board_rev() - 1)); seq_printf(m, "FPGA Revision\t: %u\n", ge_imp3a_get_fpga_rev()); seq_printf(m, "cPCI geo. addr\t: %u\n", ge_imp3a_get_cpci_geo_addr()); seq_printf(m, "cPCI syscon\t: %s\n", ge_imp3a_get_cpci_is_syscon() ? "yes" : "no"); } machine_arch_initcall(ge_imp3a, mpc85xx_common_publish_devices); define_machine(ge_imp3a) { .name = "GE_IMP3A", .compatible = "ge,IMP3A", .setup_arch = ge_imp3a_setup_arch, .init_IRQ = ge_imp3a_pic_init, .show_cpuinfo = ge_imp3a_show_cpuinfo, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/85xx/ge_imp3a.c
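The standalone sketch below (not kernel code) shows the bit-field decoding performed by the ge_imp3a_get_*() helpers above on the IMP3A FPGA registers; the register values are made up purely for illustration.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t reg0 = 0x0200;	/* assumed value at FPGA offset 0x0 */
	uint16_t reg2 = 0x0301;	/* assumed value at FPGA offset 0x2 */
	uint16_t reg6 = 0x1400;	/* assumed value at FPGA offset 0x6 */

	unsigned int pcb_rev   = (reg0 >> 8) & 0xff;	/* upper byte of reg 0x0 */
	unsigned int board_rev = reg2 & 0xff;		/* lower byte of reg 0x2 */
	unsigned int fpga_rev  = (reg2 >> 8) & 0xff;	/* upper byte of reg 0x2 */
	unsigned int geo_addr  = (reg6 & 0x0f00) >> 8;	/* bits 11:8 of reg 0x6 */
	int is_syscon = !!(reg6 & (1 << 12));		/* bit 12 of reg 0x6 */

	printf("Revision %u%c, FPGA rev %u, cPCI geo addr %u, syscon %s\n",
	       pcb_rev, 'A' + board_rev - 1, fpga_rev, geo_addr,
	       is_syscon ? "yes" : "no");
	return 0;
}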
// SPDX-License-Identifier: GPL-2.0-or-later /* * BSC913xQDS Board Setup * * Author: * Harninder Rai <[email protected]> * Priyanka Jain <[email protected]> * * Copyright 2014 Freescale Semiconductor Inc. */ #include <linux/of.h> #include <linux/pci.h> #include <asm/mpic.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include <asm/udbg.h> #include "mpc85xx.h" #include "smp.h" void __init bsc913x_qds_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); if (!mpic) pr_err("bsc913x: Failed to allocate MPIC structure\n"); else mpic_init(mpic); } /* * Setup the architecture */ static void __init bsc913x_qds_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("bsc913x_qds_setup_arch()", 0); #if defined(CONFIG_SMP) mpc85xx_smp_init(); #endif fsl_pci_assign_primary(); pr_info("bsc913x board from Freescale Semiconductor\n"); } machine_arch_initcall(bsc9132_qds, mpc85xx_common_publish_devices); define_machine(bsc9132_qds) { .name = "BSC9132 QDS", .compatible = "fsl,bsc9132qds", .setup_arch = bsc913x_qds_setup_arch, .init_IRQ = bsc913x_qds_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/85xx/bsc913x_qds.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2010-2011, 2013 Freescale Semiconductor, Inc. * * Author: Roy Zang <[email protected]> * * Description: * P1023 RDB Board Setup */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/fsl_devices.h> #include <linux/of.h> #include <linux/of_address.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> #include <asm/mpic.h> #include "smp.h" #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc85xx.h" /* ************************************************************************ * * Setup the architecture * */ static void __init p1023_rdb_setup_arch(void) { struct device_node *np; if (ppc_md.progress) ppc_md.progress("p1023_rdb_setup_arch()", 0); /* Map BCSR area */ np = of_find_node_by_name(NULL, "bcsr"); if (np != NULL) { static u8 __iomem *bcsr_regs; bcsr_regs = of_iomap(np, 0); of_node_put(np); if (!bcsr_regs) { printk(KERN_ERR "BCSR: Failed to map bcsr register space\n"); return; } else { #define BCSR15_I2C_BUS0_SEG_CLR 0x07 #define BCSR15_I2C_BUS0_SEG2 0x02 /* * Note: Accessing exclusively i2c devices. * * The i2c controller selects initially ID EEPROM in the u-boot; * but if menu configuration selects RTC support in the kernel, * the i2c controller switches to select RTC chip in the kernel. */ #ifdef CONFIG_RTC_CLASS /* Enable RTC chip on the segment #2 of i2c */ clrbits8(&bcsr_regs[15], BCSR15_I2C_BUS0_SEG_CLR); setbits8(&bcsr_regs[15], BCSR15_I2C_BUS0_SEG2); #endif iounmap(bcsr_regs); } } mpc85xx_smp_init(); fsl_pci_assign_primary(); } machine_arch_initcall(p1023_rdb, mpc85xx_common_publish_devices); static void __init p1023_rdb_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); } define_machine(p1023_rdb) { .name = "P1023 RDB", .compatible = "fsl,P1023RDB", .setup_arch = p1023_rdb_setup_arch, .init_IRQ = p1023_rdb_pic_init, .get_irq = mpic_get_irq, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif };
linux-master
arch/powerpc/platforms/85xx/p1023_rdb.c
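The standalone sketch below (not kernel code) mirrors the BCSR15 update done in p1023_rdb_setup_arch() above to steer i2c bus 0 onto the RTC segment; the starting register value is an assumption.

#include <stdio.h>
#include <stdint.h>

#define BCSR15_I2C_BUS0_SEG_CLR	0x07
#define BCSR15_I2C_BUS0_SEG2	0x02

int main(void)
{
	uint8_t bcsr15 = 0xa5;	/* assumed current BCSR15 contents */

	bcsr15 &= ~BCSR15_I2C_BUS0_SEG_CLR;	/* clrbits8(): clear the segment field */
	bcsr15 |= BCSR15_I2C_BUS0_SEG2;		/* setbits8(): select segment #2 (RTC) */

	printf("BCSR15 = %#04x\n", (unsigned int)bcsr15);
	return 0;
}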
/* * P1022 RDK board specific routines * * Copyright 2012 Freescale Semiconductor, Inc. * * Author: Timur Tabi <[email protected]> * * Based on p1022_ds.c * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/fsl/guts.h> #include <linux/pci.h> #include <linux/of.h> #include <linux/of_address.h> #include <asm/div64.h> #include <asm/mpic.h> #include <asm/swiotlb.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include <asm/udbg.h> #include "smp.h" #include "mpc85xx.h" #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) /* DIU Pixel Clock bits of the CLKDVDR Global Utilities register */ #define CLKDVDR_PXCKEN 0x80000000 #define CLKDVDR_PXCKINV 0x10000000 #define CLKDVDR_PXCKDLY 0x06000000 #define CLKDVDR_PXCLK_MASK 0x00FF0000 /** * p1022rdk_set_pixel_clock: program the DIU's clock * * @pixclock: the wavelength, in picoseconds, of the clock */ void p1022rdk_set_pixel_clock(unsigned int pixclock) { struct device_node *guts_np = NULL; struct ccsr_guts __iomem *guts; unsigned long freq; u64 temp; u32 pxclk; /* Map the global utilities registers. */ guts_np = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts"); if (!guts_np) { pr_err("p1022rdk: missing global utilities device node\n"); return; } guts = of_iomap(guts_np, 0); of_node_put(guts_np); if (!guts) { pr_err("p1022rdk: could not map global utilities device\n"); return; } /* Convert pixclock from a wavelength to a frequency */ temp = 1000000000000ULL; do_div(temp, pixclock); freq = temp; /* * 'pxclk' is the ratio of the platform clock to the pixel clock. * This number is programmed into the CLKDVDR register, and the valid * range of values is 2-255. */ pxclk = DIV_ROUND_CLOSEST(fsl_get_sys_freq(), freq); pxclk = clamp_t(u32, pxclk, 2, 255); /* Disable the pixel clock, and set it to non-inverted and no delay */ clrbits32(&guts->clkdvdr, CLKDVDR_PXCKEN | CLKDVDR_PXCKDLY | CLKDVDR_PXCLK_MASK); /* Enable the clock and set the pxclk */ setbits32(&guts->clkdvdr, CLKDVDR_PXCKEN | (pxclk << 16)); iounmap(guts); } /** * p1022rdk_valid_monitor_port: set the monitor port for sysfs */ enum fsl_diu_monitor_port p1022rdk_valid_monitor_port(enum fsl_diu_monitor_port port) { return FSL_DIU_PORT_DVI; } #endif void __init p1022_rdk_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); } /* * Setup the architecture */ static void __init p1022_rdk_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("p1022_rdk_setup_arch()", 0); #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) diu_ops.set_pixel_clock = p1022rdk_set_pixel_clock; diu_ops.valid_monitor_port = p1022rdk_valid_monitor_port; #endif mpc85xx_smp_init(); fsl_pci_assign_primary(); swiotlb_detect_4g(); pr_info("Freescale / iVeia P1022 RDK reference board\n"); } machine_arch_initcall(p1022_rdk, mpc85xx_common_publish_devices); define_machine(p1022_rdk) { .name = "P1022 RDK", .compatible = "fsl,p1022rdk", .setup_arch = p1022_rdk_setup_arch, .init_IRQ = p1022_rdk_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/85xx/p1022_rdk.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * MPC8536 DS Board Setup * * Copyright 2008 Freescale Semiconductor, Inc. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/of.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <asm/swiotlb.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc85xx.h" void __init mpc8536_ds_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); } /* * Setup the architecture */ static void __init mpc8536_ds_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("mpc8536_ds_setup_arch()", 0); fsl_pci_assign_primary(); swiotlb_detect_4g(); printk("MPC8536 DS board from Freescale Semiconductor\n"); } machine_arch_initcall(mpc8536_ds, mpc85xx_common_publish_devices); define_machine(mpc8536_ds) { .name = "MPC8536 DS", .compatible = "fsl,mpc8536ds", .setup_arch = mpc8536_ds_setup_arch, .init_IRQ = mpc8536_ds_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/85xx/mpc8536_ds.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2006-2010, 2012-2013 Freescale Semiconductor, Inc. * All rights reserved. * * Author: Andy Fleming <[email protected]> * * Based on 83xx/mpc8360e_pb.c by: * Li Yang <[email protected]> * Yin Olivia <[email protected]> * * Description: * MPC85xx MDS board specific routines. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/reboot.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/major.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/initrd.h> #include <linux/fsl_devices.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/phy.h> #include <linux/memblock.h> #include <linux/fsl/guts.h> #include <linux/atomic.h> #include <asm/time.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/irq.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include <soc/fsl/qe/qe.h> #include <asm/mpic.h> #include <asm/swiotlb.h> #include "smp.h" #include "mpc85xx.h" #if IS_BUILTIN(CONFIG_PHYLIB) #define MV88E1111_SCR 0x10 #define MV88E1111_SCR_125CLK 0x0010 static int mpc8568_fixup_125_clock(struct phy_device *phydev) { int scr; int err; /* Workaround for the 125 CLK Toggle */ scr = phy_read(phydev, MV88E1111_SCR); if (scr < 0) return scr; err = phy_write(phydev, MV88E1111_SCR, scr & ~(MV88E1111_SCR_125CLK)); if (err) return err; err = phy_write(phydev, MII_BMCR, BMCR_RESET); if (err) return err; scr = phy_read(phydev, MV88E1111_SCR); if (scr < 0) return scr; err = phy_write(phydev, MV88E1111_SCR, scr | 0x0008); return err; } static int mpc8568_mds_phy_fixups(struct phy_device *phydev) { int temp; int err; /* Errata */ err = phy_write(phydev,29, 0x0006); if (err) return err; temp = phy_read(phydev, 30); if (temp < 0) return temp; temp = (temp & (~0x8000)) | 0x4000; err = phy_write(phydev,30, temp); if (err) return err; err = phy_write(phydev,29, 0x000a); if (err) return err; temp = phy_read(phydev, 30); if (temp < 0) return temp; temp = phy_read(phydev, 30); if (temp < 0) return temp; temp &= ~0x0020; err = phy_write(phydev,30,temp); if (err) return err; /* Disable automatic MDI/MDIX selection */ temp = phy_read(phydev, 16); if (temp < 0) return temp; temp &= ~0x0060; err = phy_write(phydev,16,temp); return err; } #endif /* ************************************************************************ * * Setup the architecture * */ #ifdef CONFIG_QUICC_ENGINE static void __init mpc85xx_mds_reset_ucc_phys(void) { struct device_node *np; static u8 __iomem *bcsr_regs; /* Map BCSR area */ np = of_find_node_by_name(NULL, "bcsr"); if (!np) return; bcsr_regs = of_iomap(np, 0); of_node_put(np); if (!bcsr_regs) return; if (machine_is(mpc8568_mds)) { #define BCSR_UCC1_GETH_EN (0x1 << 7) #define BCSR_UCC2_GETH_EN (0x1 << 7) #define BCSR_UCC1_MODE_MSK (0x3 << 4) #define BCSR_UCC2_MODE_MSK (0x3 << 0) /* Turn off UCC1 & UCC2 */ clrbits8(&bcsr_regs[8], BCSR_UCC1_GETH_EN); clrbits8(&bcsr_regs[9], BCSR_UCC2_GETH_EN); /* Mode is RGMII, all bits clear */ clrbits8(&bcsr_regs[11], BCSR_UCC1_MODE_MSK | BCSR_UCC2_MODE_MSK); /* Turn UCC1 & UCC2 on */ setbits8(&bcsr_regs[8], BCSR_UCC1_GETH_EN); setbits8(&bcsr_regs[9], BCSR_UCC2_GETH_EN); } else if (machine_is(mpc8569_mds)) { #define BCSR7_UCC12_GETHnRST (0x1 << 2) #define BCSR8_UEM_MARVELL_RST (0x1 << 1) #define BCSR_UCC_RGMII (0x1 << 6) #define BCSR_UCC_RTBI (0x1 << 5) /* * U-Boot 
mangles interrupt polarity for Marvell PHYs, * so reset built-in and UEM Marvell PHYs, this puts * the PHYs into their normal state. */ clrbits8(&bcsr_regs[7], BCSR7_UCC12_GETHnRST); setbits8(&bcsr_regs[8], BCSR8_UEM_MARVELL_RST); setbits8(&bcsr_regs[7], BCSR7_UCC12_GETHnRST); clrbits8(&bcsr_regs[8], BCSR8_UEM_MARVELL_RST); for_each_compatible_node(np, "network", "ucc_geth") { const unsigned int *prop; int ucc_num; prop = of_get_property(np, "cell-index", NULL); if (prop == NULL) continue; ucc_num = *prop - 1; prop = of_get_property(np, "phy-connection-type", NULL); if (prop == NULL) continue; if (strcmp("rtbi", (const char *)prop) == 0) clrsetbits_8(&bcsr_regs[7 + ucc_num], BCSR_UCC_RGMII, BCSR_UCC_RTBI); } } else if (machine_is(p1021_mds)) { #define BCSR11_ENET_MICRST (0x1 << 5) /* Reset Micrel PHY */ clrbits8(&bcsr_regs[11], BCSR11_ENET_MICRST); setbits8(&bcsr_regs[11], BCSR11_ENET_MICRST); } iounmap(bcsr_regs); } static void __init mpc85xx_mds_qe_init(void) { struct device_node *np; mpc85xx_qe_par_io_init(); mpc85xx_mds_reset_ucc_phys(); if (machine_is(p1021_mds)) { struct ccsr_guts __iomem *guts; np = of_find_node_by_name(NULL, "global-utilities"); if (np) { guts = of_iomap(np, 0); if (!guts) pr_err("mpc85xx-rdb: could not map global utilities register\n"); else{ /* P1021 has pins muxed for QE and other functions. To * enable QE UEC mode, we need to set bit QE0 for UCC1 * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9 * and QE12 for QE MII management signals in PMUXCR * register. */ setbits32(&guts->pmuxcr, MPC85xx_PMUXCR_QE(0) | MPC85xx_PMUXCR_QE(3) | MPC85xx_PMUXCR_QE(9) | MPC85xx_PMUXCR_QE(12)); iounmap(guts); } of_node_put(np); } } } #else static void __init mpc85xx_mds_qe_init(void) { } #endif /* CONFIG_QUICC_ENGINE */ static void __init mpc85xx_mds_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("mpc85xx_mds_setup_arch()", 0); mpc85xx_smp_init(); mpc85xx_mds_qe_init(); fsl_pci_assign_primary(); swiotlb_detect_4g(); } #if IS_BUILTIN(CONFIG_PHYLIB) static int __init board_fixups(void) { char phy_id[20]; char *compstrs[2] = {"fsl,gianfar-mdio", "fsl,ucc-mdio"}; struct device_node *mdio; struct resource res; int i; for (i = 0; i < ARRAY_SIZE(compstrs); i++) { mdio = of_find_compatible_node(NULL, NULL, compstrs[i]); of_address_to_resource(mdio, 0, &res); snprintf(phy_id, sizeof(phy_id), "%llx:%02x", (unsigned long long)res.start, 1); phy_register_fixup_for_id(phy_id, mpc8568_fixup_125_clock); phy_register_fixup_for_id(phy_id, mpc8568_mds_phy_fixups); /* Register a workaround for errata */ snprintf(phy_id, sizeof(phy_id), "%llx:%02x", (unsigned long long)res.start, 7); phy_register_fixup_for_id(phy_id, mpc8568_mds_phy_fixups); of_node_put(mdio); } return 0; } machine_arch_initcall(mpc8568_mds, board_fixups); machine_arch_initcall(mpc8569_mds, board_fixups); #endif static int __init mpc85xx_publish_devices(void) { return mpc85xx_common_publish_devices(); } machine_arch_initcall(mpc8568_mds, mpc85xx_publish_devices); machine_arch_initcall(mpc8569_mds, mpc85xx_publish_devices); machine_arch_initcall(p1021_mds, mpc85xx_common_publish_devices); static void __init mpc85xx_mds_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); } define_machine(mpc8568_mds) { .name = "MPC8568 MDS", .compatible = "MPC85xxMDS", .setup_arch = mpc85xx_mds_setup_arch, .init_IRQ = mpc85xx_mds_pic_init, .get_irq = mpic_get_irq, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = 
fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif }; define_machine(mpc8569_mds) { .name = "MPC8569 MDS", .compatible = "fsl,MPC8569EMDS", .setup_arch = mpc85xx_mds_setup_arch, .init_IRQ = mpc85xx_mds_pic_init, .get_irq = mpic_get_irq, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif }; define_machine(p1021_mds) { .name = "P1021 MDS", .compatible = "fsl,P1021MDS", .setup_arch = mpc85xx_mds_setup_arch, .init_IRQ = mpc85xx_mds_pic_init, .get_irq = mpic_get_irq, .progress = udbg_progress, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif };
linux-master
arch/powerpc/platforms/85xx/mpc85xx_mds.c
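The standalone sketch below (not kernel code) shows the "%llx:%02x" PHY-id string that board_fixups() above registers its fixups against; the MDIO register base is an assumption, whereas in the kernel it comes from the device tree resource.

#include <stdio.h>

int main(void)
{
	unsigned long long mdio_base = 0xffe24520ULL;	/* assumed MDIO register base */
	int phy_addr = 7;				/* PHY address on that bus */
	char phy_id[20];

	snprintf(phy_id, sizeof(phy_id), "%llx:%02x", mdio_base, phy_addr);
	printf("fixup would be registered for PHY id \"%s\"\n", phy_id);
	return 0;
}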
/* * Board setup routines for the Emerson KSI8560 * * Author: Alexandr Smirnov <[email protected]> * * Based on mpc85xx_ads.c maintained by Kumar Gala * * 2008 (c) MontaVista, Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. * */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/of.h> #include <linux/of_address.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/mpic.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include <asm/cpm2.h> #include <sysdev/cpm2_pic.h> #include "mpc85xx.h" #define KSI8560_CPLD_HVR 0x04 /* Hardware Version Register */ #define KSI8560_CPLD_PVR 0x08 /* PLD Version Register */ #define KSI8560_CPLD_RCR1 0x30 /* Reset Command Register 1 */ #define KSI8560_CPLD_RCR1_CPUHR 0x80 /* CPU Hard Reset */ static void __iomem *cpld_base = NULL; static void __noreturn machine_restart(char *cmd) { if (cpld_base) out_8(cpld_base + KSI8560_CPLD_RCR1, KSI8560_CPLD_RCR1_CPUHR); else printk(KERN_ERR "Can't find CPLD base, hang forever\n"); for (;;); } static void __init ksi8560_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); mpc85xx_cpm2_pic_init(); } #ifdef CONFIG_CPM2 /* * Setup I/O ports */ struct cpm_pin { int port, pin, flags; }; static struct cpm_pin __initdata ksi8560_pins[] = { /* SCC1 */ {3, 29, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, {3, 30, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, {3, 31, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* SCC2 */ {3, 26, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, {3, 27, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, {3, 28, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* FCC1 */ {0, 14, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {0, 15, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {0, 16, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {0, 17, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, {0, 18, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, {0, 19, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, {0, 20, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, {0, 21, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, {0, 26, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, {0, 27, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, {0, 28, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, {0, 29, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, {0, 30, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, {0, 31, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, {2, 23, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* CLK9 */ {2, 22, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* CLK10 */ }; static void __init init_ioports(void) { int i; for (i = 0; i < ARRAY_SIZE(ksi8560_pins); i++) { struct cpm_pin *pin = &ksi8560_pins[i]; cpm2_set_pin(pin->port, pin->pin, pin->flags); } cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_RX); cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_TX); cpm2_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_RX); cpm2_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_TX); cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK9, CPM_CLK_RX); cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK10, CPM_CLK_TX); } #endif /* * Setup the architecture */ static void __init ksi8560_setup_arch(void) { struct device_node *cpld; cpld = of_find_compatible_node(NULL, NULL, "emerson,KSI8560-cpld"); if (cpld) cpld_base = of_iomap(cpld, 0); else printk(KERN_ERR "Can't find CPLD in device tree\n"); of_node_put(cpld); if (ppc_md.progress) ppc_md.progress("ksi8560_setup_arch()", 0); #ifdef CONFIG_CPM2 
cpm2_reset(); init_ioports(); #endif } static void ksi8560_show_cpuinfo(struct seq_file *m) { uint pvid, svid, phid1; pvid = mfspr(SPRN_PVR); svid = mfspr(SPRN_SVR); seq_printf(m, "Vendor\t\t: Emerson Network Power\n"); seq_printf(m, "Board\t\t: KSI8560\n"); if (cpld_base) { seq_printf(m, "Hardware rev\t: %d\n", in_8(cpld_base + KSI8560_CPLD_HVR)); seq_printf(m, "CPLD rev\t: %d\n", in_8(cpld_base + KSI8560_CPLD_PVR)); } else seq_printf(m, "Unknown Hardware and CPLD revs\n"); seq_printf(m, "PVR\t\t: 0x%x\n", pvid); seq_printf(m, "SVR\t\t: 0x%x\n", svid); /* Display cpu Pll setting */ phid1 = mfspr(SPRN_HID1); seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); } machine_device_initcall(ksi8560, mpc85xx_common_publish_devices); define_machine(ksi8560) { .name = "KSI8560", .compatible = "emerson,KSI8560", .setup_arch = ksi8560_setup_arch, .init_IRQ = ksi8560_pic_init, .show_cpuinfo = ksi8560_show_cpuinfo, .get_irq = mpic_get_irq, .restart = machine_restart, };
linux-master
arch/powerpc/platforms/85xx/ksi8560.c
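The standalone sketch below (not kernel code) shows the PLL-setting decode that ksi8560_show_cpuinfo() above (and the TQM85xx and STx GP3 variants further down) print from HID1; the register value is an assumption for illustration.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t hid1 = 0x08000000;		/* assumed HID1 value read via mfspr */
	unsigned int pll = (hid1 >> 24) & 0x3f;	/* PLL configuration field */

	printf("PLL setting: 0x%x\n", pll);
	return 0;
}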
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2009 Extreme Engineering Solutions, Inc. * * X-ES board-specific functionality * * Based on mpc85xx_ds code from Freescale Semiconductor, Inc. * * Author: Nate Case <[email protected]> */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/of.h> #include <linux/of_address.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "smp.h" #include "mpc85xx.h" /* A few bit definitions needed for fixups on some boards */ #define MPC85xx_L2CTL_L2E 0x80000000 /* L2 enable */ #define MPC85xx_L2CTL_L2I 0x40000000 /* L2 flash invalidate */ #define MPC85xx_L2CTL_L2SIZ_MASK 0x30000000 /* L2 SRAM size (R/O) */ void __init xes_mpc85xx_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); } static void __init xes_mpc85xx_configure_l2(void __iomem *l2_base) { volatile uint32_t ctl, tmp; asm volatile("msync; isync"); tmp = in_be32(l2_base); /* * xMon may have enabled part of L2 as SRAM, so we need to set it * up for all cache mode just to be safe. */ printk(KERN_INFO "xes_mpc85xx: Enabling L2 as cache\n"); ctl = MPC85xx_L2CTL_L2E | MPC85xx_L2CTL_L2I; if (of_machine_is_compatible("MPC8540") || of_machine_is_compatible("MPC8560")) /* * Assume L2 SRAM is used fully for cache, so set * L2BLKSZ (bits 4:5) to match L2SIZ (bits 2:3). */ ctl |= (tmp & MPC85xx_L2CTL_L2SIZ_MASK) >> 2; asm volatile("msync; isync"); out_be32(l2_base, ctl); asm volatile("msync; isync"); } static void __init xes_mpc85xx_fixups(void) { struct device_node *np; int err; /* * Legacy xMon firmware on some X-ES boards does not enable L2 * as cache. We must ensure that they get enabled here. 
*/ for_each_node_by_name(np, "l2-cache-controller") { struct resource r[2]; void __iomem *l2_base; /* Only MPC8548, MPC8540, and MPC8560 boards are affected */ if (!of_device_is_compatible(np, "fsl,mpc8548-l2-cache-controller") && !of_device_is_compatible(np, "fsl,mpc8540-l2-cache-controller") && !of_device_is_compatible(np, "fsl,mpc8560-l2-cache-controller")) continue; err = of_address_to_resource(np, 0, &r[0]); if (err) { printk(KERN_WARNING "xes_mpc85xx: Could not get " "resource for device tree node '%pOF'", np); continue; } l2_base = ioremap(r[0].start, resource_size(&r[0])); xes_mpc85xx_configure_l2(l2_base); } } /* * Setup the architecture */ static void __init xes_mpc85xx_setup_arch(void) { struct device_node *root; const char *model = "Unknown"; root = of_find_node_by_path("/"); if (root == NULL) return; model = of_get_property(root, "model", NULL); printk(KERN_INFO "X-ES MPC85xx-based single-board computer: %s\n", model + strlen("xes,")); xes_mpc85xx_fixups(); mpc85xx_smp_init(); fsl_pci_assign_primary(); } machine_arch_initcall(xes_mpc8572, mpc85xx_common_publish_devices); machine_arch_initcall(xes_mpc8548, mpc85xx_common_publish_devices); machine_arch_initcall(xes_mpc8540, mpc85xx_common_publish_devices); define_machine(xes_mpc8572) { .name = "X-ES MPC8572", .compatible = "xes,MPC8572", .setup_arch = xes_mpc85xx_setup_arch, .init_IRQ = xes_mpc85xx_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, }; define_machine(xes_mpc8548) { .name = "X-ES MPC8548", .compatible = "xes,MPC8548", .setup_arch = xes_mpc85xx_setup_arch, .init_IRQ = xes_mpc85xx_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, }; define_machine(xes_mpc8540) { .name = "X-ES MPC8540", .compatible = "xes,MPC8540", .setup_arch = xes_mpc85xx_setup_arch, .init_IRQ = xes_mpc85xx_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/85xx/xes_mpc85xx.c
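The standalone sketch below (not kernel code) reproduces how xes_mpc85xx_configure_l2() above builds the new L2CTL value; the current-register value is an assumption picked only to show the bit manipulation.

#include <stdio.h>
#include <stdint.h>

#define L2CTL_L2E	0x80000000u	/* L2 enable */
#define L2CTL_L2I	0x40000000u	/* L2 flash invalidate */
#define L2CTL_L2SIZ	0x30000000u	/* L2 SRAM size (read-only) */

int main(void)
{
	uint32_t cur = 0x20000000;	/* assumed current L2CTL: cache disabled */
	uint32_t ctl = L2CTL_L2E | L2CTL_L2I;

	/*
	 * On MPC8540/MPC8560 the block-size field sits two bits below the
	 * size field, so the size bits are copied down to make L2BLKSZ
	 * match L2SIZ (all of the SRAM used as cache).
	 */
	ctl |= (cur & L2CTL_L2SIZ) >> 2;

	printf("old L2CTL %#010x -> new L2CTL %#010x\n",
	       (unsigned int)cur, (unsigned int)ctl);
	return 0;
}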
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2008 Emcraft Systems * Sergei Poselenov <[email protected]> * * Based on MPC8560 ADS and arch/ppc tqm85xx ports * * Maintained by Kumar Gala (see MAINTAINERS for contact information) * * Copyright 2008 Freescale Semiconductor Inc. * * Copyright (c) 2005-2006 DENX Software Engineering * Stefan Roese <[email protected]> * * Based on original work by * Kumar Gala <[email protected]> * Copyright 2004 Freescale Semiconductor Inc. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/of.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/mpic.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc85xx.h" #include "socrates_fpga_pic.h" static void __init socrates_pic_init(void) { struct device_node *np; struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); np = of_find_compatible_node(NULL, NULL, "abb,socrates-fpga-pic"); if (!np) { printk(KERN_ERR "Could not find socrates-fpga-pic node\n"); return; } socrates_fpga_pic_init(np); of_node_put(np); } /* * Setup the architecture */ static void __init socrates_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("socrates_setup_arch()", 0); fsl_pci_assign_primary(); } machine_arch_initcall(socrates, mpc85xx_common_publish_devices); define_machine(socrates) { .name = "Socrates", .compatible = "abb,socrates", .setup_arch = socrates_setup_arch, .init_IRQ = socrates_pic_init, .get_irq = mpic_get_irq, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/85xx/socrates.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Based on MPC8560 ADS and arch/ppc tqm85xx ports * * Maintained by Kumar Gala (see MAINTAINERS for contact information) * * Copyright 2008 Freescale Semiconductor Inc. * * Copyright (c) 2005-2006 DENX Software Engineering * Stefan Roese <[email protected]> * * Based on original work by * Kumar Gala <[email protected]> * Copyright 2004 Freescale Semiconductor Inc. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/of.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/mpic.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc85xx.h" #ifdef CONFIG_CPM2 #include <asm/cpm2.h> #endif /* CONFIG_CPM2 */ static void __init tqm85xx_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); mpc85xx_cpm2_pic_init(); } /* * Setup the architecture */ static void __init tqm85xx_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("tqm85xx_setup_arch()", 0); #ifdef CONFIG_CPM2 cpm2_reset(); #endif fsl_pci_assign_primary(); } static void tqm85xx_show_cpuinfo(struct seq_file *m) { uint pvid, svid, phid1; pvid = mfspr(SPRN_PVR); svid = mfspr(SPRN_SVR); seq_printf(m, "Vendor\t\t: TQ Components\n"); seq_printf(m, "PVR\t\t: 0x%x\n", pvid); seq_printf(m, "SVR\t\t: 0x%x\n", svid); /* Display cpu Pll setting */ phid1 = mfspr(SPRN_HID1); seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); } static void tqm85xx_ti1520_fixup(struct pci_dev *pdev) { unsigned int val; /* Do not do the fixup on other platforms! */ if (!machine_is(tqm85xx)) return; dev_info(&pdev->dev, "Using TI 1520 fixup on TQM85xx\n"); /* * Enable P2CCLK bit in system control register * to enable CLOCK output to power chip */ pci_read_config_dword(pdev, 0x80, &val); pci_write_config_dword(pdev, 0x80, val | (1 << 27)); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_1520, tqm85xx_ti1520_fixup); machine_arch_initcall(tqm85xx, mpc85xx_common_publish_devices); static const char * const board[] __initconst = { "tqc,tqm8540", "tqc,tqm8541", "tqc,tqm8548", "tqc,tqm8555", "tqc,tqm8560", NULL }; /* * Called very early, device-tree isn't unflattened */ static int __init tqm85xx_probe(void) { return of_device_compatible_match(of_root, board); } define_machine(tqm85xx) { .name = "TQM85xx", .probe = tqm85xx_probe, .setup_arch = tqm85xx_setup_arch, .init_IRQ = tqm85xx_pic_init, .show_cpuinfo = tqm85xx_show_cpuinfo, .get_irq = mpic_get_irq, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/85xx/tqm85xx.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * C293PCIE Board Setup * * Copyright 2013 Freescale Semiconductor Inc. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/of.h> #include <asm/machdep.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc85xx.h" static void __init c293_pcie_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); } /* * Setup the architecture */ static void __init c293_pcie_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("c293_pcie_setup_arch()", 0); fsl_pci_assign_primary(); printk(KERN_INFO "C293 PCIE board from Freescale Semiconductor\n"); } machine_arch_initcall(c293_pcie, mpc85xx_common_publish_devices); define_machine(c293_pcie) { .name = "C293 PCIE", .compatible = "fsl,C293PCIE", .setup_arch = c293_pcie_setup_arch, .init_IRQ = c293_pcie_pic_init, .get_irq = mpic_get_irq, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/85xx/c293pcie.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Freescale P2020 board Setup * * Copyright 2007,2009,2012-2013 Freescale Semiconductor Inc. * Copyright 2022-2023 Pali Rohár <[email protected]> */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/of.h> #include <asm/machdep.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <asm/swiotlb.h> #include <asm/ppc-pci.h> #include <sysdev/fsl_pci.h> #include "smp.h" #include "mpc85xx.h" static void __init p2020_pic_init(void) { struct mpic *mpic; int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU; mpic = mpic_alloc(NULL, 0, flags, 0, 256, " OpenPIC "); if (WARN_ON(!mpic)) return; mpic_init(mpic); mpc85xx_8259_init(); } /* * Setup the architecture */ static void __init p2020_setup_arch(void) { swiotlb_detect_4g(); fsl_pci_assign_primary(); uli_init(); mpc85xx_smp_init(); mpc85xx_qe_par_io_init(); } /* * Called very early, device-tree isn't unflattened */ static int __init p2020_probe(void) { struct device_node *p2020_cpu; /* * There is no common compatible string for all P2020 boards. * The only common thing is "PowerPC,P2020@0" cpu node. * So check for P2020 board via this cpu node. */ p2020_cpu = of_find_node_by_path("/cpus/PowerPC,P2020@0"); of_node_put(p2020_cpu); return !!p2020_cpu; } machine_arch_initcall(p2020, mpc85xx_common_publish_devices); define_machine(p2020) { .name = "Freescale P2020", .probe = p2020_probe, .setup_arch = p2020_setup_arch, .init_IRQ = p2020_pic_init, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, .pcibios_fixup_phb = fsl_pcibios_fixup_phb, #endif .get_irq = mpic_get_irq, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/85xx/p2020.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Based on MPC8560 ADS and arch/ppc stx_gp3 ports * * Maintained by Kumar Gala (see MAINTAINERS for contact information) * * Copyright 2008 Freescale Semiconductor Inc. * * Dan Malek <[email protected]> * Copyright 2004 Embedded Edge, LLC * * Copied from mpc8560_ads.c * Copyright 2002, 2003 Motorola Inc. * * Ported to 2.6, Matt Porter <[email protected]> * Copyright 2004-2005 MontaVista Software, Inc. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/of.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/pci-bridge.h> #include <asm/mpic.h> #include <mm/mmu_decl.h> #include <asm/udbg.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc85xx.h" #ifdef CONFIG_CPM2 #include <asm/cpm2.h> #endif /* CONFIG_CPM2 */ static void __init stx_gp3_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); mpc85xx_cpm2_pic_init(); } /* * Setup the architecture */ static void __init stx_gp3_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("stx_gp3_setup_arch()", 0); fsl_pci_assign_primary(); #ifdef CONFIG_CPM2 cpm2_reset(); #endif } static void stx_gp3_show_cpuinfo(struct seq_file *m) { uint pvid, svid, phid1; pvid = mfspr(SPRN_PVR); svid = mfspr(SPRN_SVR); seq_printf(m, "Vendor\t\t: RPC Electronics STx\n"); seq_printf(m, "PVR\t\t: 0x%x\n", pvid); seq_printf(m, "SVR\t\t: 0x%x\n", svid); /* Display cpu Pll setting */ phid1 = mfspr(SPRN_HID1); seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); } machine_arch_initcall(stx_gp3, mpc85xx_common_publish_devices); define_machine(stx_gp3) { .name = "STX GP3", .compatible = "stx,gp3-8560", .setup_arch = stx_gp3_setup_arch, .init_IRQ = stx_gp3_pic_init, .show_cpuinfo = stx_gp3_show_cpuinfo, .get_irq = mpic_get_irq, .progress = udbg_progress, };
linux-master
arch/powerpc/platforms/85xx/stx_gp3.c
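stx_gp3_show_cpuinfo() above prints the raw PVR and SVR register values. When decoded fields are wanted instead, the PVR is usually split with the PVR_VER()/PVR_REV() helpers from asm/reg.h; the sketch below is illustrative and the helper name is made up.

#include <linux/printk.h>
#include <asm/reg.h>

/* Illustrative helper: report the processor version register split into
 * its version (upper 16 bits) and revision (lower 16 bits) fields. */
static void example_report_pvr(void)
{
	unsigned int pvr = mfspr(SPRN_PVR);

	pr_info("PVR: version 0x%x, revision 0x%x\n",
		PVR_VER(pvr), PVR_REV(pvr));
}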
// SPDX-License-Identifier: GPL-2.0-or-later /* * Author: Andy Fleming <[email protected]> * Kumar Gala <[email protected]> * * Copyright 2006-2008, 2011-2012, 2015 Freescale Semiconductor Inc. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/sched/hotplug.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/of.h> #include <linux/kexec.h> #include <linux/highmem.h> #include <linux/cpu.h> #include <linux/fsl/guts.h> #include <linux/pgtable.h> #include <asm/machdep.h> #include <asm/page.h> #include <asm/mpic.h> #include <asm/cacheflush.h> #include <asm/dbell.h> #include <asm/code-patching.h> #include <asm/cputhreads.h> #include <asm/fsl_pm.h> #include <sysdev/fsl_soc.h> #include <sysdev/mpic.h> #include "smp.h" struct epapr_spin_table { u32 addr_h; u32 addr_l; u32 r3_h; u32 r3_l; u32 reserved; u32 pir; }; static u64 timebase; static int tb_req; static int tb_valid; static void mpc85xx_give_timebase(void) { unsigned long flags; local_irq_save(flags); hard_irq_disable(); while (!tb_req) barrier(); tb_req = 0; qoriq_pm_ops->freeze_time_base(true); #ifdef CONFIG_PPC64 /* * e5500/e6500 have a workaround for erratum A-006958 in place * that will reread the timebase until TBL is non-zero. * That would be a bad thing when the timebase is frozen. * * Thus, we read it manually, and instead of checking that * TBL is non-zero, we ensure that TB does not change. We don't * do that for the main mftb implementation, because it requires * a scratch register */ { u64 prev; asm volatile("mfspr %0, %1" : "=r" (timebase) : "i" (SPRN_TBRL)); do { prev = timebase; asm volatile("mfspr %0, %1" : "=r" (timebase) : "i" (SPRN_TBRL)); } while (prev != timebase); } #else timebase = get_tb(); #endif mb(); tb_valid = 1; while (tb_valid) barrier(); qoriq_pm_ops->freeze_time_base(false); local_irq_restore(flags); } static void mpc85xx_take_timebase(void) { unsigned long flags; local_irq_save(flags); hard_irq_disable(); tb_req = 1; while (!tb_valid) barrier(); set_tb(timebase >> 32, timebase & 0xffffffff); isync(); tb_valid = 0; local_irq_restore(flags); } #ifdef CONFIG_HOTPLUG_CPU static void smp_85xx_cpu_offline_self(void) { unsigned int cpu = smp_processor_id(); local_irq_disable(); hard_irq_disable(); /* mask all irqs to prevent cpu wakeup */ qoriq_pm_ops->irq_mask(cpu); idle_task_exit(); mtspr(SPRN_TCR, 0); mtspr(SPRN_TSR, mfspr(SPRN_TSR)); generic_set_cpu_dead(cpu); cur_cpu_spec->cpu_down_flush(); qoriq_pm_ops->cpu_die(cpu); while (1) ; } static void qoriq_cpu_kill(unsigned int cpu) { int i; for (i = 0; i < 500; i++) { if (is_cpu_dead(cpu)) { #ifdef CONFIG_PPC64 paca_ptrs[cpu]->cpu_start = 0; #endif return; } msleep(20); } pr_err("CPU%d didn't die...\n", cpu); } #endif /* * To keep it compatible with old boot program which uses * cache-inhibit spin table, we need to flush the cache * before accessing spin table to invalidate any staled data. * We also need to flush the cache after writing to spin * table to push data out. 
*/ static inline void flush_spin_table(void *spin_table) { flush_dcache_range((ulong)spin_table, (ulong)spin_table + sizeof(struct epapr_spin_table)); } static inline u32 read_spin_table_addr_l(void *spin_table) { flush_dcache_range((ulong)spin_table, (ulong)spin_table + sizeof(struct epapr_spin_table)); return in_be32(&((struct epapr_spin_table *)spin_table)->addr_l); } #ifdef CONFIG_PPC64 static void wake_hw_thread(void *info) { void fsl_secondary_thread_init(void); unsigned long inia; int cpu = *(const int *)info; inia = ppc_function_entry(fsl_secondary_thread_init); book3e_start_thread(cpu_thread_in_core(cpu), inia); } #endif static int smp_85xx_start_cpu(int cpu) { int ret = 0; struct device_node *np; const u64 *cpu_rel_addr; unsigned long flags; int ioremappable; int hw_cpu = get_hard_smp_processor_id(cpu); struct epapr_spin_table __iomem *spin_table; np = of_get_cpu_node(cpu, NULL); cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL); if (!cpu_rel_addr) { pr_err("No cpu-release-addr for cpu %d\n", cpu); return -ENOENT; } /* * A secondary core could be in a spinloop in the bootpage * (0xfffff000), somewhere in highmem, or somewhere in lowmem. * The bootpage and highmem can be accessed via ioremap(), but * we need to directly access the spinloop if its in lowmem. */ ioremappable = *cpu_rel_addr > virt_to_phys(high_memory - 1); /* Map the spin table */ if (ioremappable) spin_table = ioremap_coherent(*cpu_rel_addr, sizeof(struct epapr_spin_table)); else spin_table = phys_to_virt(*cpu_rel_addr); local_irq_save(flags); hard_irq_disable(); if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare) qoriq_pm_ops->cpu_up_prepare(cpu); /* if cpu is not spinning, reset it */ if (read_spin_table_addr_l(spin_table) != 1) { /* * We don't set the BPTR register here since it already points * to the boot page properly. */ mpic_reset_core(cpu); /* * wait until core is ready... * We need to invalidate the stale data, in case the boot * loader uses a cache-inhibited spin table. */ if (!spin_event_timeout( read_spin_table_addr_l(spin_table) == 1, 10000, 100)) { pr_err("timeout waiting for cpu %d to reset\n", hw_cpu); ret = -EAGAIN; goto err; } } flush_spin_table(spin_table); out_be32(&spin_table->pir, hw_cpu); #ifdef CONFIG_PPC64 out_be64((u64 *)(&spin_table->addr_h), __pa(ppc_function_entry(generic_secondary_smp_init))); #else #ifdef CONFIG_PHYS_ADDR_T_64BIT /* * We need also to write addr_h to spin table for systems * in which their physical memory start address was configured * to above 4G, otherwise the secondary core can not get * correct entry to start from. */ out_be32(&spin_table->addr_h, __pa(__early_start) >> 32); #endif out_be32(&spin_table->addr_l, __pa(__early_start)); #endif flush_spin_table(spin_table); err: local_irq_restore(flags); if (ioremappable) iounmap(spin_table); return ret; } static int smp_85xx_kick_cpu(int nr) { int ret = 0; #ifdef CONFIG_PPC64 int primary = nr; #endif WARN_ON(nr < 0 || nr >= num_possible_cpus()); pr_debug("kick CPU #%d\n", nr); #ifdef CONFIG_PPC64 if (threads_per_core == 2) { if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT))) return -ENOENT; booting_thread_hwid = cpu_thread_in_core(nr); primary = cpu_first_thread_sibling(nr); if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare) qoriq_pm_ops->cpu_up_prepare(nr); /* * If either thread in the core is online, use it to start * the other. 
*/ if (cpu_online(primary)) { smp_call_function_single(primary, wake_hw_thread, &nr, 1); goto done; } else if (cpu_online(primary + 1)) { smp_call_function_single(primary + 1, wake_hw_thread, &nr, 1); goto done; } /* * If getting here, it means both threads in the core are * offline. So start the primary thread, then it will start * the thread specified in booting_thread_hwid, the one * corresponding to nr. */ } else if (threads_per_core == 1) { /* * If one core has only one thread, set booting_thread_hwid to * an invalid value. */ booting_thread_hwid = INVALID_THREAD_HWID; } else if (threads_per_core > 2) { pr_err("Do not support more than 2 threads per CPU."); return -EINVAL; } ret = smp_85xx_start_cpu(primary); if (ret) return ret; done: paca_ptrs[nr]->cpu_start = 1; generic_set_cpu_up(nr); return ret; #else ret = smp_85xx_start_cpu(nr); if (ret) return ret; generic_set_cpu_up(nr); return ret; #endif } struct smp_ops_t smp_85xx_ops = { .cause_nmi_ipi = NULL, .kick_cpu = smp_85xx_kick_cpu, .cpu_bootable = smp_generic_cpu_bootable, #ifdef CONFIG_HOTPLUG_CPU .cpu_disable = generic_cpu_disable, .cpu_die = generic_cpu_die, #endif #if defined(CONFIG_KEXEC_CORE) && !defined(CONFIG_PPC64) .give_timebase = smp_generic_give_timebase, .take_timebase = smp_generic_take_timebase, #endif }; #ifdef CONFIG_KEXEC_CORE #ifdef CONFIG_PPC32 atomic_t kexec_down_cpus = ATOMIC_INIT(0); static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary) { local_irq_disable(); if (secondary) { cur_cpu_spec->cpu_down_flush(); atomic_inc(&kexec_down_cpus); /* loop forever */ while (1); } } static void mpc85xx_smp_kexec_down(void *arg) { if (ppc_md.kexec_cpu_down) ppc_md.kexec_cpu_down(0,1); } #else static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary) { int cpu = smp_processor_id(); int sibling = cpu_last_thread_sibling(cpu); bool notified = false; int disable_cpu; int disable_threadbit = 0; long start = mftb(); long now; local_irq_disable(); hard_irq_disable(); mpic_teardown_this_cpu(secondary); if (cpu == crashing_cpu && cpu_thread_in_core(cpu) != 0) { /* * We enter the crash kernel on whatever cpu crashed, * even if it's a secondary thread. If that's the case, * disable the corresponding primary thread. 
*/ disable_threadbit = 1; disable_cpu = cpu_first_thread_sibling(cpu); } else if (sibling != crashing_cpu && cpu_thread_in_core(cpu) == 0 && cpu_thread_in_core(sibling) != 0) { disable_threadbit = 2; disable_cpu = sibling; } if (disable_threadbit) { while (paca_ptrs[disable_cpu]->kexec_state < KEXEC_STATE_REAL_MODE) { barrier(); now = mftb(); if (!notified && now - start > 1000000) { pr_info("%s/%d: waiting for cpu %d to enter KEXEC_STATE_REAL_MODE (%d)\n", __func__, smp_processor_id(), disable_cpu, paca_ptrs[disable_cpu]->kexec_state); notified = true; } } if (notified) { pr_info("%s: cpu %d done waiting\n", __func__, disable_cpu); } mtspr(SPRN_TENC, disable_threadbit); while (mfspr(SPRN_TENSR) & disable_threadbit) cpu_relax(); } } #endif static void mpc85xx_smp_machine_kexec(struct kimage *image) { #ifdef CONFIG_PPC32 int timeout = INT_MAX; int i, num_cpus = num_present_cpus(); if (image->type == KEXEC_TYPE_DEFAULT) smp_call_function(mpc85xx_smp_kexec_down, NULL, 0); while ( (atomic_read(&kexec_down_cpus) != (num_cpus - 1)) && ( timeout > 0 ) ) { timeout--; } if ( !timeout ) printk(KERN_ERR "Unable to bring down secondary cpu(s)"); for_each_online_cpu(i) { if ( i == smp_processor_id() ) continue; mpic_reset_core(i); } #endif default_machine_kexec(image); } #endif /* CONFIG_KEXEC_CORE */ static void smp_85xx_setup_cpu(int cpu_nr) { mpic_setup_this_cpu(); } void __init mpc85xx_smp_init(void) { struct device_node *np; np = of_find_node_by_type(NULL, "open-pic"); if (np) { smp_85xx_ops.probe = smp_mpic_probe; smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu; smp_85xx_ops.message_pass = smp_mpic_message_pass; } else smp_85xx_ops.setup_cpu = NULL; if (cpu_has_feature(CPU_FTR_DBELL)) { /* * If left NULL, .message_pass defaults to * smp_muxed_ipi_message_pass */ smp_85xx_ops.message_pass = NULL; smp_85xx_ops.cause_ipi = doorbell_global_ipi; smp_85xx_ops.probe = NULL; } #ifdef CONFIG_FSL_CORENET_RCPM /* Assign a value to qoriq_pm_ops on PPC_E500MC */ fsl_rcpm_init(); #else /* Assign a value to qoriq_pm_ops on !PPC_E500MC */ mpc85xx_setup_pmc(); #endif if (qoriq_pm_ops) { smp_85xx_ops.give_timebase = mpc85xx_give_timebase; smp_85xx_ops.take_timebase = mpc85xx_take_timebase; #ifdef CONFIG_HOTPLUG_CPU smp_85xx_ops.cpu_offline_self = smp_85xx_cpu_offline_self; smp_85xx_ops.cpu_die = qoriq_cpu_kill; #endif } smp_ops = &smp_85xx_ops; #ifdef CONFIG_KEXEC_CORE ppc_md.kexec_cpu_down = mpc85xx_smp_kexec_cpu_down; ppc_md.machine_kexec = mpc85xx_smp_machine_kexec; #endif }
linux-master
arch/powerpc/platforms/85xx/smp.c
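For context on the spin-table writes in smp_85xx_start_cpu() above: under the ePAPR convention the secondary core sits in a boot-program loop polling the table until the master publishes a real entry point (the address field starts out as 1). The sketch below shows that waiting side in grossly simplified, freestanding C; the struct and function names are illustrative, and a real boot program would also handle MMU setup, cache state and the r3 argument in assembly.

#include <stdint.h>

/* Mirror of struct epapr_spin_table above (all fields big-endian). */
struct spin_table {
	uint32_t addr_h;
	uint32_t addr_l;	/* 1 while held, else low word of entry point */
	uint32_t r3_h;
	uint32_t r3_l;
	uint32_t reserved;
	uint32_t pir;
};

/* Simplified secondary-side loop: the kernel writes PIR first and the entry
 * address last, so a change away from 1 in addr_l releases this core. */
static void secondary_spin(volatile struct spin_table *tbl)
{
	void (*entry)(void);

	while (tbl->addr_l == 1)
		;	/* not released yet */

	entry = (void (*)(void))(uintptr_t)
		(((uint64_t)tbl->addr_h << 32) | tbl->addr_l);
	entry();	/* jumps to __early_start / generic_secondary_smp_init */
}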
// SPDX-License-Identifier: GPL-2.0-or-later /* * MPC85xx PM operators * * Copyright 2015 Freescale Semiconductor Inc. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/fsl/guts.h> #include <asm/io.h> #include <asm/fsl_pm.h> #include "smp.h" static struct ccsr_guts __iomem *guts; #ifdef CONFIG_FSL_PMC static void mpc85xx_irq_mask(int cpu) { } static void mpc85xx_irq_unmask(int cpu) { } static void mpc85xx_cpu_die(int cpu) { u32 tmp; tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP; mtspr(SPRN_HID0, tmp); /* Enter NAP mode. */ tmp = mfmsr(); tmp |= MSR_WE; asm volatile( "msync\n" "mtmsr %0\n" "isync\n" : : "r" (tmp)); } static void mpc85xx_cpu_up_prepare(int cpu) { } #endif static void mpc85xx_freeze_time_base(bool freeze) { uint32_t mask; mask = CCSR_GUTS_DEVDISR_TB0 | CCSR_GUTS_DEVDISR_TB1; if (freeze) setbits32(&guts->devdisr, mask); else clrbits32(&guts->devdisr, mask); in_be32(&guts->devdisr); } static const struct of_device_id mpc85xx_smp_guts_ids[] = { { .compatible = "fsl,mpc8572-guts", }, { .compatible = "fsl,p1020-guts", }, { .compatible = "fsl,p1021-guts", }, { .compatible = "fsl,p1022-guts", }, { .compatible = "fsl,p1023-guts", }, { .compatible = "fsl,p2020-guts", }, { .compatible = "fsl,bsc9132-guts", }, {}, }; static const struct fsl_pm_ops mpc85xx_pm_ops = { .freeze_time_base = mpc85xx_freeze_time_base, #ifdef CONFIG_FSL_PMC .irq_mask = mpc85xx_irq_mask, .irq_unmask = mpc85xx_irq_unmask, .cpu_die = mpc85xx_cpu_die, .cpu_up_prepare = mpc85xx_cpu_up_prepare, #endif }; int __init mpc85xx_setup_pmc(void) { struct device_node *np; np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids); if (np) { guts = of_iomap(np, 0); of_node_put(np); if (!guts) { pr_err("Could not map guts node address\n"); return -ENOMEM; } qoriq_pm_ops = &mpc85xx_pm_ops; } return 0; }
linux-master
arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c
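mpc85xx_freeze_time_base() above gates the timebase by setting the two DEVDISR timebase-disable bits in the guts block and reads DEVDISR back so the write is posted before returning. A minimal caller-side sketch, mirroring how the give/take handshake in smp.c uses the hook, follows; the helper name is made up, and it assumes qoriq_pm_ops has already been populated (for example by mpc85xx_setup_pmc()) and that the caller runs with interrupts disabled as the real code does.

#include <linux/types.h>
#include <asm/time.h>
#include <asm/fsl_pm.h>

/* Illustrative only: sample the timebase while it is frozen so the value
 * can be copied to another core without drifting in between. */
static u64 example_sample_frozen_tb(void)
{
	u64 tb;

	qoriq_pm_ops->freeze_time_base(true);
	tb = get_tb();			/* stable while DEVDISR gates the TB */
	qoriq_pm_ops->freeze_time_base(false);

	return tb;
}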