/*
* PCI / PCI-X / PCI-Express support for 4xx parts
*
* Copyright 2007 Ben. Herrenschmidt <[email protected]>, IBM Corp.
*
* Most of the PCI Express code comes from Stefan Roese's implementation for
* arch/ppc in the DENX tree, slightly reworked by me.
*
* Copyright 2007 DENX Software Engineering, Stefan Roese <[email protected]>
*
* Some of that in turn comes from a previous 440SPE-only implementation
* by Roland Dreier:
*
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Roland Dreier <[email protected]>
*
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <mm/mmu_decl.h>
#include "pci.h"
static int dma_offset_set;
#define U64_TO_U32_LOW(val) ((u32)((val) & 0x00000000ffffffffULL))
#define U64_TO_U32_HIGH(val) ((u32)((val) >> 32))
#define RES_TO_U32_LOW(val) \
((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_LOW(val) : (val))
#define RES_TO_U32_HIGH(val) \
((sizeof(resource_size_t) > sizeof(u32)) ? U64_TO_U32_HIGH(val) : (0))
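/* resource_size_t is 32-bit or 64-bit depending on the platform
 * configuration (CONFIG_PHYS_ADDR_T_64BIT), so split it into u32 halves
 * only when it is actually wider than 32 bits.
 */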
static inline int ppc440spe_revA(void)
{
/* Catch both 440SPe variants, with and without RAID6 support */
if ((mfspr(SPRN_PVR) & 0xffefffff) == 0x53421890)
return 1;
else
return 0;
}
static void fixup_ppc4xx_pci_bridge(struct pci_dev *dev)
{
struct pci_controller *hose;
struct resource *r;
if (dev->devfn != 0 || dev->bus->self != NULL)
return;
hose = pci_bus_to_host(dev->bus);
if (hose == NULL)
return;
if (!of_device_is_compatible(hose->dn, "ibm,plb-pciex") &&
!of_device_is_compatible(hose->dn, "ibm,plb-pcix") &&
!of_device_is_compatible(hose->dn, "ibm,plb-pci"))
return;
if (of_device_is_compatible(hose->dn, "ibm,plb440epx-pci") ||
of_device_is_compatible(hose->dn, "ibm,plb440grx-pci")) {
hose->indirect_type |= PPC_INDIRECT_TYPE_BROKEN_MRM;
}
/* Hide the PCI host BARs from the kernel as their content doesn't
* fit well in the resource management
*/
pci_dev_for_each_resource(dev, r) {
r->start = r->end = 0;
r->flags = 0;
}
printk(KERN_INFO "PCI: Hiding 4xx host bridge resources %s\n",
pci_name(dev));
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, fixup_ppc4xx_pci_bridge);
static int __init ppc4xx_parse_dma_ranges(struct pci_controller *hose,
void __iomem *reg,
struct resource *res)
{
u64 size;
const u32 *ranges;
int rlen;
int pna = of_n_addr_cells(hose->dn);
int np = pna + 5;
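/*
 * Each dma-ranges entry is <pci_space pci_addr(2 cells) cpu_addr(pna cells)
 * size(2 cells)>, hence np = pna + 5.  A hypothetical entry describing a
 * 2 GB non-prefetchable window at PCI address 0 (assuming two parent
 * address cells) would look like:
 *
 *   dma-ranges = <0x02000000 0x0 0x0  0x0 0x0  0x0 0x80000000>;
 */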
/* Default: assume a 2 GB inbound window at PCI address 0 */
res->start = 0;
size = 0x80000000;
res->end = size - 1;
res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
/* Get dma-ranges property */
ranges = of_get_property(hose->dn, "dma-ranges", &rlen);
if (ranges == NULL)
goto out;
/* Walk it */
while ((rlen -= np * 4) >= 0) {
u32 pci_space = ranges[0];
u64 pci_addr = of_read_number(ranges + 1, 2);
u64 cpu_addr = of_translate_dma_address(hose->dn, ranges + 3);
size = of_read_number(ranges + pna + 3, 2);
ranges += np;
if (cpu_addr == OF_BAD_ADDR || size == 0)
continue;
/* We only care about memory */
if ((pci_space & 0x03000000) != 0x02000000)
continue;
/* We currently only support memory at 0, and pci_addr
* within 32 bits space
*/
if (cpu_addr != 0 || pci_addr > 0xffffffff) {
printk(KERN_WARNING "%pOF: Ignored unsupported dma range"
" 0x%016llx...0x%016llx -> 0x%016llx\n",
hose->dn,
pci_addr, pci_addr + size - 1, cpu_addr);
continue;
}
/* Check if not prefetchable */
if (!(pci_space & 0x40000000))
res->flags &= ~IORESOURCE_PREFETCH;
/* Use that */
res->start = pci_addr;
/* Beware of 32 bits resources */
if (sizeof(resource_size_t) == sizeof(u32) &&
(pci_addr + size) > 0x100000000ull)
res->end = 0xffffffff;
else
res->end = res->start + size - 1;
break;
}
/* We only support one global DMA offset */
if (dma_offset_set && pci_dram_offset != res->start) {
printk(KERN_ERR "%pOF: dma-ranges(s) mismatch\n", hose->dn);
return -ENXIO;
}
/* Check that we can fit all of memory as we don't support
* DMA bounce buffers
*/
if (size < total_memory) {
printk(KERN_ERR "%pOF: dma-ranges too small "
"(size=%llx total_memory=%llx)\n",
hose->dn, size, (u64)total_memory);
return -ENXIO;
}
/* Check that the size is a power of 2 and that the base is size-aligned */
if ((size & (size - 1)) != 0 ||
(res->start & (size - 1)) != 0) {
printk(KERN_ERR "%pOF: dma-ranges unaligned\n", hose->dn);
return -ENXIO;
}
/* Check that we are fully contained within 32 bits space if we are not
* running on a 460sx or 476fpe which have 64 bit bus addresses.
*/
if (res->end > 0xffffffff &&
!(of_device_is_compatible(hose->dn, "ibm,plb-pciex-460sx")
|| of_device_is_compatible(hose->dn, "ibm,plb-pciex-476fpe"))) {
printk(KERN_ERR "%pOF: dma-ranges outside of 32 bits space\n",
hose->dn);
return -ENXIO;
}
out:
dma_offset_set = 1;
pci_dram_offset = res->start;
hose->dma_window_base_cur = res->start;
hose->dma_window_size = resource_size(res);
printk(KERN_INFO "4xx PCI DMA offset set to 0x%08lx\n",
pci_dram_offset);
printk(KERN_INFO "4xx PCI DMA window base to 0x%016llx\n",
(unsigned long long)hose->dma_window_base_cur);
printk(KERN_INFO "DMA window size 0x%016llx\n",
(unsigned long long)hose->dma_window_size);
return 0;
}
/*
* 4xx PCI 2.x part
*/
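/*
 * The PCI 2.x bridge provides three PMM register sets for outbound windows
 * (PLB addresses forwarded to PCI memory space when the core acts as PCI
 * master) and the PTM registers for the inbound window (PCI accesses
 * targeting local SDRAM).  Each window is described by a local address,
 * a PCI address and a mask/attribute register encoding its size plus
 * enable/prefetch bits.
 */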
static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller *hose,
void __iomem *reg,
u64 plb_addr,
u64 pci_addr,
u64 size,
unsigned int flags,
int index)
{
u32 ma, pcila, pciha;
/* Hack warning ! The "old" PCI 2.x cell only let us configure the low
* 32-bit of incoming PLB addresses. The top 4 bits of the 36-bit
* address are actually hard wired to a value that appears to depend
* on the specific SoC. For example, it's 0 on 440EP and 1 on 440EPx.
*
* The trick here is we just crop those top bits and ignore them when
* programming the chip. That means the device-tree has to be right
* for the specific part used (we don't print a warning if it's wrong
* but on the other hand, you'll crash quickly enough), but at least
* this code should work whatever the hard coded value is
*/
plb_addr &= 0xffffffffull;
/* Note: Due to the above hack, the test below doesn't actually test
* whether your address is above 4G, but it tests that the address and
* (address + size) are both contained in the same 4G
*/
if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) ||
size < 0x1000 || (plb_addr & (size - 1)) != 0) {
printk(KERN_WARNING "%pOF: Resource out of range\n", hose->dn);
return -1;
}
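/* The mask/attribute register encodes the window size as a mask of 1s in
 * the high-order bits; bit 0 enables the window and bit 1 marks it
 * prefetchable.
 */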
ma = (0xffffffffu << ilog2(size)) | 1;
if (flags & IORESOURCE_PREFETCH)
ma |= 2;
pciha = RES_TO_U32_HIGH(pci_addr);
pcila = RES_TO_U32_LOW(pci_addr);
writel(plb_addr, reg + PCIL0_PMM0LA + (0x10 * index));
writel(pcila, reg + PCIL0_PMM0PCILA + (0x10 * index));
writel(pciha, reg + PCIL0_PMM0PCIHA + (0x10 * index));
writel(ma, reg + PCIL0_PMM0MA + (0x10 * index));
return 0;
}
static void __init ppc4xx_configure_pci_PMMs(struct pci_controller *hose,
void __iomem *reg)
{
int i, j, found_isa_hole = 0;
/* Setup outbound memory windows */
for (i = j = 0; i < 3; i++) {
struct resource *res = &hose->mem_resources[i];
resource_size_t offset = hose->mem_offset[i];
/* we only care about memory windows */
if (!(res->flags & IORESOURCE_MEM))
continue;
if (j > 2) {
printk(KERN_WARNING "%pOF: Too many ranges\n", hose->dn);
break;
}
/* Configure the resource */
if (ppc4xx_setup_one_pci_PMM(hose, reg,
res->start,
res->start - offset,
resource_size(res),
res->flags,
j) == 0) {
j++;
/* If the resource PCI address is 0 then we have our
* ISA memory hole
*/
if (res->start == offset)
found_isa_hole = 1;
}
}
/* Handle ISA memory hole if not already covered */
if (j <= 2 && !found_isa_hole && hose->isa_mem_size)
if (ppc4xx_setup_one_pci_PMM(hose, reg, hose->isa_mem_phys, 0,
hose->isa_mem_size, 0, j) == 0)
printk(KERN_INFO "%pOF: Legacy ISA memory support enabled\n",
hose->dn);
}
static void __init ppc4xx_configure_pci_PTMs(struct pci_controller *hose,
void __iomem *reg,
const struct resource *res)
{
resource_size_t size = resource_size(res);
u32 sa;
/* Calculate window size */
sa = (0xffffffffu << ilog2(size)) | 1;
sa |= 0x1;
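/* Like the outbound mask/attribute registers, PTM1MS encodes the window
 * size as a mask of 1s in the high-order bits, with bit 0 as the enable
 * bit.
 */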
/* RAM is always at 0 local for now */
writel(0, reg + PCIL0_PTM1LA);
writel(sa, reg + PCIL0_PTM1MS);
/* Map on PCI side */
early_write_config_dword(hose, hose->first_busno, 0,
PCI_BASE_ADDRESS_1, res->start);
early_write_config_dword(hose, hose->first_busno, 0,
PCI_BASE_ADDRESS_2, 0x00000000);
early_write_config_word(hose, hose->first_busno, 0,
PCI_COMMAND, 0x0006);
}
static void __init ppc4xx_probe_pci_bridge(struct device_node *np)
{
/* NYI */
struct resource rsrc_cfg;
struct resource rsrc_reg;
struct resource dma_window;
struct pci_controller *hose = NULL;
void __iomem *reg = NULL;
const int *bus_range;
int primary = 0;
/* Check if device is enabled */
if (!of_device_is_available(np)) {
printk(KERN_INFO "%pOF: Port disabled via device-tree\n", np);
return;
}
/* Fetch config space registers address */
if (of_address_to_resource(np, 0, &rsrc_cfg)) {
printk(KERN_ERR "%pOF: Can't get PCI config register base !",
np);
return;
}
/* Fetch host bridge internal registers address */
if (of_address_to_resource(np, 3, &rsrc_reg)) {
printk(KERN_ERR "%pOF: Can't get PCI internal register base !",
np);
return;
}
/* Check if primary bridge */
if (of_property_read_bool(np, "primary"))
primary = 1;
/* Get bus range if any */
bus_range = of_get_property(np, "bus-range", NULL);
/* Map registers */
reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
if (reg == NULL) {
printk(KERN_ERR "%pOF: Can't map registers !", np);
goto fail;
}
/* Allocate the host controller data structure */
hose = pcibios_alloc_controller(np);
if (!hose)
goto fail;
hose->first_busno = bus_range ? bus_range[0] : 0x0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
/* Setup config space */
setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4, 0);
/* Disable all windows */
writel(0, reg + PCIL0_PMM0MA);
writel(0, reg + PCIL0_PMM1MA);
writel(0, reg + PCIL0_PMM2MA);
writel(0, reg + PCIL0_PTM1MS);
writel(0, reg + PCIL0_PTM2MS);
/* Parse outbound mapping resources */
pci_process_bridge_OF_ranges(hose, np, primary);
/* Parse inbound mapping resources */
if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
goto fail;
/* Configure outbound ranges POMs */
ppc4xx_configure_pci_PMMs(hose, reg);
/* Configure inbound ranges PIMs */
ppc4xx_configure_pci_PTMs(hose, reg, &dma_window);
/* We don't need the registers anymore */
iounmap(reg);
return;
fail:
if (hose)
pcibios_free_controller(hose);
if (reg)
iounmap(reg);
}
/*
* 4xx PCI-X part
*/
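/*
 * The PCI-X bridge uses POM register sets for the outbound (PLB -> PCI)
 * windows and the PIM registers plus BAR0 for the inbound (PCI -> SDRAM)
 * window; each SA register encodes the window size mask and enable bit.
 */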
static int __init ppc4xx_setup_one_pcix_POM(struct pci_controller *hose,
void __iomem *reg,
u64 plb_addr,
u64 pci_addr,
u64 size,
unsigned int flags,
int index)
{
u32 lah, lal, pciah, pcial, sa;
if (!is_power_of_2(size) || size < 0x1000 ||
(plb_addr & (size - 1)) != 0) {
printk(KERN_WARNING "%pOF: Resource out of range\n",
hose->dn);
return -1;
}
/* Calculate register values */
lah = RES_TO_U32_HIGH(plb_addr);
lal = RES_TO_U32_LOW(plb_addr);
pciah = RES_TO_U32_HIGH(pci_addr);
pcial = RES_TO_U32_LOW(pci_addr);
sa = (0xffffffffu << ilog2(size)) | 0x1;
/* Program register values */
if (index == 0) {
writel(lah, reg + PCIX0_POM0LAH);
writel(lal, reg + PCIX0_POM0LAL);
writel(pciah, reg + PCIX0_POM0PCIAH);
writel(pcial, reg + PCIX0_POM0PCIAL);
writel(sa, reg + PCIX0_POM0SA);
} else {
writel(lah, reg + PCIX0_POM1LAH);
writel(lal, reg + PCIX0_POM1LAL);
writel(pciah, reg + PCIX0_POM1PCIAH);
writel(pcial, reg + PCIX0_POM1PCIAL);
writel(sa, reg + PCIX0_POM1SA);
}
return 0;
}
static void __init ppc4xx_configure_pcix_POMs(struct pci_controller *hose,
void __iomem *reg)
{
int i, j, found_isa_hole = 0;
/* Setup outbound memory windows */
for (i = j = 0; i < 3; i++) {
struct resource *res = &hose->mem_resources[i];
resource_size_t offset = hose->mem_offset[i];
/* we only care about memory windows */
if (!(res->flags & IORESOURCE_MEM))
continue;
if (j > 1) {
printk(KERN_WARNING "%pOF: Too many ranges\n", hose->dn);
break;
}
/* Configure the resource */
if (ppc4xx_setup_one_pcix_POM(hose, reg,
res->start,
res->start - offset,
resource_size(res),
res->flags,
j) == 0) {
j++;
/* If the resource PCI address is 0 then we have our
* ISA memory hole
*/
if (res->start == offset)
found_isa_hole = 1;
}
}
/* Handle ISA memory hole if not already covered */
if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
if (ppc4xx_setup_one_pcix_POM(hose, reg, hose->isa_mem_phys, 0,
hose->isa_mem_size, 0, j) == 0)
printk(KERN_INFO "%pOF: Legacy ISA memory support enabled\n",
hose->dn);
}
static void __init ppc4xx_configure_pcix_PIMs(struct pci_controller *hose,
void __iomem *reg,
const struct resource *res,
int big_pim,
int enable_msi_hole)
{
resource_size_t size = resource_size(res);
u32 sa;
/* RAM is always at 0 */
writel(0x00000000, reg + PCIX0_PIM0LAH);
writel(0x00000000, reg + PCIX0_PIM0LAL);
/* Calculate window size */
sa = (0xffffffffu << ilog2(size)) | 1;
sa |= 0x1;
if (res->flags & IORESOURCE_PREFETCH)
sa |= 0x2;
if (enable_msi_hole)
sa |= 0x4;
writel(sa, reg + PCIX0_PIM0SA);
if (big_pim)
writel(0xffffffff, reg + PCIX0_PIM0SAH);
/* Map on PCI side */
writel(0x00000000, reg + PCIX0_BAR0H);
writel(res->start, reg + PCIX0_BAR0L);
writew(0x0006, reg + PCIX0_COMMAND);
}
static void __init ppc4xx_probe_pcix_bridge(struct device_node *np)
{
struct resource rsrc_cfg;
struct resource rsrc_reg;
struct resource dma_window;
struct pci_controller *hose = NULL;
void __iomem *reg = NULL;
const int *bus_range;
int big_pim, msi, primary;
/* Fetch config space registers address */
if (of_address_to_resource(np, 0, &rsrc_cfg)) {
printk(KERN_ERR "%pOF: Can't get PCI-X config register base !",
np);
return;
}
/* Fetch host bridge internal registers address */
if (of_address_to_resource(np, 3, &rsrc_reg)) {
printk(KERN_ERR "%pOF: Can't get PCI-X internal register base !",
np);
return;
}
/* Check if it supports large PIMs (440GX) */
big_pim = of_property_read_bool(np, "large-inbound-windows");
/* Check if we should enable MSIs inbound hole */
msi = of_property_read_bool(np, "enable-msi-hole");
/* Check if primary bridge */
primary = of_property_read_bool(np, "primary");
/* Get bus range if any */
bus_range = of_get_property(np, "bus-range", NULL);
/* Map registers */
reg = ioremap(rsrc_reg.start, resource_size(&rsrc_reg));
if (reg == NULL) {
printk(KERN_ERR "%pOF: Can't map registers !", np);
goto fail;
}
/* Allocate the host controller data structure */
hose = pcibios_alloc_controller(np);
if (!hose)
goto fail;
hose->first_busno = bus_range ? bus_range[0] : 0x0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
/* Setup config space */
setup_indirect_pci(hose, rsrc_cfg.start, rsrc_cfg.start + 0x4,
PPC_INDIRECT_TYPE_SET_CFG_TYPE);
/* Disable all windows */
writel(0, reg + PCIX0_POM0SA);
writel(0, reg + PCIX0_POM1SA);
writel(0, reg + PCIX0_POM2SA);
writel(0, reg + PCIX0_PIM0SA);
writel(0, reg + PCIX0_PIM1SA);
writel(0, reg + PCIX0_PIM2SA);
if (big_pim) {
writel(0, reg + PCIX0_PIM0SAH);
writel(0, reg + PCIX0_PIM2SAH);
}
/* Parse outbound mapping resources */
pci_process_bridge_OF_ranges(hose, np, primary);
/* Parse inbound mapping resources */
if (ppc4xx_parse_dma_ranges(hose, reg, &dma_window) != 0)
goto fail;
/* Configure outbound ranges POMs */
ppc4xx_configure_pcix_POMs(hose, reg);
/* Configure inbound ranges PIMs */
ppc4xx_configure_pcix_PIMs(hose, reg, &dma_window, big_pim, msi);
/* We don't need the registers anymore */
iounmap(reg);
return;
fail:
if (hose)
pcibios_free_controller(hose);
if (reg)
iounmap(reg);
}
#ifdef CONFIG_PPC4xx_PCI_EXPRESS
/*
* 4xx PCI-Express part
*
* We support several parts currently, based on the compatible property:
*
* ibm,plb-pciex-440spe
* ibm,plb-pciex-405ex
* ibm,plb-pciex-460ex
* ibm,plb-pciex-460sx
* ibm,plb-pciex-apm821xx
* ibm,plb-pciex-476fpe / ibm,plb-pciex-476gtr
*
* Anything else will be rejected for now as they are all subtly
* different unfortunately.
*
*/
#define MAX_PCIE_BUS_MAPPED 0x40
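/* Each downstream bus needs 1 MB of mapped config space (see
 * ppc4xx_pciex_get_config_base()), so capping at 0x40 buses keeps the
 * ioremap of the external config window to 64 MB.
 */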
struct ppc4xx_pciex_port
{
struct pci_controller *hose;
struct device_node *node;
unsigned int index;
int endpoint;
int link;
int has_ibpre;
unsigned int sdr_base;
dcr_host_t dcrs;
struct resource cfg_space;
struct resource utl_regs;
void __iomem *utl_base;
};
static struct ppc4xx_pciex_port *ppc4xx_pciex_ports;
static unsigned int ppc4xx_pciex_port_count;
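/*
 * Per-SoC hooks: core_init() returns the number of ports (or a negative
 * errno), port_init_hw() brings a port's PHY and link logic out of reset,
 * setup_utl() programs the UTL buffer allocations, and check_link()
 * determines whether a link came up.  want_sdr indicates that the port
 * needs an "sdr-base" property for indirect SDR register accesses.
 */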
struct ppc4xx_pciex_hwops
{
bool want_sdr;
int (*core_init)(struct device_node *np);
int (*port_init_hw)(struct ppc4xx_pciex_port *port);
int (*setup_utl)(struct ppc4xx_pciex_port *port);
void (*check_link)(struct ppc4xx_pciex_port *port);
};
static struct ppc4xx_pciex_hwops *ppc4xx_pciex_hwops;
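/* Poll an SDR register (indirect DCR access) until (val & mask) == value,
 * sleeping 1 ms per iteration, or give up after timeout_ms milliseconds.
 */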
static int __init ppc4xx_pciex_wait_on_sdr(struct ppc4xx_pciex_port *port,
unsigned int sdr_offset,
unsigned int mask,
unsigned int value,
int timeout_ms)
{
u32 val;
while(timeout_ms--) {
val = mfdcri(SDR0, port->sdr_base + sdr_offset);
if ((val & mask) == value) {
pr_debug("PCIE%d: Wait on SDR %x success with tm %d (%08x)\n",
port->index, sdr_offset, timeout_ms, val);
return 0;
}
msleep(1);
}
return -1;
}
static int __init ppc4xx_pciex_port_reset_sdr(struct ppc4xx_pciex_port *port)
{
/* Wait for reset to complete */
if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS, 1 << 20, 0, 10)) {
printk(KERN_WARNING "PCIE%d: PGRST failed\n",
port->index);
return -1;
}
return 0;
}
static void __init ppc4xx_pciex_check_link_sdr(struct ppc4xx_pciex_port *port)
{
printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);
/* Check for card presence detect if supported; if not, just wait for
* the link unconditionally.
*
* note that we don't fail if there is no link, we just filter out
* config space accesses. That way, it will be easier to implement
* hotplug later on.
*/
if (!port->has_ibpre ||
!ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
1 << 28, 1 << 28, 100)) {
printk(KERN_INFO
"PCIE%d: Device detected, waiting for link...\n",
port->index);
if (ppc4xx_pciex_wait_on_sdr(port, PESDRn_LOOP,
0x1000, 0x1000, 2000))
printk(KERN_WARNING
"PCIE%d: Link up failed\n", port->index);
else {
printk(KERN_INFO
"PCIE%d: link is up !\n", port->index);
port->link = 1;
}
} else
printk(KERN_INFO "PCIE%d: No device detected.\n", port->index);
}
#ifdef CONFIG_44x
/* Check various reset bits of the 440SPe PCIe core */
static int __init ppc440spe_pciex_check_reset(struct device_node *np)
{
u32 valPE0, valPE1, valPE2;
int err = 0;
/* SDR0_PEGPLLLCT1 reset */
if (!(mfdcri(SDR0, PESDR0_PLLLCT1) & 0x01000000)) {
/*
* the PCIe core was probably already initialised
* by firmware - let's re-reset RCSSET regs
*
* -- Shouldn't we also re-reset the whole thing ? -- BenH
*/
pr_debug("PCIE: SDR0_PLLLCT1 already reset.\n");
mtdcri(SDR0, PESDR0_440SPE_RCSSET, 0x01010000);
mtdcri(SDR0, PESDR1_440SPE_RCSSET, 0x01010000);
mtdcri(SDR0, PESDR2_440SPE_RCSSET, 0x01010000);
}
valPE0 = mfdcri(SDR0, PESDR0_440SPE_RCSSET);
valPE1 = mfdcri(SDR0, PESDR1_440SPE_RCSSET);
valPE2 = mfdcri(SDR0, PESDR2_440SPE_RCSSET);
/* SDR0_PExRCSSET rstgu */
if (!(valPE0 & 0x01000000) ||
!(valPE1 & 0x01000000) ||
!(valPE2 & 0x01000000)) {
printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstgu error\n");
err = -1;
}
/* SDR0_PExRCSSET rstdl */
if (!(valPE0 & 0x00010000) ||
!(valPE1 & 0x00010000) ||
!(valPE2 & 0x00010000)) {
printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstdl error\n");
err = -1;
}
/* SDR0_PExRCSSET rstpyn */
if ((valPE0 & 0x00001000) ||
(valPE1 & 0x00001000) ||
(valPE2 & 0x00001000)) {
printk(KERN_INFO "PCIE: SDR0_PExRCSSET rstpyn error\n");
err = -1;
}
/* SDR0_PExRCSSET hldplb */
if ((valPE0 & 0x10000000) ||
(valPE1 & 0x10000000) ||
(valPE2 & 0x10000000)) {
printk(KERN_INFO "PCIE: SDR0_PExRCSSET hldplb error\n");
err = -1;
}
/* SDR0_PExRCSSET rdy */
if ((valPE0 & 0x00100000) ||
(valPE1 & 0x00100000) ||
(valPE2 & 0x00100000)) {
printk(KERN_INFO "PCIE: SDR0_PExRCSSET rdy error\n");
err = -1;
}
/* SDR0_PExRCSSET shutdown */
if ((valPE0 & 0x00000100) ||
(valPE1 & 0x00000100) ||
(valPE2 & 0x00000100)) {
printk(KERN_INFO "PCIE: SDR0_PExRCSSET shutdown error\n");
err = -1;
}
return err;
}
/* Global PCIe core initializations for 440SPe core */
static int __init ppc440spe_pciex_core_init(struct device_node *np)
{
int time_out = 20;
/* Set PLL clock receiver to LVPECL */
dcri_clrset(SDR0, PESDR0_PLLLCT1, 0, 1 << 28);
/* Shouldn't we do all the calibration stuff etc... here ? */
if (ppc440spe_pciex_check_reset(np))
return -ENXIO;
if (!(mfdcri(SDR0, PESDR0_PLLLCT2) & 0x10000)) {
printk(KERN_INFO "PCIE: PESDR_PLLCT2 resistance calibration "
"failed (0x%08x)\n",
mfdcri(SDR0, PESDR0_PLLLCT2));
return -1;
}
/* De-assert reset of PCIe PLL, wait for lock */
dcri_clrset(SDR0, PESDR0_PLLLCT1, 1 << 24, 0);
udelay(3);
while (time_out) {
if (!(mfdcri(SDR0, PESDR0_PLLLCT3) & 0x10000000)) {
time_out--;
udelay(1);
} else
break;
}
if (!time_out) {
printk(KERN_INFO "PCIE: VCO output not locked\n");
return -1;
}
pr_debug("PCIE initialization OK\n");
return 3;
}
static int __init ppc440spe_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
u32 val = 1 << 24;
if (port->endpoint)
val = PTYPE_LEGACY_ENDPOINT << 20;
else
val = PTYPE_ROOT_PORT << 20;
if (port->index == 0)
val |= LNKW_X8 << 12;
else
val |= LNKW_X4 << 12;
mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x20222222);
if (ppc440spe_revA())
mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x11000000);
mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL0SET1, 0x35000000);
mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL1SET1, 0x35000000);
mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL2SET1, 0x35000000);
mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL3SET1, 0x35000000);
if (port->index == 0) {
mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL4SET1,
0x35000000);
mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL5SET1,
0x35000000);
mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL6SET1,
0x35000000);
mtdcri(SDR0, port->sdr_base + PESDRn_440SPE_HSSL7SET1,
0x35000000);
}
dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
(1 << 24) | (1 << 16), 1 << 12);
return ppc4xx_pciex_port_reset_sdr(port);
}
static int __init ppc440speA_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
return ppc440spe_pciex_init_port_hw(port);
}
static int __init ppc440speB_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
int rc = ppc440spe_pciex_init_port_hw(port);
port->has_ibpre = 1;
return rc;
}
static int ppc440speA_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
/* XXX Check what that value means... I hate magic */
dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x68782800);
/*
* Set buffer allocations and then assert VRB and TXE.
*/
out_be32(port->utl_base + PEUTL_OUTTR, 0x08000000);
out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
out_be32(port->utl_base + PEUTL_OPDBSZ, 0x10000000);
out_be32(port->utl_base + PEUTL_PBBSZ, 0x53000000);
out_be32(port->utl_base + PEUTL_IPHBSZ, 0x08000000);
out_be32(port->utl_base + PEUTL_IPDBSZ, 0x10000000);
out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
return 0;
}
static int ppc440speB_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
/* Report CRS to the operating system */
out_be32(port->utl_base + PEUTL_PBCTL, 0x08000000);
return 0;
}
static struct ppc4xx_pciex_hwops ppc440speA_pcie_hwops __initdata =
{
.want_sdr = true,
.core_init = ppc440spe_pciex_core_init,
.port_init_hw = ppc440speA_pciex_init_port_hw,
.setup_utl = ppc440speA_pciex_init_utl,
.check_link = ppc4xx_pciex_check_link_sdr,
};
static struct ppc4xx_pciex_hwops ppc440speB_pcie_hwops __initdata =
{
.want_sdr = true,
.core_init = ppc440spe_pciex_core_init,
.port_init_hw = ppc440speB_pciex_init_port_hw,
.setup_utl = ppc440speB_pciex_init_utl,
.check_link = ppc4xx_pciex_check_link_sdr,
};
static int __init ppc460ex_pciex_core_init(struct device_node *np)
{
/* Nothing to do, return 2 ports */
return 2;
}
static int __init ppc460ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
u32 val;
u32 utlset1;
if (port->endpoint)
val = PTYPE_LEGACY_ENDPOINT << 20;
else
val = PTYPE_ROOT_PORT << 20;
if (port->index == 0) {
val |= LNKW_X1 << 12;
utlset1 = 0x20000000;
} else {
val |= LNKW_X4 << 12;
utlset1 = 0x20101101;
}
mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, utlset1);
mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01210000);
switch (port->index) {
case 0:
mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);
mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST,0x10000000);
break;
case 1:
mtdcri(SDR0, PESDR1_460EX_L0CDRCTL, 0x00003230);
mtdcri(SDR0, PESDR1_460EX_L1CDRCTL, 0x00003230);
mtdcri(SDR0, PESDR1_460EX_L2CDRCTL, 0x00003230);
mtdcri(SDR0, PESDR1_460EX_L3CDRCTL, 0x00003230);
mtdcri(SDR0, PESDR1_460EX_L0DRV, 0x00000130);
mtdcri(SDR0, PESDR1_460EX_L1DRV, 0x00000130);
mtdcri(SDR0, PESDR1_460EX_L2DRV, 0x00000130);
mtdcri(SDR0, PESDR1_460EX_L3DRV, 0x00000130);
mtdcri(SDR0, PESDR1_460EX_L0CLK, 0x00000006);
mtdcri(SDR0, PESDR1_460EX_L1CLK, 0x00000006);
mtdcri(SDR0, PESDR1_460EX_L2CLK, 0x00000006);
mtdcri(SDR0, PESDR1_460EX_L3CLK, 0x00000006);
mtdcri(SDR0, PESDR1_460EX_PHY_CTL_RST,0x10000000);
break;
}
mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));
/* Poll for PHY reset */
/* XXX FIXME add timeout */
switch (port->index) {
case 0:
while (!(mfdcri(SDR0, PESDR0_460EX_RSTSTA) & 0x1))
udelay(10);
break;
case 1:
while (!(mfdcri(SDR0, PESDR1_460EX_RSTSTA) & 0x1))
udelay(10);
break;
}
mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
(mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
PESDRx_RCSSET_RSTPYN);
port->has_ibpre = 1;
return ppc4xx_pciex_port_reset_sdr(port);
}
static int ppc460ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);
/*
* Set buffer allocations and then assert VRB and TXE.
*/
out_be32(port->utl_base + PEUTL_PBCTL, 0x0800000c);
out_be32(port->utl_base + PEUTL_OUTTR, 0x08000000);
out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
out_be32(port->utl_base + PEUTL_OPDBSZ, 0x04000000);
out_be32(port->utl_base + PEUTL_PBBSZ, 0x00000000);
out_be32(port->utl_base + PEUTL_IPHBSZ, 0x02000000);
out_be32(port->utl_base + PEUTL_IPDBSZ, 0x04000000);
out_be32(port->utl_base + PEUTL_RCIRQEN,0x00f00000);
out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
return 0;
}
static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata =
{
.want_sdr = true,
.core_init = ppc460ex_pciex_core_init,
.port_init_hw = ppc460ex_pciex_init_port_hw,
.setup_utl = ppc460ex_pciex_init_utl,
.check_link = ppc4xx_pciex_check_link_sdr,
};
static int __init apm821xx_pciex_core_init(struct device_node *np)
{
/* Return the number of PCIe ports */
return 1;
}
static int __init apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
u32 val;
/*
* Do a software reset on PCIe ports.
* This works around PCI drivers not re-assigning bus numbers for PCIe
* devices after U-Boot has scanned and configured all the buses
* (e.g. Intel PRO/1000 PT quad-port PCIe NIC, LSI SAS 1064E)
*/
mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x0);
mdelay(10);
if (port->endpoint)
val = PTYPE_LEGACY_ENDPOINT << 20;
else
val = PTYPE_ROOT_PORT << 20;
val |= LNKW_X1 << 12;
mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);
mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);
mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x10000000);
mdelay(50);
mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x30000000);
mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));
/* Poll for PHY reset */
val = PESDR0_460EX_RSTSTA - port->sdr_base;
if (ppc4xx_pciex_wait_on_sdr(port, val, 0x1, 1, 100)) {
printk(KERN_WARNING "%s: PCIE: Can't reset PHY\n", __func__);
return -EBUSY;
} else {
mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
(mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
PESDRx_RCSSET_RSTPYN);
port->has_ibpre = 1;
return 0;
}
}
static struct ppc4xx_pciex_hwops apm821xx_pcie_hwops __initdata = {
.want_sdr = true,
.core_init = apm821xx_pciex_core_init,
.port_init_hw = apm821xx_pciex_init_port_hw,
.setup_utl = ppc460ex_pciex_init_utl,
.check_link = ppc4xx_pciex_check_link_sdr,
};
static int __init ppc460sx_pciex_core_init(struct device_node *np)
{
/* HSS drive amplitude */
mtdcri(SDR0, PESDR0_460SX_HSSL0DAMP, 0xB9843211);
mtdcri(SDR0, PESDR0_460SX_HSSL1DAMP, 0xB9843211);
mtdcri(SDR0, PESDR0_460SX_HSSL2DAMP, 0xB9843211);
mtdcri(SDR0, PESDR0_460SX_HSSL3DAMP, 0xB9843211);
mtdcri(SDR0, PESDR0_460SX_HSSL4DAMP, 0xB9843211);
mtdcri(SDR0, PESDR0_460SX_HSSL5DAMP, 0xB9843211);
mtdcri(SDR0, PESDR0_460SX_HSSL6DAMP, 0xB9843211);
mtdcri(SDR0, PESDR0_460SX_HSSL7DAMP, 0xB9843211);
mtdcri(SDR0, PESDR1_460SX_HSSL0DAMP, 0xB9843211);
mtdcri(SDR0, PESDR1_460SX_HSSL1DAMP, 0xB9843211);
mtdcri(SDR0, PESDR1_460SX_HSSL2DAMP, 0xB9843211);
mtdcri(SDR0, PESDR1_460SX_HSSL3DAMP, 0xB9843211);
mtdcri(SDR0, PESDR2_460SX_HSSL0DAMP, 0xB9843211);
mtdcri(SDR0, PESDR2_460SX_HSSL1DAMP, 0xB9843211);
mtdcri(SDR0, PESDR2_460SX_HSSL2DAMP, 0xB9843211);
mtdcri(SDR0, PESDR2_460SX_HSSL3DAMP, 0xB9843211);
/* HSS TX pre-emphasis */
mtdcri(SDR0, PESDR0_460SX_HSSL0COEFA, 0xDCB98987);
mtdcri(SDR0, PESDR0_460SX_HSSL1COEFA, 0xDCB98987);
mtdcri(SDR0, PESDR0_460SX_HSSL2COEFA, 0xDCB98987);
mtdcri(SDR0, PESDR0_460SX_HSSL3COEFA, 0xDCB98987);
mtdcri(SDR0, PESDR0_460SX_HSSL4COEFA, 0xDCB98987);
mtdcri(SDR0, PESDR0_460SX_HSSL5COEFA, 0xDCB98987);
mtdcri(SDR0, PESDR0_460SX_HSSL6COEFA, 0xDCB98987);
mtdcri(SDR0, PESDR0_460SX_HSSL7COEFA, 0xDCB98987);
mtdcri(SDR0, PESDR1_460SX_HSSL0COEFA, 0xDCB98987);
mtdcri(SDR0, PESDR1_460SX_HSSL1COEFA, 0xDCB98987);
mtdcri(SDR0, PESDR1_460SX_HSSL2COEFA, 0xDCB98987);
mtdcri(SDR0, PESDR1_460SX_HSSL3COEFA, 0xDCB98987);
mtdcri(SDR0, PESDR2_460SX_HSSL0COEFA, 0xDCB98987);
mtdcri(SDR0, PESDR2_460SX_HSSL1COEFA, 0xDCB98987);
mtdcri(SDR0, PESDR2_460SX_HSSL2COEFA, 0xDCB98987);
mtdcri(SDR0, PESDR2_460SX_HSSL3COEFA, 0xDCB98987);
/* HSS TX calibration control */
mtdcri(SDR0, PESDR0_460SX_HSSL1CALDRV, 0x22222222);
mtdcri(SDR0, PESDR1_460SX_HSSL1CALDRV, 0x22220000);
mtdcri(SDR0, PESDR2_460SX_HSSL1CALDRV, 0x22220000);
/* HSS TX slew control */
mtdcri(SDR0, PESDR0_460SX_HSSSLEW, 0xFFFFFFFF);
mtdcri(SDR0, PESDR1_460SX_HSSSLEW, 0xFFFF0000);
mtdcri(SDR0, PESDR2_460SX_HSSSLEW, 0xFFFF0000);
/* Set HSS PRBS enabled */
mtdcri(SDR0, PESDR0_460SX_HSSCTLSET, 0x00001130);
mtdcri(SDR0, PESDR2_460SX_HSSCTLSET, 0x00001130);
udelay(100);
/* De-assert PLLRESET */
dcri_clrset(SDR0, PESDR0_PLLLCT2, 0x00000100, 0);
/* Reset DL, UTL, GPL before configuration */
mtdcri(SDR0, PESDR0_460SX_RCSSET,
PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
mtdcri(SDR0, PESDR1_460SX_RCSSET,
PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
mtdcri(SDR0, PESDR2_460SX_RCSSET,
PESDRx_RCSSET_RSTDL | PESDRx_RCSSET_RSTGU);
udelay(100);
/*
* If bifurcation is not enabled, u-boot would have disabled the
* third PCIe port
*/
if (((mfdcri(SDR0, PESDR1_460SX_HSSCTLSET) & 0x00000001) ==
0x00000001)) {
printk(KERN_INFO "PCI: PCIE bifurcation setup successfully.\n");
printk(KERN_INFO "PCI: Total 3 PCIE ports are present\n");
return 3;
}
printk(KERN_INFO "PCI: Total 2 PCIE ports are present\n");
return 2;
}
static int __init ppc460sx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
if (port->endpoint)
dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
0x01000000, 0);
else
dcri_clrset(SDR0, port->sdr_base + PESDRn_UTLSET2,
0, 0x01000000);
dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET,
(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL),
PESDRx_RCSSET_RSTPYN);
port->has_ibpre = 1;
return ppc4xx_pciex_port_reset_sdr(port);
}
static int ppc460sx_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
/* Max 128 Bytes */
out_be32 (port->utl_base + PEUTL_PBBSZ, 0x00000000);
/* Assert VRB and TXE - per datasheet turn off addr validation */
out_be32(port->utl_base + PEUTL_PCTL, 0x80800000);
return 0;
}
static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port)
{
void __iomem *mbase;
int attempt = 50;
port->link = 0;
mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
if (mbase == NULL) {
printk(KERN_ERR "%pOF: Can't map internal config space !",
port->node);
return;
}
while (attempt && (0 == (in_le32(mbase + PECFG_460SX_DLLSTA)
& PECFG_460SX_DLLSTA_LINKUP))) {
attempt--;
mdelay(10);
}
if (attempt)
port->link = 1;
iounmap(mbase);
}
static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = {
.want_sdr = true,
.core_init = ppc460sx_pciex_core_init,
.port_init_hw = ppc460sx_pciex_init_port_hw,
.setup_utl = ppc460sx_pciex_init_utl,
.check_link = ppc460sx_pciex_check_link,
};
#endif /* CONFIG_44x */
#ifdef CONFIG_40x
static int __init ppc405ex_pciex_core_init(struct device_node *np)
{
/* Nothing to do, return 2 ports */
return 2;
}
static void __init ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port *port)
{
/* Assert the PE0_PHY reset */
mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01010000);
msleep(1);
/* deassert the PE0_hotreset */
if (port->endpoint)
mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01111000);
else
mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01101000);
/* poll for phy !reset */
/* XXX FIXME add timeout */
while (!(mfdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSTA) & 0x00001000))
;
/* deassert the PE0_gpl_utl_reset */
mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x00101000);
}
static int __init ppc405ex_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
u32 val;
if (port->endpoint)
val = PTYPE_LEGACY_ENDPOINT;
else
val = PTYPE_ROOT_PORT;
mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET,
1 << 24 | val << 20 | LNKW_X1 << 12);
mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);
mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET1, 0x720F0000);
mtdcri(SDR0, port->sdr_base + PESDRn_405EX_PHYSET2, 0x70600003);
/*
* Only reset the PHY when no link is currently established.
* This is for the Atheros PCIe board, which has problems establishing
* the link (again) after this PHY reset. All other currently tested
* PCIe boards don't show this problem.
* This has to be re-tested and fixed in a later release!
*/
val = mfdcri(SDR0, port->sdr_base + PESDRn_LOOP);
if (!(val & 0x00001000))
ppc405ex_pcie_phy_reset(port);
dcr_write(port->dcrs, DCRO_PEGPL_CFG, 0x10000000); /* guarded on */
port->has_ibpre = 1;
return ppc4xx_pciex_port_reset_sdr(port);
}
static int ppc405ex_pciex_init_utl(struct ppc4xx_pciex_port *port)
{
dcr_write(port->dcrs, DCRO_PEGPL_SPECIAL, 0x0);
/*
* Set buffer allocations and then assert VRB and TXE.
*/
out_be32(port->utl_base + PEUTL_OUTTR, 0x02000000);
out_be32(port->utl_base + PEUTL_INTR, 0x02000000);
out_be32(port->utl_base + PEUTL_OPDBSZ, 0x04000000);
out_be32(port->utl_base + PEUTL_PBBSZ, 0x21000000);
out_be32(port->utl_base + PEUTL_IPHBSZ, 0x02000000);
out_be32(port->utl_base + PEUTL_IPDBSZ, 0x04000000);
out_be32(port->utl_base + PEUTL_RCIRQEN, 0x00f00000);
out_be32(port->utl_base + PEUTL_PCTL, 0x80800066);
out_be32(port->utl_base + PEUTL_PBCTL, 0x08000000);
return 0;
}
static struct ppc4xx_pciex_hwops ppc405ex_pcie_hwops __initdata =
{
.want_sdr = true,
.core_init = ppc405ex_pciex_core_init,
.port_init_hw = ppc405ex_pciex_init_port_hw,
.setup_utl = ppc405ex_pciex_init_utl,
.check_link = ppc4xx_pciex_check_link_sdr,
};
#endif /* CONFIG_40x */
#ifdef CONFIG_476FPE
static int __init ppc_476fpe_pciex_core_init(struct device_node *np)
{
return 4;
}
static void __init ppc_476fpe_pciex_check_link(struct ppc4xx_pciex_port *port)
{
u32 timeout_ms = 20;
u32 val = 0, mask = (PECFG_TLDLP_LNKUP|PECFG_TLDLP_PRESENT);
void __iomem *mbase = ioremap(port->cfg_space.start + 0x10000000,
0x1000);
printk(KERN_INFO "PCIE%d: Checking link...\n", port->index);
if (mbase == NULL) {
printk(KERN_WARNING "PCIE%d: failed to get cfg space\n",
port->index);
return;
}
while (timeout_ms--) {
val = in_le32(mbase + PECFG_TLDLP);
if ((val & mask) == mask)
break;
msleep(10);
}
if (val & PECFG_TLDLP_PRESENT) {
printk(KERN_INFO "PCIE%d: link is up !\n", port->index);
port->link = 1;
} else
printk(KERN_WARNING "PCIE%d: Link up failed\n", port->index);
iounmap(mbase);
}
static struct ppc4xx_pciex_hwops ppc_476fpe_pcie_hwops __initdata =
{
.core_init = ppc_476fpe_pciex_core_init,
.check_link = ppc_476fpe_pciex_check_link,
};
#endif /* CONFIG_476FPE */
/* Check that the core has been initialized and, if not, do it */
static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
{
static int core_init;
int count = -ENODEV;
if (core_init++)
return 0;
#ifdef CONFIG_44x
if (of_device_is_compatible(np, "ibm,plb-pciex-440spe")) {
if (ppc440spe_revA())
ppc4xx_pciex_hwops = &ppc440speA_pcie_hwops;
else
ppc4xx_pciex_hwops = &ppc440speB_pcie_hwops;
}
if (of_device_is_compatible(np, "ibm,plb-pciex-460ex"))
ppc4xx_pciex_hwops = &ppc460ex_pcie_hwops;
if (of_device_is_compatible(np, "ibm,plb-pciex-460sx"))
ppc4xx_pciex_hwops = &ppc460sx_pcie_hwops;
if (of_device_is_compatible(np, "ibm,plb-pciex-apm821xx"))
ppc4xx_pciex_hwops = &apm821xx_pcie_hwops;
#endif /* CONFIG_44x */
#ifdef CONFIG_40x
if (of_device_is_compatible(np, "ibm,plb-pciex-405ex"))
ppc4xx_pciex_hwops = &ppc405ex_pcie_hwops;
#endif
#ifdef CONFIG_476FPE
if (of_device_is_compatible(np, "ibm,plb-pciex-476fpe")
|| of_device_is_compatible(np, "ibm,plb-pciex-476gtr"))
ppc4xx_pciex_hwops = &ppc_476fpe_pcie_hwops;
#endif
if (ppc4xx_pciex_hwops == NULL) {
printk(KERN_WARNING "PCIE: unknown host type %pOF\n", np);
return -ENODEV;
}
count = ppc4xx_pciex_hwops->core_init(np);
if (count > 0) {
ppc4xx_pciex_ports =
kcalloc(count, sizeof(struct ppc4xx_pciex_port),
GFP_KERNEL);
if (ppc4xx_pciex_ports) {
ppc4xx_pciex_port_count = count;
return 0;
}
printk(KERN_WARNING "PCIE: failed to allocate ports array\n");
return -ENOMEM;
}
return -ENODEV;
}
static void __init ppc4xx_pciex_port_init_mapping(struct ppc4xx_pciex_port *port)
{
/* We map PCI Express configuration based on the reg property */
dcr_write(port->dcrs, DCRO_PEGPL_CFGBAH,
RES_TO_U32_HIGH(port->cfg_space.start));
dcr_write(port->dcrs, DCRO_PEGPL_CFGBAL,
RES_TO_U32_LOW(port->cfg_space.start));
/* XXX FIXME: Use size from reg property. For now, map 512M */
dcr_write(port->dcrs, DCRO_PEGPL_CFGMSK, 0xe0000001);
/* We map UTL registers based on the reg property */
dcr_write(port->dcrs, DCRO_PEGPL_REGBAH,
RES_TO_U32_HIGH(port->utl_regs.start));
dcr_write(port->dcrs, DCRO_PEGPL_REGBAL,
RES_TO_U32_LOW(port->utl_regs.start));
/* XXX FIXME: Use size from reg property */
dcr_write(port->dcrs, DCRO_PEGPL_REGMSK, 0x00007001);
/* Disable all other outbound windows */
dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL, 0);
dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL, 0);
dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL, 0);
dcr_write(port->dcrs, DCRO_PEGPL_MSGMSK, 0);
}
static int __init ppc4xx_pciex_port_init(struct ppc4xx_pciex_port *port)
{
int rc = 0;
/* Init HW */
if (ppc4xx_pciex_hwops->port_init_hw)
rc = ppc4xx_pciex_hwops->port_init_hw(port);
if (rc != 0)
return rc;
/*
* Initialize mapping: disable all regions and configure
* CFG and REG regions based on resources in the device tree
*/
ppc4xx_pciex_port_init_mapping(port);
if (ppc4xx_pciex_hwops->check_link)
ppc4xx_pciex_hwops->check_link(port);
/*
* Map UTL
*/
port->utl_base = ioremap(port->utl_regs.start, 0x100);
BUG_ON(port->utl_base == NULL);
/*
* Setup UTL registers --BenH.
*/
if (ppc4xx_pciex_hwops->setup_utl)
ppc4xx_pciex_hwops->setup_utl(port);
/*
* Check for VC0 active or PLL Locked and assert RDY.
*/
if (port->sdr_base) {
if (of_device_is_compatible(port->node,
"ibm,plb-pciex-460sx")){
if (port->link && ppc4xx_pciex_wait_on_sdr(port,
PESDRn_RCSSTS,
1 << 12, 1 << 12, 5000)) {
printk(KERN_INFO "PCIE%d: PLL not locked\n",
port->index);
port->link = 0;
}
} else if (port->link &&
ppc4xx_pciex_wait_on_sdr(port, PESDRn_RCSSTS,
1 << 16, 1 << 16, 5000)) {
printk(KERN_INFO "PCIE%d: VC0 not active\n",
port->index);
port->link = 0;
}
dcri_clrset(SDR0, port->sdr_base + PESDRn_RCSSET, 0, 1 << 20);
}
msleep(100);
return 0;
}
static int ppc4xx_pciex_validate_bdf(struct ppc4xx_pciex_port *port,
struct pci_bus *bus,
unsigned int devfn)
{
static int message;
/* An endpoint cannot generate upstream (remote) config cycles */
if (port->endpoint && bus->number != port->hose->first_busno)
return PCIBIOS_DEVICE_NOT_FOUND;
/* Check we are within the mapped range */
if (bus->number > port->hose->last_busno) {
if (!message) {
printk(KERN_WARNING "Warning! Probing bus %u"
" out of range !\n", bus->number);
message++;
}
return PCIBIOS_DEVICE_NOT_FOUND;
}
/* The root complex has only one device / function */
if (bus->number == port->hose->first_busno && devfn != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
/* The other side of the RC has only one device as well */
if (bus->number == (port->hose->first_busno + 1) &&
PCI_SLOT(devfn) != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
/* Check if we have a link */
if ((bus->number != port->hose->first_busno) && !port->link)
return PCIBIOS_DEVICE_NOT_FOUND;
return 0;
}
static void __iomem *ppc4xx_pciex_get_config_base(struct ppc4xx_pciex_port *port,
struct pci_bus *bus,
unsigned int devfn)
{
int relbus;
/* Remove the casts when we finally remove the stupid volatile
* in struct pci_controller
*/
if (bus->number == port->hose->first_busno)
return (void __iomem *)port->hose->cfg_addr;
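/* The external config window gives each downstream bus 1 MB, split into
 * 4 KB per devfn, hence the (relbus << 20) | (devfn << 12) offset below.
 */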
relbus = bus->number - (port->hose->first_busno + 1);
return (void __iomem *)port->hose->cfg_data +
((relbus << 20) | (devfn << 12));
}
static int ppc4xx_pciex_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
struct ppc4xx_pciex_port *port =
&ppc4xx_pciex_ports[hose->indirect_type];
void __iomem *addr;
u32 gpl_cfg;
BUG_ON(hose != port->hose);
if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
addr = ppc4xx_pciex_get_config_base(port, bus, devfn);
/*
* Reading from the configuration space of a non-existent device can
* generate transaction errors. For the duration of the read we suppress
* assertion of machine check exceptions to avoid those.
*/
gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);
/* Make sure no CRS is recorded */
out_be32(port->utl_base + PEUTL_RCSTA, 0x00040000);
switch (len) {
case 1:
*val = in_8((u8 *)(addr + offset));
break;
case 2:
*val = in_le16((u16 *)(addr + offset));
break;
default:
*val = in_le32((u32 *)(addr + offset));
break;
}
pr_debug("pcie-config-read: bus=%3d [%3d..%3d] devfn=0x%04x"
" offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
bus->number, hose->first_busno, hose->last_busno,
devfn, offset, len, addr + offset, *val);
/* Check for CRS (440SPe rev B does that for us but heh ..) */
if (in_be32(port->utl_base + PEUTL_RCSTA) & 0x00040000) {
pr_debug("Got CRS !\n");
if (len != 4 || offset != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
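/* Return the CRS indication (vendor ID 0x0001, i.e. dword 0xffff0001)
 * so the caller knows the device exists but is not ready yet.
 */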
*val = 0xffff0001;
}
dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
return PCIBIOS_SUCCESSFUL;
}
static int ppc4xx_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
struct ppc4xx_pciex_port *port =
&ppc4xx_pciex_ports[hose->indirect_type];
void __iomem *addr;
u32 gpl_cfg;
if (ppc4xx_pciex_validate_bdf(port, bus, devfn) != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
addr = ppc4xx_pciex_get_config_base(port, bus, devfn);
/*
* Accessing the configuration space of a non-existent device can
* generate transaction errors. For the duration of the access we suppress
* assertion of machine check exceptions to avoid those.
*/
gpl_cfg = dcr_read(port->dcrs, DCRO_PEGPL_CFG);
dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg | GPL_DMER_MASK_DISA);
pr_debug("pcie-config-write: bus=%3d [%3d..%3d] devfn=0x%04x"
" offset=0x%04x len=%d, addr=0x%p val=0x%08x\n",
bus->number, hose->first_busno, hose->last_busno,
devfn, offset, len, addr + offset, val);
switch (len) {
case 1:
out_8((u8 *)(addr + offset), val);
break;
case 2:
out_le16((u16 *)(addr + offset), val);
break;
default:
out_le32((u32 *)(addr + offset), val);
break;
}
dcr_write(port->dcrs, DCRO_PEGPL_CFG, gpl_cfg);
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops ppc4xx_pciex_pci_ops =
{
.read = ppc4xx_pciex_read_config,
.write = ppc4xx_pciex_write_config,
};
static int __init ppc4xx_setup_one_pciex_POM(struct ppc4xx_pciex_port *port,
struct pci_controller *hose,
void __iomem *mbase,
u64 plb_addr,
u64 pci_addr,
u64 size,
unsigned int flags,
int index)
{
u32 lah, lal, pciah, pcial, sa;
if (!is_power_of_2(size) ||
(index < 2 && size < 0x100000) ||
(index == 2 && size < 0x100) ||
(plb_addr & (size - 1)) != 0) {
printk(KERN_WARNING "%pOF: Resource out of range\n", hose->dn);
return -1;
}
/* Calculate register values */
lah = RES_TO_U32_HIGH(plb_addr);
lal = RES_TO_U32_LOW(plb_addr);
pciah = RES_TO_U32_HIGH(pci_addr);
pcial = RES_TO_U32_LOW(pci_addr);
sa = (0xffffffffu << ilog2(size)) | 0x1;
/* Program register values */
switch (index) {
case 0:
out_le32(mbase + PECFG_POM0LAH, pciah);
out_le32(mbase + PECFG_POM0LAL, pcial);
dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAH, lah);
dcr_write(port->dcrs, DCRO_PEGPL_OMR1BAL, lal);
dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKH, 0x7fffffff);
/*Enabled and single region */
if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
sa | DCRO_PEGPL_460SX_OMR1MSKL_UOT
| DCRO_PEGPL_OMRxMSKL_VAL);
else if (of_device_is_compatible(
port->node, "ibm,plb-pciex-476fpe") ||
of_device_is_compatible(
port->node, "ibm,plb-pciex-476gtr"))
dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
sa | DCRO_PEGPL_476FPE_OMR1MSKL_UOT
| DCRO_PEGPL_OMRxMSKL_VAL);
else
dcr_write(port->dcrs, DCRO_PEGPL_OMR1MSKL,
sa | DCRO_PEGPL_OMR1MSKL_UOT
| DCRO_PEGPL_OMRxMSKL_VAL);
break;
case 1:
out_le32(mbase + PECFG_POM1LAH, pciah);
out_le32(mbase + PECFG_POM1LAL, pcial);
dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAH, lah);
dcr_write(port->dcrs, DCRO_PEGPL_OMR2BAL, lal);
dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKH, 0x7fffffff);
dcr_write(port->dcrs, DCRO_PEGPL_OMR2MSKL,
sa | DCRO_PEGPL_OMRxMSKL_VAL);
break;
case 2:
out_le32(mbase + PECFG_POM2LAH, pciah);
out_le32(mbase + PECFG_POM2LAL, pcial);
dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAH, lah);
dcr_write(port->dcrs, DCRO_PEGPL_OMR3BAL, lal);
dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKH, 0x7fffffff);
/* Note that 3 here means enabled | IO space !!! */
dcr_write(port->dcrs, DCRO_PEGPL_OMR3MSKL,
sa | DCRO_PEGPL_OMR3MSKL_IO
| DCRO_PEGPL_OMRxMSKL_VAL);
break;
}
return 0;
}
static void __init ppc4xx_configure_pciex_POMs(struct ppc4xx_pciex_port *port,
struct pci_controller *hose,
void __iomem *mbase)
{
int i, j, found_isa_hole = 0;
/* Setup outbound memory windows */
for (i = j = 0; i < 3; i++) {
struct resource *res = &hose->mem_resources[i];
resource_size_t offset = hose->mem_offset[i];
/* we only care about memory windows */
if (!(res->flags & IORESOURCE_MEM))
continue;
if (j > 1) {
printk(KERN_WARNING "%pOF: Too many ranges\n",
port->node);
break;
}
/* Configure the resource */
if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
res->start,
res->start - offset,
resource_size(res),
res->flags,
j) == 0) {
j++;
/* If the resource PCI address is 0 then we have our
* ISA memory hole
*/
if (res->start == offset)
found_isa_hole = 1;
}
}
/* Handle ISA memory hole if not already covered */
if (j <= 1 && !found_isa_hole && hose->isa_mem_size)
if (ppc4xx_setup_one_pciex_POM(port, hose, mbase,
hose->isa_mem_phys, 0,
hose->isa_mem_size, 0, j) == 0)
printk(KERN_INFO "%pOF: Legacy ISA memory support enabled\n",
hose->dn);
/* Configure IO, always 64K starting at 0. We hard wire it to 64K !
* Note also that it -has- to be region index 2 on this HW
*/
if (hose->io_resource.flags & IORESOURCE_IO)
ppc4xx_setup_one_pciex_POM(port, hose, mbase,
hose->io_base_phys, 0,
0x10000, IORESOURCE_IO, 2);
}
static void __init ppc4xx_configure_pciex_PIMs(struct ppc4xx_pciex_port *port,
struct pci_controller *hose,
void __iomem *mbase,
struct resource *res)
{
resource_size_t size = resource_size(res);
u64 sa;
if (port->endpoint) {
resource_size_t ep_addr = 0;
resource_size_t ep_size = 32 << 20;
/* Currently we map a fixed 32 MB window to PLB address
* 0 (SDRAM). This should probably be configurable via a dts
* property.
*/
/* Calculate window size */
sa = (0xffffffffffffffffull << ilog2(ep_size));
/* Setup BAR0 */
out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa) |
PCI_BASE_ADDRESS_MEM_TYPE_64);
/* Disable BAR1 & BAR2 */
out_le32(mbase + PECFG_BAR1MPA, 0);
out_le32(mbase + PECFG_BAR2HMPA, 0);
out_le32(mbase + PECFG_BAR2LMPA, 0);
out_le32(mbase + PECFG_PIM01SAH, RES_TO_U32_HIGH(sa));
out_le32(mbase + PECFG_PIM01SAL, RES_TO_U32_LOW(sa));
out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(ep_addr));
out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(ep_addr));
} else {
/* Calculate window size */
sa = (0xffffffffffffffffull << ilog2(size));
if (res->flags & IORESOURCE_PREFETCH)
sa |= PCI_BASE_ADDRESS_MEM_PREFETCH;
if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx") ||
of_device_is_compatible(
port->node, "ibm,plb-pciex-476fpe") ||
of_device_is_compatible(
port->node, "ibm,plb-pciex-476gtr"))
sa |= PCI_BASE_ADDRESS_MEM_TYPE_64;
out_le32(mbase + PECFG_BAR0HMPA, RES_TO_U32_HIGH(sa));
out_le32(mbase + PECFG_BAR0LMPA, RES_TO_U32_LOW(sa));
/* The setup of the split looks weird to me ... let's see
* if it works
*/
out_le32(mbase + PECFG_PIM0LAL, 0x00000000);
out_le32(mbase + PECFG_PIM0LAH, 0x00000000);
out_le32(mbase + PECFG_PIM1LAL, 0x00000000);
out_le32(mbase + PECFG_PIM1LAH, 0x00000000);
out_le32(mbase + PECFG_PIM01SAH, 0xffff0000);
out_le32(mbase + PECFG_PIM01SAL, 0x00000000);
out_le32(mbase + PCI_BASE_ADDRESS_0, RES_TO_U32_LOW(res->start));
out_le32(mbase + PCI_BASE_ADDRESS_1, RES_TO_U32_HIGH(res->start));
}
/* Enable inbound mapping */
out_le32(mbase + PECFG_PIMEN, 0x1);
/* Enable I/O, Mem, and Busmaster cycles */
out_le16(mbase + PCI_COMMAND,
in_le16(mbase + PCI_COMMAND) |
PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
}
static void __init ppc4xx_pciex_port_setup_hose(struct ppc4xx_pciex_port *port)
{
struct resource dma_window;
struct pci_controller *hose = NULL;
const int *bus_range;
int primary, busses;
void __iomem *mbase = NULL, *cfg_data = NULL;
const u32 *pval;
u32 val;
/* Check if primary bridge */
primary = of_property_read_bool(port->node, "primary");
/* Get bus range if any */
bus_range = of_get_property(port->node, "bus-range", NULL);
/* Allocate the host controller data structure */
hose = pcibios_alloc_controller(port->node);
if (!hose)
goto fail;
/* We stick the port number in "indirect_type" so the config space
* ops can retrieve the port data structure easily
*/
hose->indirect_type = port->index;
/* Get bus range */
hose->first_busno = bus_range ? bus_range[0] : 0x0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
/* Because the config space mapping is so big (1 MB per bus), we
* limit how many buses we support. In the long run, we could replace
* that with something akin to kmap_atomic instead. We set aside 1 bus
* for the host itself too.
*/
busses = hose->last_busno - hose->first_busno; /* This is off by 1 */
if (busses > MAX_PCIE_BUS_MAPPED) {
busses = MAX_PCIE_BUS_MAPPED;
hose->last_busno = hose->first_busno + busses;
}
if (!port->endpoint) {
/* Only map the external config space in cfg_data for
* PCIe root-complexes. External space is 1M per bus
*/
cfg_data = ioremap(port->cfg_space.start +
(hose->first_busno + 1) * 0x100000,
busses * 0x100000);
if (cfg_data == NULL) {
printk(KERN_ERR "%pOF: Can't map external config space !",
port->node);
goto fail;
}
hose->cfg_data = cfg_data;
}
/* Always map the host config space in cfg_addr.
* Internal space is 4K
*/
mbase = ioremap(port->cfg_space.start + 0x10000000, 0x1000);
if (mbase == NULL) {
printk(KERN_ERR "%pOF: Can't map internal config space !",
port->node);
goto fail;
}
hose->cfg_addr = mbase;
pr_debug("PCIE %pOF, bus %d..%d\n", port->node,
hose->first_busno, hose->last_busno);
pr_debug(" config space mapped at: root @0x%p, other @0x%p\n",
hose->cfg_addr, hose->cfg_data);
/* Setup config space */
hose->ops = &ppc4xx_pciex_pci_ops;
port->hose = hose;
mbase = (void __iomem *)hose->cfg_addr;
if (!port->endpoint) {
/*
* Set bus numbers on our root port
*/
out_8(mbase + PCI_PRIMARY_BUS, hose->first_busno);
out_8(mbase + PCI_SECONDARY_BUS, hose->first_busno + 1);
out_8(mbase + PCI_SUBORDINATE_BUS, hose->last_busno);
}
/*
* OMRs are already reset, also disable PIMs
*/
out_le32(mbase + PECFG_PIMEN, 0);
/* Parse outbound mapping resources */
pci_process_bridge_OF_ranges(hose, port->node, primary);
/* Parse inbound mapping resources */
if (ppc4xx_parse_dma_ranges(hose, mbase, &dma_window) != 0)
goto fail;
/* Configure outbound ranges POMs */
ppc4xx_configure_pciex_POMs(port, hose, mbase);
/* Configure inbound ranges PIMs */
ppc4xx_configure_pciex_PIMs(port, hose, mbase, &dma_window);
/* The root complex doesn't show up if we don't set some vendor
* and device IDs into it. The defaults below are the same bogus
* one that the initial code in arch/ppc had. This can be
* overwritten by setting the "vendor-id/device-id" properties
* in the pciex node.
*/
/* Get the (optional) vendor-/device-id from the device-tree */
pval = of_get_property(port->node, "vendor-id", NULL);
if (pval) {
val = *pval;
} else {
if (!port->endpoint)
val = 0xaaa0 + port->index;
else
val = 0xeee0 + port->index;
}
out_le16(mbase + 0x200, val);
pval = of_get_property(port->node, "device-id", NULL);
if (pval) {
val = *pval;
} else {
if (!port->endpoint)
val = 0xbed0 + port->index;
else
val = 0xfed0 + port->index;
}
out_le16(mbase + 0x202, val);
/* Enable Bus master, memory, and io space */
if (of_device_is_compatible(port->node, "ibm,plb-pciex-460sx"))
out_le16(mbase + 0x204, 0x7);
if (!port->endpoint) {
/* Set Class Code to PCI-PCI bridge and Revision Id to 1 */
out_le32(mbase + 0x208, 0x06040001);
printk(KERN_INFO "PCIE%d: successfully set as root-complex\n",
port->index);
} else {
/* Set Class Code to Processor/PPC */
out_le32(mbase + 0x208, 0x0b200001);
printk(KERN_INFO "PCIE%d: successfully set as endpoint\n",
port->index);
}
return;
fail:
if (hose)
pcibios_free_controller(hose);
if (cfg_data)
iounmap(cfg_data);
if (mbase)
iounmap(mbase);
}
static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
{
struct ppc4xx_pciex_port *port;
const u32 *pval;
int portno;
unsigned int dcrs;
/* First, proceed to core initialization as we assume there's
* only one PCIe core in the system
*/
if (ppc4xx_pciex_check_core_init(np))
return;
/* Get the port number from the device-tree */
pval = of_get_property(np, "port", NULL);
if (pval == NULL) {
printk(KERN_ERR "PCIE: Can't find port number for %pOF\n", np);
return;
}
portno = *pval;
if (portno >= ppc4xx_pciex_port_count) {
printk(KERN_ERR "PCIE: port number out of range for %pOF\n",
np);
return;
}
port = &ppc4xx_pciex_ports[portno];
port->index = portno;
/*
* Check if device is enabled
*/
if (!of_device_is_available(np)) {
printk(KERN_INFO "PCIE%d: Port disabled via device-tree\n", port->index);
return;
}
port->node = of_node_get(np);
if (ppc4xx_pciex_hwops->want_sdr) {
pval = of_get_property(np, "sdr-base", NULL);
if (pval == NULL) {
printk(KERN_ERR "PCIE: missing sdr-base for %pOF\n",
np);
return;
}
port->sdr_base = *pval;
}
/* Check if device_type property is set to "pci" or "pci-endpoint".
* Depending on this setting, the PCIe port will be configured as a
* root complex or as an endpoint.
*/
if (of_node_is_type(port->node, "pci-endpoint")) {
port->endpoint = 1;
} else if (of_node_is_type(port->node, "pci")) {
port->endpoint = 0;
} else {
printk(KERN_ERR "PCIE: missing or incorrect device_type for %pOF\n",
np);
return;
}
/* Fetch config space registers address */
if (of_address_to_resource(np, 0, &port->cfg_space)) {
printk(KERN_ERR "%pOF: Can't get PCI-E config space !", np);
return;
}
/* Fetch host bridge internal registers address */
if (of_address_to_resource(np, 1, &port->utl_regs)) {
printk(KERN_ERR "%pOF: Can't get UTL register base !", np);
return;
}
/* Map DCRs */
dcrs = dcr_resource_start(np, 0);
if (dcrs == 0) {
printk(KERN_ERR "%pOF: Can't get DCR register base !", np);
return;
}
port->dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
/* Initialize the port specific registers */
if (ppc4xx_pciex_port_init(port)) {
printk(KERN_WARNING "PCIE%d: Port init failed\n", port->index);
return;
}
/* Setup the linux hose data structure */
ppc4xx_pciex_port_setup_hose(port);
}
#endif /* CONFIG_PPC4xx_PCI_EXPRESS */
static int __init ppc4xx_pci_find_bridges(void)
{
struct device_node *np;
pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);
#ifdef CONFIG_PPC4xx_PCI_EXPRESS
for_each_compatible_node(np, NULL, "ibm,plb-pciex")
ppc4xx_probe_pciex_bridge(np);
#endif
for_each_compatible_node(np, NULL, "ibm,plb-pcix")
ppc4xx_probe_pcix_bridge(np);
for_each_compatible_node(np, NULL, "ibm,plb-pci")
ppc4xx_probe_pci_bridge(np);
return 0;
}
arch_initcall(ppc4xx_pci_find_bridges);
| linux-master | arch/powerpc/platforms/4xx/pci.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IBM/AMCC PPC4xx SoC setup code
*
* Copyright 2008 DENX Software Engineering, Stefan Roese <[email protected]>
*
* L2 cache routines cloned from arch/ppc/syslib/ibm440gx_common.c which is:
* Eugene Surovegin <[email protected]> or <[email protected]>
* Copyright (c) 2003 - 2006 Zultys Technologies
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/reg.h>
#include <asm/ppc4xx.h>
static u32 dcrbase_l2c;
/*
* L2-cache
*/
/* Issue L2C diagnostic command */
static inline u32 l2c_diag(u32 addr)
{
mtdcr(dcrbase_l2c + DCRN_L2C0_ADDR, addr);
mtdcr(dcrbase_l2c + DCRN_L2C0_CMD, L2C_CMD_DIAG);
while (!(mfdcr(dcrbase_l2c + DCRN_L2C0_SR) & L2C_SR_CC))
;
return mfdcr(dcrbase_l2c + DCRN_L2C0_DATA);
}
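/*
 * l2c_error_handler() below uses this helper with command word
 * 0x42000000 to read the trapped cache-parity address and 0x82000000
 * (shifted right by 16) for the trapped tag-parity address.
 */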
static irqreturn_t l2c_error_handler(int irq, void *dev)
{
u32 sr = mfdcr(dcrbase_l2c + DCRN_L2C0_SR);
if (sr & L2C_SR_CPE) {
/* Read cache trapped address */
u32 addr = l2c_diag(0x42000000);
printk(KERN_EMERG "L2C: Cache Parity Error, addr[16:26] = 0x%08x\n",
addr);
}
if (sr & L2C_SR_TPE) {
/* Read tag trapped address */
u32 addr = l2c_diag(0x82000000) >> 16;
printk(KERN_EMERG "L2C: Tag Parity Error, addr[16:26] = 0x%08x\n",
addr);
}
/* Clear parity errors */
if (sr & (L2C_SR_CPE | L2C_SR_TPE)){
mtdcr(dcrbase_l2c + DCRN_L2C0_ADDR, 0);
mtdcr(dcrbase_l2c + DCRN_L2C0_CMD, L2C_CMD_CCP | L2C_CMD_CTE);
} else {
printk(KERN_EMERG "L2C: LRU error\n");
}
return IRQ_HANDLED;
}
static int __init ppc4xx_l2c_probe(void)
{
struct device_node *np;
u32 r;
unsigned long flags;
int irq;
const u32 *dcrreg;
u32 dcrbase_isram;
int len;
const u32 *prop;
u32 l2_size;
np = of_find_compatible_node(NULL, NULL, "ibm,l2-cache");
if (!np)
return 0;
/* Get l2 cache size */
prop = of_get_property(np, "cache-size", NULL);
if (prop == NULL) {
printk(KERN_ERR "%pOF: Can't get cache-size!\n", np);
of_node_put(np);
return -ENODEV;
}
l2_size = prop[0];
/* Map DCRs */
dcrreg = of_get_property(np, "dcr-reg", &len);
if (!dcrreg || (len != 4 * sizeof(u32))) {
printk(KERN_ERR "%pOF: Can't get DCR register base !", np);
of_node_put(np);
return -ENODEV;
}
dcrbase_isram = dcrreg[0];
dcrbase_l2c = dcrreg[2];
/* Get and map irq number from device tree */
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
printk(KERN_ERR "irq_of_parse_and_map failed\n");
of_node_put(np);
return -ENODEV;
}
/* Install error handler */
	if (request_irq(irq, l2c_error_handler, 0, "L2C", NULL) < 0) {
printk(KERN_ERR "Cannot install L2C error handler"
", cache is not enabled\n");
of_node_put(np);
return -ENODEV;
}
local_irq_save(flags);
asm volatile ("sync" ::: "memory");
/* Disable SRAM */
mtdcr(dcrbase_isram + DCRN_SRAM0_DPC,
mfdcr(dcrbase_isram + DCRN_SRAM0_DPC) & ~SRAM_DPC_ENABLE);
mtdcr(dcrbase_isram + DCRN_SRAM0_SB0CR,
mfdcr(dcrbase_isram + DCRN_SRAM0_SB0CR) & ~SRAM_SBCR_BU_MASK);
mtdcr(dcrbase_isram + DCRN_SRAM0_SB1CR,
mfdcr(dcrbase_isram + DCRN_SRAM0_SB1CR) & ~SRAM_SBCR_BU_MASK);
mtdcr(dcrbase_isram + DCRN_SRAM0_SB2CR,
mfdcr(dcrbase_isram + DCRN_SRAM0_SB2CR) & ~SRAM_SBCR_BU_MASK);
mtdcr(dcrbase_isram + DCRN_SRAM0_SB3CR,
mfdcr(dcrbase_isram + DCRN_SRAM0_SB3CR) & ~SRAM_SBCR_BU_MASK);
/* Enable L2_MODE without ICU/DCU */
r = mfdcr(dcrbase_l2c + DCRN_L2C0_CFG) &
~(L2C_CFG_ICU | L2C_CFG_DCU | L2C_CFG_SS_MASK);
r |= L2C_CFG_L2M | L2C_CFG_SS_256;
mtdcr(dcrbase_l2c + DCRN_L2C0_CFG, r);
mtdcr(dcrbase_l2c + DCRN_L2C0_ADDR, 0);
/* Hardware Clear Command */
mtdcr(dcrbase_l2c + DCRN_L2C0_CMD, L2C_CMD_HCC);
while (!(mfdcr(dcrbase_l2c + DCRN_L2C0_SR) & L2C_SR_CC))
;
/* Clear Cache Parity and Tag Errors */
mtdcr(dcrbase_l2c + DCRN_L2C0_CMD, L2C_CMD_CCP | L2C_CMD_CTE);
/* Enable 64G snoop region starting at 0 */
r = mfdcr(dcrbase_l2c + DCRN_L2C0_SNP0) &
~(L2C_SNP_BA_MASK | L2C_SNP_SSR_MASK);
r |= L2C_SNP_SSR_32G | L2C_SNP_ESR;
mtdcr(dcrbase_l2c + DCRN_L2C0_SNP0, r);
r = mfdcr(dcrbase_l2c + DCRN_L2C0_SNP1) &
~(L2C_SNP_BA_MASK | L2C_SNP_SSR_MASK);
r |= 0x80000000 | L2C_SNP_SSR_32G | L2C_SNP_ESR;
mtdcr(dcrbase_l2c + DCRN_L2C0_SNP1, r);
asm volatile ("sync" ::: "memory");
/* Enable ICU/DCU ports */
r = mfdcr(dcrbase_l2c + DCRN_L2C0_CFG);
r &= ~(L2C_CFG_DCW_MASK | L2C_CFG_PMUX_MASK | L2C_CFG_PMIM
| L2C_CFG_TPEI | L2C_CFG_CPEI | L2C_CFG_NAM | L2C_CFG_NBRM);
r |= L2C_CFG_ICU | L2C_CFG_DCU | L2C_CFG_TPC | L2C_CFG_CPC | L2C_CFG_FRAN
| L2C_CFG_CPIM | L2C_CFG_TPIM | L2C_CFG_LIM | L2C_CFG_SMCM;
/* Check for 460EX/GT special handling */
if (of_device_is_compatible(np, "ibm,l2-cache-460ex") ||
of_device_is_compatible(np, "ibm,l2-cache-460gt"))
r |= L2C_CFG_RDBW;
mtdcr(dcrbase_l2c + DCRN_L2C0_CFG, r);
asm volatile ("sync; isync" ::: "memory");
local_irq_restore(flags);
printk(KERN_INFO "%dk L2-cache enabled\n", l2_size >> 10);
of_node_put(np);
return 0;
}
arch_initcall(ppc4xx_l2c_probe);
/*
* Apply a system reset. Alternatively a board specific value may be
* provided via the "reset-type" property in the cpu node.
*/
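/*
 * The optional property value (1 = core, 2 = chip, 3 = system reset) is
 * shifted into the DBCR0[RST] field below, so a value of 3 corresponds
 * to the DBCR0_RST_SYSTEM default used when the property is absent.
 */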
void ppc4xx_reset_system(char *cmd)
{
struct device_node *np;
u32 reset_type = DBCR0_RST_SYSTEM;
const u32 *prop;
np = of_get_cpu_node(0, NULL);
if (np) {
prop = of_get_property(np, "reset-type", NULL);
/*
* Check if property exists and if it is in range:
* 1 - PPC4xx core reset
* 2 - PPC4xx chip reset
* 3 - PPC4xx system reset (default)
*/
if ((prop) && ((prop[0] >= 1) && (prop[0] <= 3)))
reset_type = prop[0] << 28;
}
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | reset_type);
while (1)
; /* Just in case the reset doesn't work */
}
| linux-master | arch/powerpc/platforms/4xx/soc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MSI support for PPC4xx SoCs using High Speed Transfer Assist (HSTA) for
* generation of the interrupt.
*
* Copyright © 2013 Alistair Popple <[email protected]> IBM Corporation
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/semaphore.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
struct ppc4xx_hsta_msi {
struct device *dev;
/* The ioremapped HSTA MSI IO space */
u32 __iomem *data;
/* Physical address of HSTA MSI IO space */
u64 address;
struct msi_bitmap bmp;
/* An array mapping offsets to hardware IRQs */
int *irq_map;
/* Number of hwirqs supported */
int irq_count;
};
static struct ppc4xx_hsta_msi ppc4xx_hsta_msi;
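/*
 * Overview: hsta_msi_probe() maps the HSTA MMIO window, builds irq_map[]
 * from the interrupts listed in the device tree and points each PHB's
 * setup/teardown_msi_irqs hooks at the functions below;
 * hsta_setup_msi_irqs() then hands out one 16-byte slot of the HSTA
 * window per allocated MSI.
 */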
static int hsta_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
struct msi_msg msg;
struct msi_desc *entry;
int irq, hwirq;
u64 addr;
/* We don't support MSI-X */
if (type == PCI_CAP_ID_MSIX) {
pr_debug("%s: MSI-X not supported.\n", __func__);
return -EINVAL;
}
msi_for_each_desc(entry, &dev->dev, MSI_DESC_NOTASSOCIATED) {
irq = msi_bitmap_alloc_hwirqs(&ppc4xx_hsta_msi.bmp, 1);
if (irq < 0) {
pr_debug("%s: Failed to allocate msi interrupt\n",
__func__);
return irq;
}
hwirq = ppc4xx_hsta_msi.irq_map[irq];
if (!hwirq) {
pr_err("%s: Failed mapping irq %d\n", __func__, irq);
return -EINVAL;
}
/*
* HSTA generates interrupts on writes to 128-bit aligned
* addresses.
*/
addr = ppc4xx_hsta_msi.address + irq*0x10;
msg.address_hi = upper_32_bits(addr);
msg.address_lo = lower_32_bits(addr);
/* Data is not used by the HSTA. */
msg.data = 0;
pr_debug("%s: Setup irq %d (0x%0llx)\n", __func__, hwirq,
(((u64) msg.address_hi) << 32) | msg.address_lo);
if (irq_set_msi_desc(hwirq, entry)) {
pr_err(
"%s: Invalid hwirq %d specified in device tree\n",
__func__, hwirq);
msi_bitmap_free_hwirqs(&ppc4xx_hsta_msi.bmp, irq, 1);
return -EINVAL;
}
pci_write_msi_msg(hwirq, &msg);
}
return 0;
}
static int hsta_find_hwirq_offset(int hwirq)
{
int irq;
/* Find the offset given the hwirq */
for (irq = 0; irq < ppc4xx_hsta_msi.irq_count; irq++)
if (ppc4xx_hsta_msi.irq_map[irq] == hwirq)
return irq;
return -EINVAL;
}
static void hsta_teardown_msi_irqs(struct pci_dev *dev)
{
struct msi_desc *entry;
int irq;
msi_for_each_desc(entry, &dev->dev, MSI_DESC_ASSOCIATED) {
irq = hsta_find_hwirq_offset(entry->irq);
/* entry->irq should always be in irq_map */
BUG_ON(irq < 0);
irq_set_msi_desc(entry->irq, NULL);
msi_bitmap_free_hwirqs(&ppc4xx_hsta_msi.bmp, irq, 1);
pr_debug("%s: Teardown IRQ %u (index %u)\n", __func__,
entry->irq, irq);
entry->irq = 0;
}
}
static int hsta_msi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *mem;
int irq, ret, irq_count;
struct pci_controller *phb;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(dev, "Unable to get mmio space\n");
return -EINVAL;
}
irq_count = of_irq_count(dev->of_node);
if (!irq_count) {
dev_err(dev, "Unable to find IRQ range\n");
return -EINVAL;
}
ppc4xx_hsta_msi.dev = dev;
ppc4xx_hsta_msi.address = mem->start;
ppc4xx_hsta_msi.data = ioremap(mem->start, resource_size(mem));
ppc4xx_hsta_msi.irq_count = irq_count;
if (!ppc4xx_hsta_msi.data) {
dev_err(dev, "Unable to map memory\n");
return -ENOMEM;
}
ret = msi_bitmap_alloc(&ppc4xx_hsta_msi.bmp, irq_count, dev->of_node);
if (ret)
goto out;
ppc4xx_hsta_msi.irq_map = kmalloc_array(irq_count, sizeof(int),
GFP_KERNEL);
if (!ppc4xx_hsta_msi.irq_map) {
ret = -ENOMEM;
goto out1;
}
/* Setup a mapping from irq offsets to hardware irq numbers */
for (irq = 0; irq < irq_count; irq++) {
ppc4xx_hsta_msi.irq_map[irq] =
irq_of_parse_and_map(dev->of_node, irq);
if (!ppc4xx_hsta_msi.irq_map[irq]) {
dev_err(dev, "Unable to map IRQ\n");
ret = -EINVAL;
goto out2;
}
}
list_for_each_entry(phb, &hose_list, list_node) {
phb->controller_ops.setup_msi_irqs = hsta_setup_msi_irqs;
phb->controller_ops.teardown_msi_irqs = hsta_teardown_msi_irqs;
}
return 0;
out2:
kfree(ppc4xx_hsta_msi.irq_map);
out1:
msi_bitmap_free(&ppc4xx_hsta_msi.bmp);
out:
iounmap(ppc4xx_hsta_msi.data);
return ret;
}
static const struct of_device_id hsta_msi_ids[] = {
{
.compatible = "ibm,hsta-msi",
},
{}
};
static struct platform_driver hsta_msi_driver = {
.probe = hsta_msi_probe,
.driver = {
.name = "hsta-msi",
.of_match_table = hsta_msi_ids,
},
};
static int hsta_msi_init(void)
{
return platform_driver_register(&hsta_msi_driver);
}
subsys_initcall(hsta_msi_init);
| linux-master | arch/powerpc/platforms/4xx/hsta_msi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*/
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/ptrace.h>
#include <asm/reg.h>
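/*
 * Minimal 4xx machine check reporting: ESR_IMCP distinguishes an
 * instruction-side machine check (in which case the bit is cleared in
 * the ESR) from a data-side one. Returning 0 tells the generic machine
 * check path that nothing was recovered here.
 */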
int machine_check_4xx(struct pt_regs *regs)
{
unsigned long reason = regs->esr;
if (reason & ESR_IMCP) {
printk("Instruction");
mtspr(SPRN_ESR, reason & ~ESR_IMCP);
} else
printk("Data");
printk(" machine check in kernel mode.\n");
return 0;
}
| linux-master | arch/powerpc/platforms/4xx/machine_check.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Board setup routines for the Motorola/Emerson MVME5100.
*
* Copyright 2013 CSC Australia Pty. Ltd.
*
* Based on earlier code by:
*
* Matt Porter, MontaVista Software Inc.
* Copyright 2001 MontaVista Software Inc.
*
* Author: Stephen Chivers <[email protected]>
*/
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/i8259.h>
#include <asm/pci-bridge.h>
#include <asm/mpic.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
#define HAWK_MPIC_SIZE 0x00040000U
#define MVME5100_PCI_MEM_OFFSET 0x00000000
/* Board register addresses. */
#define BOARD_STATUS_REG 0xfef88080
#define BOARD_MODFAIL_REG 0xfef88090
#define BOARD_MODRST_REG 0xfef880a0
#define BOARD_TBEN_REG 0xfef880c0
#define BOARD_SW_READ_REG 0xfef880e0
#define BOARD_GEO_ADDR_REG 0xfef880e8
#define BOARD_EXT_FEATURE1_REG 0xfef880f0
#define BOARD_EXT_FEATURE2_REG 0xfef88100
static phys_addr_t pci_membase;
static u_char *restart;
static void mvme5100_8259_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
if (cascade_irq)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
}
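/*
 * The MPIC lives in the Hawk bridge's MMIO space; pci_membase is read
 * from the Hawk's BAR1 in mvme5100_add_bridge() (run earlier via the
 * discover_phbs hook) and is reused here for mpic_alloc() and for the
 * ISU at offset 0x10000.
 */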
static void __init mvme5100_pic_init(void)
{
struct mpic *mpic;
struct device_node *np;
struct device_node *cp = NULL;
unsigned int cirq;
unsigned long intack = 0;
const u32 *prop = NULL;
np = of_find_node_by_type(NULL, "open-pic");
if (!np) {
pr_err("Could not find open-pic node\n");
return;
}
mpic = mpic_alloc(np, pci_membase, 0, 16, 256, " OpenPIC ");
BUG_ON(mpic == NULL);
of_node_put(np);
mpic_assign_isu(mpic, 0, pci_membase + 0x10000);
mpic_init(mpic);
cp = of_find_compatible_node(NULL, NULL, "chrp,iic");
if (cp == NULL) {
pr_warn("mvme5100_pic_init: couldn't find i8259\n");
return;
}
cirq = irq_of_parse_and_map(cp, 0);
if (!cirq) {
pr_warn("mvme5100_pic_init: no cascade interrupt?\n");
return;
}
np = of_find_compatible_node(NULL, "pci", "mpc10x-pci");
if (np) {
prop = of_get_property(np, "8259-interrupt-acknowledge", NULL);
if (prop)
intack = prop[0];
of_node_put(np);
}
if (intack)
pr_debug("mvme5100_pic_init: PCI 8259 intack at 0x%016lx\n",
intack);
i8259_init(cp, intack);
of_node_put(cp);
irq_set_chained_handler(cirq, mvme5100_8259_cascade);
}
static int __init mvme5100_add_bridge(struct device_node *dev)
{
const int *bus_range;
int len;
struct pci_controller *hose;
unsigned short devid;
pr_info("Adding PCI host bridge %pOF\n", dev);
bus_range = of_get_property(dev, "bus-range", &len);
hose = pcibios_alloc_controller(dev);
if (hose == NULL)
return -ENOMEM;
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
setup_indirect_pci(hose, 0xfe000cf8, 0xfe000cfc, 0);
pci_process_bridge_OF_ranges(hose, dev, 1);
early_read_config_word(hose, 0, 0, PCI_DEVICE_ID, &devid);
if (devid != PCI_DEVICE_ID_MOTOROLA_HAWK) {
pr_err("HAWK PHB not present?\n");
return 0;
}
early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_1, &pci_membase);
if (pci_membase == 0) {
pr_err("HAWK PHB mibar not correctly set?\n");
return 0;
}
pr_info("mvme5100_pic_init: pci_membase: %x\n", pci_membase);
return 0;
}
static const struct of_device_id mvme5100_of_bus_ids[] __initconst = {
{ .compatible = "hawk-bridge", },
{},
};
/*
* Setup the architecture
*/
static void __init mvme5100_setup_arch(void)
{
if (ppc_md.progress)
ppc_md.progress("mvme5100_setup_arch()", 0);
restart = ioremap(BOARD_MODRST_REG, 4);
}
static void __init mvme5100_setup_pci(void)
{
struct device_node *np;
for_each_compatible_node(np, "pci", "hawk-pci")
mvme5100_add_bridge(np);
}
static void mvme5100_show_cpuinfo(struct seq_file *m)
{
seq_puts(m, "Vendor\t\t: Motorola/Emerson\n");
seq_puts(m, "Machine\t\t: MVME5100\n");
}
static void __noreturn mvme5100_restart(char *cmd)
{
local_irq_disable();
mtmsr(mfmsr() | MSR_IP);
out_8((u_char *) restart, 0x01);
while (1)
;
}
static int __init probe_of_platform_devices(void)
{
of_platform_bus_probe(NULL, mvme5100_of_bus_ids, NULL);
return 0;
}
machine_device_initcall(mvme5100, probe_of_platform_devices);
define_machine(mvme5100) {
.name = "MVME5100",
.compatible = "MVME5100",
.setup_arch = mvme5100_setup_arch,
.discover_phbs = mvme5100_setup_pci,
.init_IRQ = mvme5100_pic_init,
.show_cpuinfo = mvme5100_show_cpuinfo,
.get_irq = mpic_get_irq,
.restart = mvme5100_restart,
.progress = udbg_progress,
};
| linux-master | arch/powerpc/platforms/embedded6xx/mvme5100.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* arch/powerpc/platforms/embedded6xx/hlwd-pic.c
*
* Nintendo Wii "Hollywood" interrupt controller support.
* Copyright (C) 2009 The GameCube Linux Team
* Copyright (C) 2009 Albert Herranz
*/
#define DRV_MODULE_NAME "hlwd-pic"
#define pr_fmt(fmt) DRV_MODULE_NAME ": " fmt
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/io.h>
#include "hlwd-pic.h"
#define HLWD_NR_IRQS 32
/*
* Each interrupt has a corresponding bit in both
* the Interrupt Cause (ICR) and Interrupt Mask (IMR) registers.
*
* Enabling/disabling an interrupt line involves asserting/clearing
* the corresponding bit in IMR. ACK'ing a request simply involves
* asserting the corresponding bit in ICR.
*/
#define HW_BROADWAY_ICR 0x00
#define HW_BROADWAY_IMR 0x04
#define HW_STARLET_ICR 0x08
#define HW_STARLET_IMR 0x0c
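/*
 * The BROADWAY register pair is used by the PowerPC side, the STARLET
 * pair by the I/O processor; hlwd_pic_unmask() below also clears the
 * corresponding HW_STARLET_IMR bit so a line handled here is not
 * serviced by the Starlet as well.
 */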
/*
* IRQ chip hooks.
*
*/
static void hlwd_pic_mask_and_ack(struct irq_data *d)
{
int irq = irqd_to_hwirq(d);
void __iomem *io_base = irq_data_get_irq_chip_data(d);
u32 mask = 1 << irq;
clrbits32(io_base + HW_BROADWAY_IMR, mask);
out_be32(io_base + HW_BROADWAY_ICR, mask);
}
static void hlwd_pic_ack(struct irq_data *d)
{
int irq = irqd_to_hwirq(d);
void __iomem *io_base = irq_data_get_irq_chip_data(d);
out_be32(io_base + HW_BROADWAY_ICR, 1 << irq);
}
static void hlwd_pic_mask(struct irq_data *d)
{
int irq = irqd_to_hwirq(d);
void __iomem *io_base = irq_data_get_irq_chip_data(d);
clrbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
}
static void hlwd_pic_unmask(struct irq_data *d)
{
int irq = irqd_to_hwirq(d);
void __iomem *io_base = irq_data_get_irq_chip_data(d);
setbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
/* Make sure the ARM (aka. Starlet) doesn't handle this interrupt. */
clrbits32(io_base + HW_STARLET_IMR, 1 << irq);
}
static struct irq_chip hlwd_pic = {
.name = "hlwd-pic",
.irq_ack = hlwd_pic_ack,
.irq_mask_ack = hlwd_pic_mask_and_ack,
.irq_mask = hlwd_pic_mask,
.irq_unmask = hlwd_pic_unmask,
};
/*
* IRQ host hooks.
*
*/
static struct irq_domain *hlwd_irq_host;
static int hlwd_pic_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hwirq)
{
irq_set_chip_data(virq, h->host_data);
irq_set_status_flags(virq, IRQ_LEVEL);
irq_set_chip_and_handler(virq, &hlwd_pic, handle_level_irq);
return 0;
}
static const struct irq_domain_ops hlwd_irq_domain_ops = {
.map = hlwd_pic_map,
};
static unsigned int __hlwd_pic_get_irq(struct irq_domain *h)
{
void __iomem *io_base = h->host_data;
u32 irq_status;
irq_status = in_be32(io_base + HW_BROADWAY_ICR) &
in_be32(io_base + HW_BROADWAY_IMR);
if (irq_status == 0)
return 0; /* no more IRQs pending */
return __ffs(irq_status);
}
static void hlwd_pic_irq_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irq_domain *irq_domain = irq_desc_get_handler_data(desc);
unsigned int hwirq;
raw_spin_lock(&desc->lock);
chip->irq_mask(&desc->irq_data); /* IRQ_LEVEL */
raw_spin_unlock(&desc->lock);
hwirq = __hlwd_pic_get_irq(irq_domain);
if (hwirq)
generic_handle_domain_irq(irq_domain, hwirq);
else
pr_err("spurious interrupt!\n");
raw_spin_lock(&desc->lock);
chip->irq_ack(&desc->irq_data); /* IRQ_LEVEL */
if (!irqd_irq_disabled(&desc->irq_data) && chip->irq_unmask)
chip->irq_unmask(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
/*
* Platform hooks.
*
*/
static void __hlwd_quiesce(void __iomem *io_base)
{
/* mask and ack all IRQs */
out_be32(io_base + HW_BROADWAY_IMR, 0);
out_be32(io_base + HW_BROADWAY_ICR, 0xffffffff);
}
static struct irq_domain *__init hlwd_pic_init(struct device_node *np)
{
struct irq_domain *irq_domain;
struct resource res;
void __iomem *io_base;
int retval;
retval = of_address_to_resource(np, 0, &res);
if (retval) {
pr_err("no io memory range found\n");
return NULL;
}
io_base = ioremap(res.start, resource_size(&res));
if (!io_base) {
pr_err("ioremap failed\n");
return NULL;
}
pr_info("controller at 0x%pa mapped to 0x%p\n", &res.start, io_base);
__hlwd_quiesce(io_base);
irq_domain = irq_domain_add_linear(np, HLWD_NR_IRQS,
&hlwd_irq_domain_ops, io_base);
if (!irq_domain) {
pr_err("failed to allocate irq_domain\n");
iounmap(io_base);
return NULL;
}
return irq_domain;
}
unsigned int hlwd_pic_get_irq(void)
{
unsigned int hwirq = __hlwd_pic_get_irq(hlwd_irq_host);
return hwirq ? irq_linear_revmap(hlwd_irq_host, hwirq) : 0;
}
/*
* Probe function.
*
*/
void __init hlwd_pic_probe(void)
{
struct irq_domain *host;
struct device_node *np;
const u32 *interrupts;
int cascade_virq;
for_each_compatible_node(np, NULL, "nintendo,hollywood-pic") {
interrupts = of_get_property(np, "interrupts", NULL);
if (interrupts) {
host = hlwd_pic_init(np);
BUG_ON(!host);
cascade_virq = irq_of_parse_and_map(np, 0);
irq_set_handler_data(cascade_virq, host);
irq_set_chained_handler(cascade_virq,
hlwd_pic_irq_cascade);
hlwd_irq_host = host;
of_node_put(np);
break;
}
}
}
/**
* hlwd_quiesce() - quiesce hollywood irq controller
*
* Mask and ack all interrupt sources.
*
*/
void hlwd_quiesce(void)
{
void __iomem *io_base = hlwd_irq_host->host_data;
__hlwd_quiesce(io_base);
}
| linux-master | arch/powerpc/platforms/embedded6xx/hlwd-pic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* arch/powerpc/platforms/embedded6xx/wii.c
*
* Nintendo Wii board-specific support
* Copyright (C) 2008-2009 The GameCube Linux Team
* Copyright (C) 2008,2009 Albert Herranz
*/
#define DRV_MODULE_NAME "wii"
#define pr_fmt(fmt) DRV_MODULE_NAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/udbg.h>
#include "flipper-pic.h"
#include "hlwd-pic.h"
#include "usbgecko_udbg.h"
/* control block */
#define HW_CTRL_COMPATIBLE "nintendo,hollywood-control"
#define HW_CTRL_RESETS 0x94
#define HW_CTRL_RESETS_SYS (1<<0)
/* gpio */
#define HW_GPIO_COMPATIBLE "nintendo,hollywood-gpio"
#define HW_GPIO_BASE(idx) (idx * 0x20)
#define HW_GPIO_OUT(idx) (HW_GPIO_BASE(idx) + 0)
#define HW_GPIO_DIR(idx) (HW_GPIO_BASE(idx) + 4)
#define HW_GPIO_OWNER (HW_GPIO_BASE(1) + 0x1c)
#define HW_GPIO_SHUTDOWN (1<<1)
#define HW_GPIO_SLOT_LED (1<<5)
#define HW_GPIO_SENSOR_BAR (1<<8)
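/*
 * Bank 0 of the GPIO registers is used below for the slot LED and the
 * sensor bar, bank 1 together with HW_GPIO_OWNER for the poweroff pin
 * driven in wii_power_off().
 */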
static void __iomem *hw_ctrl;
static void __iomem *hw_gpio;
static void __noreturn wii_spin(void)
{
local_irq_disable();
for (;;)
cpu_relax();
}
static void __iomem *__init wii_ioremap_hw_regs(char *name, char *compatible)
{
void __iomem *hw_regs = NULL;
struct device_node *np;
struct resource res;
int error = -ENODEV;
np = of_find_compatible_node(NULL, NULL, compatible);
if (!np) {
pr_err("no compatible node found for %s\n", compatible);
goto out;
}
error = of_address_to_resource(np, 0, &res);
if (error) {
pr_err("no valid reg found for %pOFn\n", np);
goto out_put;
}
hw_regs = ioremap(res.start, resource_size(&res));
if (hw_regs) {
pr_info("%s at 0x%pa mapped to 0x%p\n", name,
&res.start, hw_regs);
}
out_put:
of_node_put(np);
out:
return hw_regs;
}
static void __init wii_setup_arch(void)
{
hw_ctrl = wii_ioremap_hw_regs("hw_ctrl", HW_CTRL_COMPATIBLE);
hw_gpio = wii_ioremap_hw_regs("hw_gpio", HW_GPIO_COMPATIBLE);
if (hw_gpio) {
/* turn off the front blue led and IR light */
clrbits32(hw_gpio + HW_GPIO_OUT(0),
HW_GPIO_SLOT_LED | HW_GPIO_SENSOR_BAR);
}
}
static void __noreturn wii_restart(char *cmd)
{
local_irq_disable();
if (hw_ctrl) {
/* clear the system reset pin to cause a reset */
clrbits32(hw_ctrl + HW_CTRL_RESETS, HW_CTRL_RESETS_SYS);
}
wii_spin();
}
static void wii_power_off(void)
{
local_irq_disable();
if (hw_gpio) {
/*
* set the owner of the shutdown pin to ARM, because it is
* accessed through the registers for the ARM, below
*/
clrbits32(hw_gpio + HW_GPIO_OWNER, HW_GPIO_SHUTDOWN);
/* make sure that the poweroff GPIO is configured as output */
setbits32(hw_gpio + HW_GPIO_DIR(1), HW_GPIO_SHUTDOWN);
/* drive the poweroff GPIO high */
setbits32(hw_gpio + HW_GPIO_OUT(1), HW_GPIO_SHUTDOWN);
}
wii_spin();
}
static void __noreturn wii_halt(void)
{
if (ppc_md.restart)
ppc_md.restart(NULL);
wii_spin();
}
static void __init wii_pic_probe(void)
{
flipper_pic_probe();
hlwd_pic_probe();
}
static int __init wii_probe(void)
{
pm_power_off = wii_power_off;
ug_udbg_init();
return 1;
}
static void wii_shutdown(void)
{
hlwd_quiesce();
flipper_quiesce();
}
static const struct of_device_id wii_of_bus[] = {
{ .compatible = "nintendo,hollywood", },
{ },
};
static int __init wii_device_probe(void)
{
of_platform_populate(NULL, wii_of_bus, NULL, NULL);
return 0;
}
machine_device_initcall(wii, wii_device_probe);
define_machine(wii) {
.name = "wii",
.compatible = "nintendo,wii",
.probe = wii_probe,
.setup_arch = wii_setup_arch,
.restart = wii_restart,
.halt = wii_halt,
.init_IRQ = wii_pic_probe,
.get_irq = flipper_pic_get_irq,
.progress = udbg_progress,
.machine_shutdown = wii_shutdown,
};
| linux-master | arch/powerpc/platforms/embedded6xx/wii.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Board setup routines for the IBM 750GX/CL platform w/ TSI10x bridge
*
* Copyright 2007 IBM Corporation
*
* Stephen Winiecki <[email protected]>
* Josh Boyer <[email protected]>
*
* Based on code from mpc7448_hpc2.c
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_core.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/extable.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/tsi108.h>
#include <asm/pci-bridge.h>
#include <asm/reg.h>
#include <mm/mmu_decl.h>
#include <asm/tsi108_irq.h>
#include <asm/tsi108_pci.h>
#include <asm/mpic.h>
#undef DEBUG
#define HOLLY_PCI_CFG_PHYS 0x7c000000
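/*
 * Physical window used for PCI configuration cycles; holly_remap_bridge()
 * programs the same address (ORed with 0x01) into TSI108_PCI_PFAB_BAR0
 * before tsi108_setup_pci() is told to use it.
 */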
static int holly_exclude_device(struct pci_controller *hose, u_char bus,
u_char devfn)
{
if (bus == 0 && PCI_SLOT(devfn) == 0)
return PCIBIOS_DEVICE_NOT_FOUND;
else
return PCIBIOS_SUCCESSFUL;
}
static void __init holly_remap_bridge(void)
{
u32 lut_val, lut_addr;
int i;
printk(KERN_INFO "Remapping PCI bridge\n");
/* Re-init the PCI bridge and LUT registers to have mappings that don't
* rely on PIBS
*/
lut_addr = 0x900;
for (i = 0; i < 31; i++) {
tsi108_write_reg(TSI108_PB_OFFSET + lut_addr, 0x00000201);
lut_addr += 4;
tsi108_write_reg(TSI108_PB_OFFSET + lut_addr, 0x0);
lut_addr += 4;
}
/* Reserve the last LUT entry for PCI I/O space */
tsi108_write_reg(TSI108_PB_OFFSET + lut_addr, 0x00000241);
lut_addr += 4;
tsi108_write_reg(TSI108_PB_OFFSET + lut_addr, 0x0);
/* Map PCI I/O space */
tsi108_write_reg(TSI108_PCI_PFAB_IO_UPPER, 0x0);
tsi108_write_reg(TSI108_PCI_PFAB_IO, 0x1);
/* Map PCI CFG space */
tsi108_write_reg(TSI108_PCI_PFAB_BAR0_UPPER, 0x0);
tsi108_write_reg(TSI108_PCI_PFAB_BAR0, 0x7c000000 | 0x01);
/* We don't need MEM32 and PRM remapping so disable them */
tsi108_write_reg(TSI108_PCI_PFAB_MEM32, 0x0);
tsi108_write_reg(TSI108_PCI_PFAB_PFM3, 0x0);
tsi108_write_reg(TSI108_PCI_PFAB_PFM4, 0x0);
/* Set P2O_BAR0 */
tsi108_write_reg(TSI108_PCI_P2O_BAR0_UPPER, 0x0);
tsi108_write_reg(TSI108_PCI_P2O_BAR0, 0xc0000000);
/* Init the PCI LUTs to do no remapping */
lut_addr = 0x500;
lut_val = 0x00000002;
for (i = 0; i < 32; i++) {
tsi108_write_reg(TSI108_PCI_OFFSET + lut_addr, lut_val);
lut_addr += 4;
tsi108_write_reg(TSI108_PCI_OFFSET + lut_addr, 0x40000000);
lut_addr += 4;
lut_val += 0x02000000;
}
tsi108_write_reg(TSI108_PCI_P2O_PAGE_SIZES, 0x00007900);
/* Set 64-bit PCI bus address for system memory */
tsi108_write_reg(TSI108_PCI_P2O_BAR2_UPPER, 0x0);
tsi108_write_reg(TSI108_PCI_P2O_BAR2, 0x0);
}
static void __init holly_init_pci(void)
{
struct device_node *np;
if (ppc_md.progress)
ppc_md.progress("holly_setup_arch():set_bridge", 0);
/* setup PCI host bridge */
holly_remap_bridge();
np = of_find_node_by_type(NULL, "pci");
if (np)
tsi108_setup_pci(np, HOLLY_PCI_CFG_PHYS, 1);
of_node_put(np);
ppc_md.pci_exclude_device = holly_exclude_device;
if (ppc_md.progress)
ppc_md.progress("tsi108: resources set", 0x100);
}
static void __init holly_setup_arch(void)
{
tsi108_csr_vir_base = get_vir_csrbase();
printk(KERN_INFO "PPC750GX/CL Platform\n");
}
/*
 * Interrupt setup and service. Interrupts on the Holly come
 * from the four external INT pins; PCI interrupts are routed via
 * the PCI interrupt control registers and generate internal IRQ23.
*
* Interrupt routing on the Holly Board:
* TSI108:PB_INT[0] -> CPU0:INT#
* TSI108:PB_INT[1] -> CPU0:MCP#
* TSI108:PB_INT[2] -> N/C
* TSI108:PB_INT[3] -> N/C
*/
static void __init holly_init_IRQ(void)
{
struct mpic *mpic;
#ifdef CONFIG_PCI
unsigned int cascade_pci_irq;
struct device_node *tsi_pci;
struct device_node *cascade_node = NULL;
#endif
mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108,
24, 0,
"Tsi108_PIC");
BUG_ON(mpic == NULL);
mpic_assign_isu(mpic, 0, mpic->paddr + 0x100);
mpic_init(mpic);
#ifdef CONFIG_PCI
tsi_pci = of_find_node_by_type(NULL, "pci");
if (tsi_pci == NULL) {
printk(KERN_ERR "%s: No tsi108 pci node found !\n", __func__);
return;
}
cascade_node = of_find_node_by_type(NULL, "pic-router");
if (cascade_node == NULL) {
printk(KERN_ERR "%s: No tsi108 pci cascade node found !\n", __func__);
return;
}
cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0);
pr_debug("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__, (u32) cascade_pci_irq);
tsi108_pci_int_init(cascade_node);
irq_set_handler_data(cascade_pci_irq, mpic);
irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade);
of_node_put(tsi_pci);
of_node_put(cascade_node);
#endif
/* Configure MPIC outputs to CPU0 */
tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
}
static void holly_show_cpuinfo(struct seq_file *m)
{
seq_printf(m, "vendor\t\t: IBM\n");
seq_printf(m, "machine\t\t: PPC750 GX/CL\n");
}
static void __noreturn holly_restart(char *cmd)
{
__be32 __iomem *ocn_bar1 = NULL;
unsigned long bar;
struct device_node *bridge = NULL;
struct resource res;
phys_addr_t addr = 0xc0000000;
local_irq_disable();
bridge = of_find_node_by_type(NULL, "tsi-bridge");
if (bridge) {
of_address_to_resource(bridge, 0, &res);
addr = res.start;
of_node_put(bridge);
}
addr += (TSI108_PB_OFFSET + 0x414);
ocn_bar1 = ioremap(addr, 0x4);
/* Turn on the BOOT bit so the addresses are correctly
* routed to the HLP interface */
bar = ioread32be(ocn_bar1);
bar |= 2;
iowrite32be(bar, ocn_bar1);
iosync();
/* Set SRR0 to the reset vector and turn on MSR_IP */
mtspr(SPRN_SRR0, 0xfff00100);
mtspr(SPRN_SRR1, MSR_IP);
/* Do an rfi to jump back to firmware. Somewhat evil,
* but it works
*/
__asm__ __volatile__("rfi" : : : "memory");
/* Spin until reset happens. Shouldn't really get here */
for (;;) ;
}
static int ppc750_machine_check_exception(struct pt_regs *regs)
{
const struct exception_table_entry *entry;
/* Are we prepared to handle this fault */
if ((entry = search_exception_tables(regs->nip)) != NULL) {
tsi108_clear_pci_cfg_error();
regs_set_recoverable(regs);
regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
return 0;
}
define_machine(holly){
.name = "PPC750 GX/CL TSI",
.compatible = "ibm,holly",
.setup_arch = holly_setup_arch,
.discover_phbs = holly_init_pci,
.init_IRQ = holly_init_IRQ,
.show_cpuinfo = holly_show_cpuinfo,
.get_irq = mpic_get_irq,
.restart = holly_restart,
.machine_check_exception = ppc750_machine_check_exception,
.progress = udbg_progress,
};
| linux-master | arch/powerpc/platforms/embedded6xx/holly.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* arch/powerpc/platforms/embedded6xx/flipper-pic.c
*
* Nintendo GameCube/Wii "Flipper" interrupt controller support.
* Copyright (C) 2004-2009 The GameCube Linux Team
* Copyright (C) 2007,2008,2009 Albert Herranz
*/
#define DRV_MODULE_NAME "flipper-pic"
#define pr_fmt(fmt) DRV_MODULE_NAME ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/io.h>
#include "flipper-pic.h"
#define FLIPPER_NR_IRQS 32
/*
* Each interrupt has a corresponding bit in both
* the Interrupt Cause (ICR) and Interrupt Mask (IMR) registers.
*
* Enabling/disabling an interrupt line involves setting/clearing
* the corresponding bit in IMR.
* Except for the RSW interrupt, all interrupts get deasserted automatically
* when the source deasserts the interrupt.
*/
#define FLIPPER_ICR 0x00
#define FLIPPER_ICR_RSS (1<<16) /* reset switch state */
#define FLIPPER_IMR 0x04
#define FLIPPER_RESET 0x24
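/*
 * Only the RSW (reset switch) source needs an explicit ack, which is
 * why the ICR writes in the ack/mask_and_ack hooks below are marked
 * "at least needed for RSW"; every other source drops its cause bit
 * when the device deasserts the interrupt.
 */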
/*
* IRQ chip hooks.
*
*/
static void flipper_pic_mask_and_ack(struct irq_data *d)
{
int irq = irqd_to_hwirq(d);
void __iomem *io_base = irq_data_get_irq_chip_data(d);
u32 mask = 1 << irq;
clrbits32(io_base + FLIPPER_IMR, mask);
/* this is at least needed for RSW */
out_be32(io_base + FLIPPER_ICR, mask);
}
static void flipper_pic_ack(struct irq_data *d)
{
int irq = irqd_to_hwirq(d);
void __iomem *io_base = irq_data_get_irq_chip_data(d);
/* this is at least needed for RSW */
out_be32(io_base + FLIPPER_ICR, 1 << irq);
}
static void flipper_pic_mask(struct irq_data *d)
{
int irq = irqd_to_hwirq(d);
void __iomem *io_base = irq_data_get_irq_chip_data(d);
clrbits32(io_base + FLIPPER_IMR, 1 << irq);
}
static void flipper_pic_unmask(struct irq_data *d)
{
int irq = irqd_to_hwirq(d);
void __iomem *io_base = irq_data_get_irq_chip_data(d);
setbits32(io_base + FLIPPER_IMR, 1 << irq);
}
static struct irq_chip flipper_pic = {
.name = "flipper-pic",
.irq_ack = flipper_pic_ack,
.irq_mask_ack = flipper_pic_mask_and_ack,
.irq_mask = flipper_pic_mask,
.irq_unmask = flipper_pic_unmask,
};
/*
* IRQ host hooks.
*
*/
static struct irq_domain *flipper_irq_host;
static int flipper_pic_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hwirq)
{
irq_set_chip_data(virq, h->host_data);
irq_set_status_flags(virq, IRQ_LEVEL);
irq_set_chip_and_handler(virq, &flipper_pic, handle_level_irq);
return 0;
}
static const struct irq_domain_ops flipper_irq_domain_ops = {
.map = flipper_pic_map,
};
/*
* Platform hooks.
*
*/
static void __flipper_quiesce(void __iomem *io_base)
{
/* mask and ack all IRQs */
out_be32(io_base + FLIPPER_IMR, 0x00000000);
out_be32(io_base + FLIPPER_ICR, 0xffffffff);
}
static struct irq_domain * __init flipper_pic_init(struct device_node *np)
{
struct device_node *pi;
struct irq_domain *irq_domain = NULL;
struct resource res;
void __iomem *io_base;
int retval;
pi = of_get_parent(np);
if (!pi) {
pr_err("no parent found\n");
goto out;
}
if (!of_device_is_compatible(pi, "nintendo,flipper-pi")) {
pr_err("unexpected parent compatible\n");
goto out;
}
retval = of_address_to_resource(pi, 0, &res);
if (retval) {
pr_err("no io memory range found\n");
goto out;
}
io_base = ioremap(res.start, resource_size(&res));
pr_info("controller at 0x%pa mapped to 0x%p\n", &res.start, io_base);
__flipper_quiesce(io_base);
irq_domain = irq_domain_add_linear(np, FLIPPER_NR_IRQS,
&flipper_irq_domain_ops, io_base);
if (!irq_domain) {
pr_err("failed to allocate irq_domain\n");
return NULL;
}
out:
return irq_domain;
}
unsigned int flipper_pic_get_irq(void)
{
void __iomem *io_base = flipper_irq_host->host_data;
int irq;
u32 irq_status;
irq_status = in_be32(io_base + FLIPPER_ICR) &
in_be32(io_base + FLIPPER_IMR);
if (irq_status == 0)
return 0; /* no more IRQs pending */
irq = __ffs(irq_status);
return irq_linear_revmap(flipper_irq_host, irq);
}
/*
* Probe function.
*
*/
void __init flipper_pic_probe(void)
{
struct device_node *np;
np = of_find_compatible_node(NULL, NULL, "nintendo,flipper-pic");
BUG_ON(!np);
flipper_irq_host = flipper_pic_init(np);
BUG_ON(!flipper_irq_host);
irq_set_default_host(flipper_irq_host);
of_node_put(np);
}
/*
* Misc functions related to the flipper chipset.
*
*/
/**
* flipper_quiesce() - quiesce flipper irq controller
*
* Mask and ack all interrupt sources.
*
*/
void flipper_quiesce(void)
{
void __iomem *io_base = flipper_irq_host->host_data;
__flipper_quiesce(io_base);
}
/*
* Resets the platform.
*/
void flipper_platform_reset(void)
{
void __iomem *io_base;
if (flipper_irq_host && flipper_irq_host->host_data) {
io_base = flipper_irq_host->host_data;
out_8(io_base + FLIPPER_RESET, 0x00);
}
}
/*
* Returns non-zero if the reset button is pressed.
*/
int flipper_is_reset_button_pressed(void)
{
void __iomem *io_base;
u32 icr;
if (flipper_irq_host && flipper_irq_host->host_data) {
io_base = flipper_irq_host->host_data;
icr = in_be32(io_base + FLIPPER_ICR);
return !(icr & FLIPPER_ICR_RSS);
}
return 0;
}
| linux-master | arch/powerpc/platforms/embedded6xx/flipper-pic.c |
/*
* Board setup routines for the Buffalo Linkstation / Kurobox Platform.
*
* Copyright (C) 2006 G. Liakhovetski ([email protected])
*
* Based on sandpoint.c by Mark A. Greer
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of
* any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/initrd.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/mpic.h>
#include <asm/pci-bridge.h>
#include "mpc10x.h"
static const struct of_device_id of_bus_ids[] __initconst = {
{ .type = "soc", },
{ .compatible = "simple-bus", },
{},
};
static int __init declare_of_platform_devices(void)
{
of_platform_bus_probe(NULL, of_bus_ids, NULL);
return 0;
}
machine_device_initcall(linkstation, declare_of_platform_devices);
static int __init linkstation_add_bridge(struct device_node *dev)
{
#ifdef CONFIG_PCI
int len;
struct pci_controller *hose;
const int *bus_range;
printk("Adding PCI host bridge %pOF\n", dev);
bus_range = of_get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int))
printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
" bus 0\n", dev);
hose = pcibios_alloc_controller(dev);
if (hose == NULL)
return -ENOMEM;
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
setup_indirect_pci(hose, 0xfec00000, 0xfee00000, 0);
/* Interpret the "ranges" property */
/* This also maps the I/O region and sets isa_io/mem_base */
pci_process_bridge_OF_ranges(hose, dev, 1);
#endif
return 0;
}
static void __init linkstation_setup_arch(void)
{
printk(KERN_INFO "BUFFALO Network Attached Storage Series\n");
printk(KERN_INFO "(C) 2002-2005 BUFFALO INC.\n");
}
static void __init linkstation_setup_pci(void)
{
struct device_node *np;
/* Lookup PCI host bridges */
for_each_compatible_node(np, "pci", "mpc10x-pci")
linkstation_add_bridge(np);
}
/*
* Interrupt setup and service. Interrupts on the linkstation come
* from the four PCI slots plus onboard 8241 devices: I2C, DUART.
*/
static void __init linkstation_init_IRQ(void)
{
struct mpic *mpic;
mpic = mpic_alloc(NULL, 0, 0, 4, 0, " EPIC ");
BUG_ON(mpic == NULL);
/* PCI IRQs */
mpic_assign_isu(mpic, 0, mpic->paddr + 0x10200);
/* I2C */
mpic_assign_isu(mpic, 1, mpic->paddr + 0x11000);
/* ttyS0, ttyS1 */
mpic_assign_isu(mpic, 2, mpic->paddr + 0x11100);
mpic_init(mpic);
}
extern void avr_uart_configure(void);
extern void avr_uart_send(const char);
static void __noreturn linkstation_restart(char *cmd)
{
local_irq_disable();
/* Reset system via AVR */
avr_uart_configure();
/* Send reboot command */
avr_uart_send('C');
for(;;) /* Spin until reset happens */
avr_uart_send('G'); /* "kick" */
}
static void __noreturn linkstation_power_off(void)
{
local_irq_disable();
/* Power down system via AVR */
avr_uart_configure();
/* send shutdown command */
avr_uart_send('E');
for(;;) /* Spin until power-off happens */
avr_uart_send('G'); /* "kick" */
/* NOTREACHED */
}
static void __noreturn linkstation_halt(void)
{
linkstation_power_off();
/* NOTREACHED */
}
static void linkstation_show_cpuinfo(struct seq_file *m)
{
seq_printf(m, "vendor\t\t: Buffalo Technology\n");
seq_printf(m, "machine\t\t: Linkstation I/Kurobox(HG)\n");
}
static int __init linkstation_probe(void)
{
pm_power_off = linkstation_power_off;
return 1;
}
define_machine(linkstation){
.name = "Buffalo Linkstation",
.compatible = "linkstation",
.probe = linkstation_probe,
.setup_arch = linkstation_setup_arch,
.discover_phbs = linkstation_setup_pci,
.init_IRQ = linkstation_init_IRQ,
.show_cpuinfo = linkstation_show_cpuinfo,
.get_irq = mpic_get_irq,
.restart = linkstation_restart,
.halt = linkstation_halt,
};
| linux-master | arch/powerpc/platforms/embedded6xx/linkstation.c |
/*
* Board setup routines for the storcenter
*
* Copyright 2007 (C) Oyvind Repvik ([email protected])
* Copyright 2007 Andy Wilcox, Jon Loeliger
*
* Based on linkstation.c by G. Liakhovetski
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of
* any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/initrd.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/mpic.h>
#include <asm/pci-bridge.h>
#include "mpc10x.h"
static const struct of_device_id storcenter_of_bus[] __initconst = {
{ .name = "soc", },
{},
};
static int __init storcenter_device_probe(void)
{
of_platform_bus_probe(NULL, storcenter_of_bus, NULL);
return 0;
}
machine_device_initcall(storcenter, storcenter_device_probe);
static int __init storcenter_add_bridge(struct device_node *dev)
{
#ifdef CONFIG_PCI
int len;
struct pci_controller *hose;
const int *bus_range;
printk("Adding PCI host bridge %pOF\n", dev);
hose = pcibios_alloc_controller(dev);
if (hose == NULL)
return -ENOMEM;
bus_range = of_get_property(dev, "bus-range", &len);
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
setup_indirect_pci(hose, MPC10X_MAPB_CNFG_ADDR, MPC10X_MAPB_CNFG_DATA, 0);
/* Interpret the "ranges" property */
/* This also maps the I/O region and sets isa_io/mem_base */
pci_process_bridge_OF_ranges(hose, dev, 1);
#endif
return 0;
}
static void __init storcenter_setup_arch(void)
{
printk(KERN_INFO "IOMEGA StorCenter\n");
}
static void __init storcenter_setup_pci(void)
{
struct device_node *np;
/* Lookup PCI host bridges */
for_each_compatible_node(np, "pci", "mpc10x-pci")
storcenter_add_bridge(np);
}
/*
 * Interrupt setup and service. Interrupts on the storcenter come
* from the four PCI slots plus onboard 8241 devices: I2C, DUART.
*/
static void __init storcenter_init_IRQ(void)
{
struct mpic *mpic;
mpic = mpic_alloc(NULL, 0, 0, 16, 0, " OpenPIC ");
BUG_ON(mpic == NULL);
/*
* 16 Serial Interrupts followed by 16 Internal Interrupts.
* I2C is the second internal, so it is at 17, 0x11020.
*/
mpic_assign_isu(mpic, 0, mpic->paddr + 0x10200);
mpic_assign_isu(mpic, 1, mpic->paddr + 0x11000);
mpic_init(mpic);
}
static void __noreturn storcenter_restart(char *cmd)
{
local_irq_disable();
/* Set exception prefix high - to the firmware */
mtmsr(mfmsr() | MSR_IP);
isync();
/* Wait for reset to happen */
for (;;) ;
}
define_machine(storcenter){
.name = "IOMEGA StorCenter",
.compatible = "iomega,storcenter",
.setup_arch = storcenter_setup_arch,
.discover_phbs = storcenter_setup_pci,
.init_IRQ = storcenter_init_IRQ,
.get_irq = mpic_get_irq,
.restart = storcenter_restart,
};
| linux-master | arch/powerpc/platforms/embedded6xx/storcenter.c |
/*
* AVR power-management chip interface for the Buffalo Linkstation /
* Kurobox Platform.
*
* Author: 2006 (c) G. Liakhovetski
* [email protected]
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of
* any kind, whether express or implied.
*/
#include <linux/workqueue.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/io.h>
#include <asm/termbits.h>
#include "mpc10x.h"
static void __iomem *avr_addr;
static unsigned long avr_clock;
static struct work_struct wd_work;
static void wd_stop(struct work_struct *unused)
{
const char string[] = "AAAAFFFFJJJJ>>>>VVVV>>>>ZZZZVVVVKKKK";
int i = 0, rescue = 8;
int len = strlen(string);
while (rescue--) {
int j;
char lsr = in_8(avr_addr + UART_LSR);
if (lsr & (UART_LSR_THRE | UART_LSR_TEMT)) {
for (j = 0; j < 16 && i < len; j++, i++)
out_8(avr_addr + UART_TX, string[i]);
if (i == len) {
/* Read "OK" back: 4ms for the last "KKKK"
plus a couple bytes back */
msleep(7);
printk("linkstation: disarming the AVR watchdog: ");
while (in_8(avr_addr + UART_LSR) & UART_LSR_DR)
printk("%c", in_8(avr_addr + UART_RX));
break;
}
}
msleep(17);
}
printk("\n");
}
#define AVR_QUOT(clock) (((clock) + 8 * 9600) / (16 * 9600))
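/*
 * Standard 16550 divisor calculation rounded to the nearest integer:
 * divisor = (clock + baud * 8) / (baud * 16), with baud fixed at 9600.
 * As an illustration only (the real value comes from the device tree's
 * clock-frequency property), an assumed 7.3728 MHz UART clock would
 * give 7372800 / (16 * 9600) = 48.
 */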
void avr_uart_configure(void)
{
unsigned char cval = UART_LCR_WLEN8;
unsigned int quot = AVR_QUOT(avr_clock);
if (!avr_addr || !avr_clock)
return;
out_8(avr_addr + UART_LCR, cval); /* initialise UART */
out_8(avr_addr + UART_MCR, 0);
out_8(avr_addr + UART_IER, 0);
cval |= UART_LCR_STOP | UART_LCR_PARITY | UART_LCR_EPAR;
out_8(avr_addr + UART_LCR, cval); /* Set character format */
out_8(avr_addr + UART_LCR, cval | UART_LCR_DLAB); /* set DLAB */
out_8(avr_addr + UART_DLL, quot & 0xff); /* LS of divisor */
out_8(avr_addr + UART_DLM, quot >> 8); /* MS of divisor */
out_8(avr_addr + UART_LCR, cval); /* reset DLAB */
out_8(avr_addr + UART_FCR, UART_FCR_ENABLE_FIFO); /* enable FIFO */
}
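/*
 * avr_uart_send() below writes the character four times in a row; the
 * command string used in wd_stop() also repeats each command character
 * in groups of four, which suggests the AVR expects this repetition,
 * and with the FIFO enabled the extra writes are harmless.
 */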
void avr_uart_send(const char c)
{
if (!avr_addr || !avr_clock)
return;
out_8(avr_addr + UART_TX, c);
out_8(avr_addr + UART_TX, c);
out_8(avr_addr + UART_TX, c);
out_8(avr_addr + UART_TX, c);
}
static void __init ls_uart_init(void)
{
local_irq_disable();
#ifndef CONFIG_SERIAL_8250
out_8(avr_addr + UART_FCR, UART_FCR_ENABLE_FIFO); /* enable FIFO */
out_8(avr_addr + UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); /* clear FIFOs */
out_8(avr_addr + UART_FCR, 0);
out_8(avr_addr + UART_IER, 0);
/* Clear up interrupts */
(void) in_8(avr_addr + UART_LSR);
(void) in_8(avr_addr + UART_RX);
(void) in_8(avr_addr + UART_IIR);
(void) in_8(avr_addr + UART_MSR);
#endif
avr_uart_configure();
local_irq_enable();
}
static int __init ls_uarts_init(void)
{
struct device_node *avr;
struct resource res;
	int len, ret;
	const u32 *clock;
	avr = of_find_node_by_path("/soc10x/serial@80004500");
	if (!avr)
		return -EINVAL;
	/* Guard against a missing or zero clock-frequency property */
	clock = of_get_property(avr, "clock-frequency", &len);
	if (!clock || !*clock) {
		of_node_put(avr);
		return -EINVAL;
	}
	avr_clock = *clock;
	ret = of_address_to_resource(avr, 0, &res);
	of_node_put(avr);
	if (ret)
		return ret;
avr_addr = ioremap(res.start, 32);
if (!avr_addr)
return -EFAULT;
ls_uart_init();
INIT_WORK(&wd_work, wd_stop);
schedule_work(&wd_work);
return 0;
}
machine_late_initcall(linkstation, ls_uarts_init);
| linux-master | arch/powerpc/platforms/embedded6xx/ls_uart.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* arch/powerpc/platforms/embedded6xx/gamecube.c
*
* Nintendo GameCube board-specific support
* Copyright (C) 2004-2009 The GameCube Linux Team
* Copyright (C) 2007,2008,2009 Albert Herranz
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/udbg.h>
#include "flipper-pic.h"
#include "usbgecko_udbg.h"
static void __noreturn gamecube_spin(void)
{
/* spin until power button pressed */
for (;;)
cpu_relax();
}
static void __noreturn gamecube_restart(char *cmd)
{
local_irq_disable();
flipper_platform_reset();
gamecube_spin();
}
static void gamecube_power_off(void)
{
local_irq_disable();
gamecube_spin();
}
static void __noreturn gamecube_halt(void)
{
gamecube_restart(NULL);
}
static int __init gamecube_probe(void)
{
pm_power_off = gamecube_power_off;
ug_udbg_init();
return 1;
}
static void gamecube_shutdown(void)
{
flipper_quiesce();
}
define_machine(gamecube) {
.name = "gamecube",
.compatible = "nintendo,gamecube",
.probe = gamecube_probe,
.restart = gamecube_restart,
.halt = gamecube_halt,
.init_IRQ = flipper_pic_probe,
.get_irq = flipper_pic_get_irq,
.progress = udbg_progress,
.machine_shutdown = gamecube_shutdown,
};
static const struct of_device_id gamecube_of_bus[] = {
{ .compatible = "nintendo,flipper", },
{ },
};
static int __init gamecube_device_probe(void)
{
of_platform_bus_probe(NULL, gamecube_of_bus, NULL);
return 0;
}
machine_device_initcall(gamecube, gamecube_device_probe);
| linux-master | arch/powerpc/platforms/embedded6xx/gamecube.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c
*
* udbg serial input/output routines for the USB Gecko adapter.
* Copyright (C) 2008-2009 The GameCube Linux Team
* Copyright (C) 2008,2009 Albert Herranz
*/
#include <linux/of_address.h>
#include <mm/mmu_decl.h>
#include <asm/io.h>
#include <asm/udbg.h>
#include <asm/fixmap.h>
#include "usbgecko_udbg.h"
#define EXI_CLK_32MHZ 5
#define EXI_CSR 0x00
#define EXI_CSR_CLKMASK (0x7<<4)
#define EXI_CSR_CLK_32MHZ (EXI_CLK_32MHZ<<4)
#define EXI_CSR_CSMASK (0x7<<7)
#define EXI_CSR_CS_0 (0x1<<7) /* Chip Select 001 */
#define EXI_CR 0x0c
#define EXI_CR_TSTART (1<<0)
#define EXI_CR_WRITE (1<<2)
#define EXI_CR_READ_WRITE (2<<2)
#define EXI_CR_TLEN(len) (((len)-1)<<4)
#define EXI_DATA 0x10
#define UG_READ_ATTEMPTS 100
#define UG_WRITE_ATTEMPTS 100
static void __iomem *ug_io_base;
/*
* Performs one input/output transaction between the exi host and the usbgecko.
*/
static u32 ug_io_transaction(u32 in)
{
u32 __iomem *csr_reg = ug_io_base + EXI_CSR;
u32 __iomem *data_reg = ug_io_base + EXI_DATA;
u32 __iomem *cr_reg = ug_io_base + EXI_CR;
u32 csr, data, cr;
/* select */
csr = EXI_CSR_CLK_32MHZ | EXI_CSR_CS_0;
out_be32(csr_reg, csr);
/* read/write */
data = in;
out_be32(data_reg, data);
cr = EXI_CR_TLEN(2) | EXI_CR_READ_WRITE | EXI_CR_TSTART;
out_be32(cr_reg, cr);
while (in_be32(cr_reg) & EXI_CR_TSTART)
barrier();
/* deselect */
out_be32(csr_reg, 0);
/* result */
data = in_be32(data_reg);
return data;
}
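/*
 * EXI command words used by the helpers below: 0x90000000 queries the
 * adapter ID (0x04700000 expected), 0xc0000000 / 0xd0000000 poll the
 * TX / RX fifo status, 0xb0000000 | (ch << 20) transmits a byte and
 * 0xa0000000 receives one.
 */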
/*
* Returns true if an usbgecko adapter is found.
*/
static int ug_is_adapter_present(void)
{
if (!ug_io_base)
return 0;
return ug_io_transaction(0x90000000) == 0x04700000;
}
/*
* Returns true if the TX fifo is ready for transmission.
*/
static int ug_is_txfifo_ready(void)
{
return ug_io_transaction(0xc0000000) & 0x04000000;
}
/*
* Tries to transmit a character.
* If the TX fifo is not ready the result is undefined.
*/
static void ug_raw_putc(char ch)
{
ug_io_transaction(0xb0000000 | (ch << 20));
}
/*
* Transmits a character.
* It silently fails if the TX fifo is not ready after a number of retries.
*/
static void ug_putc(char ch)
{
int count = UG_WRITE_ATTEMPTS;
if (!ug_io_base)
return;
if (ch == '\n')
ug_putc('\r');
while (!ug_is_txfifo_ready() && count--)
barrier();
if (count >= 0)
ug_raw_putc(ch);
}
/*
 * Returns true if the RX fifo has a character ready to be read.
*/
static int ug_is_rxfifo_ready(void)
{
return ug_io_transaction(0xd0000000) & 0x04000000;
}
/*
* Tries to receive a character.
* If a character is unavailable the function returns -1.
*/
static int ug_raw_getc(void)
{
u32 data = ug_io_transaction(0xa0000000);
if (data & 0x08000000)
return (data >> 16) & 0xff;
else
return -1;
}
/*
* Receives a character.
* It fails if the RX fifo is not ready after a number of retries.
*/
static int ug_getc(void)
{
int count = UG_READ_ATTEMPTS;
if (!ug_io_base)
return -1;
while (!ug_is_rxfifo_ready() && count--)
barrier();
return ug_raw_getc();
}
/*
* udbg functions.
*
*/
/*
* Transmits a character.
*/
static void ug_udbg_putc(char ch)
{
ug_putc(ch);
}
/*
* Receives a character. Waits until a character is available.
*/
static int ug_udbg_getc(void)
{
int ch;
while ((ch = ug_getc()) == -1)
barrier();
return ch;
}
/*
* Receives a character. If a character is not available, returns -1.
*/
static int ug_udbg_getc_poll(void)
{
if (!ug_is_rxfifo_ready())
return -1;
return ug_getc();
}
/*
* Checks if a USB Gecko adapter is inserted in any memory card slot.
*/
static void __iomem *__init ug_udbg_probe(void __iomem *exi_io_base)
{
int i;
/* look for a usbgecko on memcard slots A and B */
for (i = 0; i < 2; i++) {
ug_io_base = exi_io_base + 0x14 * i;
if (ug_is_adapter_present())
break;
}
if (i == 2)
ug_io_base = NULL;
return ug_io_base;
}
/*
* USB Gecko udbg support initialization.
*/
void __init ug_udbg_init(void)
{
struct device_node *np;
void __iomem *exi_io_base;
if (ug_io_base)
udbg_printf("%s: early -> final\n", __func__);
np = of_find_compatible_node(NULL, NULL, "nintendo,flipper-exi");
if (!np) {
udbg_printf("%s: EXI node not found\n", __func__);
goto out;
}
exi_io_base = of_iomap(np, 0);
if (!exi_io_base) {
udbg_printf("%s: failed to setup EXI io base\n", __func__);
goto done;
}
if (!ug_udbg_probe(exi_io_base)) {
udbg_printf("usbgecko_udbg: not found\n");
iounmap(exi_io_base);
} else {
udbg_putc = ug_udbg_putc;
udbg_getc = ug_udbg_getc;
udbg_getc_poll = ug_udbg_getc_poll;
udbg_printf("usbgecko_udbg: ready\n");
}
done:
of_node_put(np);
out:
return;
}
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
static phys_addr_t __init ug_early_grab_io_addr(void)
{
#if defined(CONFIG_GAMECUBE)
return 0x0c000000;
#elif defined(CONFIG_WII)
return 0x0d000000;
#else
#error Invalid platform for USB Gecko based early debugging.
#endif
}
/*
* USB Gecko early debug support initialization for udbg.
*/
void __init udbg_init_usbgecko(void)
{
void __iomem *early_debug_area;
void __iomem *exi_io_base;
/*
* At this point we have a BAT already setup that enables I/O
* to the EXI hardware.
*
* The BAT uses a virtual address range reserved at the fixmap.
* This must match the virtual address configured in
* head_32.S:setup_usbgecko_bat().
*/
early_debug_area = (void __iomem *)__fix_to_virt(FIX_EARLY_DEBUG_BASE);
exi_io_base = early_debug_area + 0x00006800;
/* try to detect a USB Gecko */
if (!ug_udbg_probe(exi_io_base))
return;
/* we found a USB Gecko, load udbg hooks */
udbg_putc = ug_udbg_putc;
udbg_getc = ug_udbg_getc;
udbg_getc_poll = ug_udbg_getc_poll;
/*
* Prepare again the same BAT for MMU_init.
* This allows udbg I/O to continue working after the MMU is
* turned on for real.
* It is safe to continue using the same virtual address as it is
* a reserved fixmap area.
*/
setbat(1, (unsigned long)early_debug_area,
ug_early_grab_io_addr(), 128*1024, PAGE_KERNEL_NCG);
}
#endif /* CONFIG_PPC_EARLY_DEBUG_USBGECKO */
| linux-master | arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PS3 Platform spu routines.
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/lv1call.h>
#include <asm/ps3.h>
#include "../cell/spufs/spufs.h"
#include "platform.h"
/* spu_management_ops */
/**
* enum spe_type - Type of spe to create.
* @spe_type_logical: Standard logical spe.
*
* For use with lv1_construct_logical_spe(). The current HV does not support
* any types other than those listed.
*/
enum spe_type {
SPE_TYPE_LOGICAL = 0,
};
/**
* struct spe_shadow - logical spe shadow register area.
*
* Read-only shadow of spe registers.
*/
struct spe_shadow {
u8 padding_0140[0x0140];
u64 int_status_class0_RW; /* 0x0140 */
u64 int_status_class1_RW; /* 0x0148 */
u64 int_status_class2_RW; /* 0x0150 */
u8 padding_0158[0x0610-0x0158];
u64 mfc_dsisr_RW; /* 0x0610 */
u8 padding_0618[0x0620-0x0618];
u64 mfc_dar_RW; /* 0x0620 */
u8 padding_0628[0x0800-0x0628];
u64 mfc_dsipr_R; /* 0x0800 */
u8 padding_0808[0x0810-0x0808];
u64 mfc_lscrr_R; /* 0x0810 */
u8 padding_0818[0x0c00-0x0818];
u64 mfc_cer_R; /* 0x0c00 */
u8 padding_0c08[0x0f00-0x0c08];
u64 spe_execution_status; /* 0x0f00 */
u8 padding_0f08[0x1000-0x0f08];
};
/**
* enum spe_ex_state - Logical spe execution state.
* @spe_ex_state_unexecutable: Uninitialized.
* @spe_ex_state_executable: Enabled, not ready.
* @spe_ex_state_executed: Ready for use.
*
* The execution state (status) of the logical spe as reported in
* struct spe_shadow:spe_execution_status.
*/
enum spe_ex_state {
SPE_EX_STATE_UNEXECUTABLE = 0,
SPE_EX_STATE_EXECUTABLE = 2,
SPE_EX_STATE_EXECUTED = 3,
};
/**
* struct priv1_cache - Cached values of priv1 registers.
* @masks[]: Array of cached spe interrupt masks, indexed by class.
* @sr1: Cached mfc_sr1 register.
* @tclass_id: Cached mfc_tclass_id register.
*/
struct priv1_cache {
u64 masks[3];
u64 sr1;
u64 tclass_id;
};
/**
* struct spu_pdata - Platform state variables.
* @spe_id: HV spe id returned by lv1_construct_logical_spe().
* @resource_id: HV spe resource id returned by
* ps3_repository_read_spe_resource_id().
* @priv2_addr: lpar address of spe priv2 area returned by
* lv1_construct_logical_spe().
* @shadow_addr: lpar address of spe register shadow area returned by
* lv1_construct_logical_spe().
* @shadow: Virtual (ioremap) address of spe register shadow area.
* @cache: Cached values of priv1 registers.
*/
struct spu_pdata {
u64 spe_id;
u64 resource_id;
u64 priv2_addr;
u64 shadow_addr;
struct spe_shadow __iomem *shadow;
struct priv1_cache cache;
};
static struct spu_pdata *spu_pdata(struct spu *spu)
{
return spu->pdata;
}
#define dump_areas(_a, _b, _c, _d, _e) \
_dump_areas(_a, _b, _c, _d, _e, __func__, __LINE__)
static void _dump_areas(unsigned int spe_id, unsigned long priv2,
unsigned long problem, unsigned long ls, unsigned long shadow,
const char* func, int line)
{
pr_debug("%s:%d: spe_id: %xh (%u)\n", func, line, spe_id, spe_id);
pr_debug("%s:%d: priv2: %lxh\n", func, line, priv2);
pr_debug("%s:%d: problem: %lxh\n", func, line, problem);
pr_debug("%s:%d: ls: %lxh\n", func, line, ls);
pr_debug("%s:%d: shadow: %lxh\n", func, line, shadow);
}
u64 ps3_get_spe_id(void *arg)
{
return spu_pdata(arg)->spe_id;
}
EXPORT_SYMBOL_GPL(ps3_get_spe_id);
static unsigned long __init get_vas_id(void)
{
u64 id;
lv1_get_logical_ppe_id(&id);
lv1_get_virtual_address_space_id_of_ppe(&id);
return id;
}
static int __init construct_spu(struct spu *spu)
{
int result;
u64 unused;
u64 problem_phys;
u64 local_store_phys;
result = lv1_construct_logical_spe(PAGE_SHIFT, PAGE_SHIFT, PAGE_SHIFT,
PAGE_SHIFT, PAGE_SHIFT, get_vas_id(), SPE_TYPE_LOGICAL,
&spu_pdata(spu)->priv2_addr, &problem_phys,
&local_store_phys, &unused,
&spu_pdata(spu)->shadow_addr,
&spu_pdata(spu)->spe_id);
spu->problem_phys = problem_phys;
spu->local_store_phys = local_store_phys;
if (result) {
pr_debug("%s:%d: lv1_construct_logical_spe failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
return result;
}
static void spu_unmap(struct spu *spu)
{
iounmap(spu->priv2);
iounmap(spu->problem);
iounmap((__force u8 __iomem *)spu->local_store);
iounmap(spu_pdata(spu)->shadow);
}
/**
* setup_areas - Map the spu regions into the address space.
*
* The current HV requires the spu shadow regs to be mapped with the
* PTE page protection bits set as read-only.
*/
static int __init setup_areas(struct spu *spu)
{
struct table {char* name; unsigned long addr; unsigned long size;};
unsigned long shadow_flags = pgprot_val(pgprot_noncached_wc(PAGE_KERNEL_RO));
spu_pdata(spu)->shadow = ioremap_prot(spu_pdata(spu)->shadow_addr,
sizeof(struct spe_shadow), shadow_flags);
if (!spu_pdata(spu)->shadow) {
pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);
goto fail_ioremap;
}
spu->local_store = (__force void *)ioremap_wc(spu->local_store_phys, LS_SIZE);
if (!spu->local_store) {
pr_debug("%s:%d: ioremap local_store failed\n",
__func__, __LINE__);
goto fail_ioremap;
}
spu->problem = ioremap(spu->problem_phys,
sizeof(struct spu_problem));
if (!spu->problem) {
pr_debug("%s:%d: ioremap problem failed\n", __func__, __LINE__);
goto fail_ioremap;
}
spu->priv2 = ioremap(spu_pdata(spu)->priv2_addr,
sizeof(struct spu_priv2));
if (!spu->priv2) {
pr_debug("%s:%d: ioremap priv2 failed\n", __func__, __LINE__);
goto fail_ioremap;
}
dump_areas(spu_pdata(spu)->spe_id, spu_pdata(spu)->priv2_addr,
spu->problem_phys, spu->local_store_phys,
spu_pdata(spu)->shadow_addr);
dump_areas(spu_pdata(spu)->spe_id, (unsigned long)spu->priv2,
(unsigned long)spu->problem, (unsigned long)spu->local_store,
(unsigned long)spu_pdata(spu)->shadow);
return 0;
fail_ioremap:
spu_unmap(spu);
return -ENOMEM;
}
static int __init setup_interrupts(struct spu *spu)
{
int result;
result = ps3_spe_irq_setup(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,
0, &spu->irqs[0]);
if (result)
goto fail_alloc_0;
result = ps3_spe_irq_setup(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,
1, &spu->irqs[1]);
if (result)
goto fail_alloc_1;
result = ps3_spe_irq_setup(PS3_BINDING_CPU_ANY, spu_pdata(spu)->spe_id,
2, &spu->irqs[2]);
if (result)
goto fail_alloc_2;
return result;
fail_alloc_2:
ps3_spe_irq_destroy(spu->irqs[1]);
fail_alloc_1:
ps3_spe_irq_destroy(spu->irqs[0]);
fail_alloc_0:
spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = 0;
return result;
}
static int __init enable_spu(struct spu *spu)
{
int result;
result = lv1_enable_logical_spe(spu_pdata(spu)->spe_id,
spu_pdata(spu)->resource_id);
if (result) {
pr_debug("%s:%d: lv1_enable_logical_spe failed: %s\n",
__func__, __LINE__, ps3_result(result));
goto fail_enable;
}
result = setup_areas(spu);
if (result)
goto fail_areas;
result = setup_interrupts(spu);
if (result)
goto fail_interrupts;
return 0;
fail_interrupts:
spu_unmap(spu);
fail_areas:
lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);
fail_enable:
return result;
}
static int ps3_destroy_spu(struct spu *spu)
{
int result;
pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);
result = lv1_disable_logical_spe(spu_pdata(spu)->spe_id, 0);
BUG_ON(result);
ps3_spe_irq_destroy(spu->irqs[2]);
ps3_spe_irq_destroy(spu->irqs[1]);
ps3_spe_irq_destroy(spu->irqs[0]);
spu->irqs[0] = spu->irqs[1] = spu->irqs[2] = 0;
spu_unmap(spu);
result = lv1_destruct_logical_spe(spu_pdata(spu)->spe_id);
BUG_ON(result);
kfree(spu->pdata);
spu->pdata = NULL;
return 0;
}
static int __init ps3_create_spu(struct spu *spu, void *data)
{
int result;
pr_debug("%s:%d spu_%d\n", __func__, __LINE__, spu->number);
spu->pdata = kzalloc(sizeof(struct spu_pdata),
GFP_KERNEL);
if (!spu->pdata) {
result = -ENOMEM;
goto fail_malloc;
}
spu_pdata(spu)->resource_id = (unsigned long)data;
/* Init cached reg values to HV defaults. */
spu_pdata(spu)->cache.sr1 = 0x33;
result = construct_spu(spu);
if (result)
goto fail_construct;
/* For now, just go ahead and enable it. */
result = enable_spu(spu);
if (result)
goto fail_enable;
/* Make sure the spu is in SPE_EX_STATE_EXECUTED. */
/* need something better here!!! */
while (in_be64(&spu_pdata(spu)->shadow->spe_execution_status)
!= SPE_EX_STATE_EXECUTED)
(void)0;
return result;
fail_enable:
fail_construct:
ps3_destroy_spu(spu);
fail_malloc:
return result;
}
static int __init ps3_enumerate_spus(int (*fn)(void *data))
{
int result;
unsigned int num_resource_id;
unsigned int i;
result = ps3_repository_read_num_spu_resource_id(&num_resource_id);
pr_debug("%s:%d: num_resource_id %u\n", __func__, __LINE__,
num_resource_id);
/*
* For now, just create logical spus equal to the number
* of physical spus reserved for the partition.
*/
for (i = 0; i < num_resource_id; i++) {
enum ps3_spu_resource_type resource_type;
unsigned int resource_id;
result = ps3_repository_read_spu_resource_id(i,
&resource_type, &resource_id);
if (result)
break;
if (resource_type == PS3_SPU_RESOURCE_TYPE_EXCLUSIVE) {
result = fn((void*)(unsigned long)resource_id);
if (result)
break;
}
}
if (result) {
printk(KERN_WARNING "%s:%d: Error initializing spus\n",
__func__, __LINE__);
return result;
}
return num_resource_id;
}
static int ps3_init_affinity(void)
{
return 0;
}
/**
* ps3_enable_spu - Enable SPU run control.
*
* An outstanding enhancement for the PS3 would be to add a guard to check
* for incorrect access to the spu problem state when the spu context is
* disabled. This check could be implemented with a flag added to the spu
* context that would inhibit mapping problem state pages, and a routine
* to unmap spu problem state pages. When the spu is enabled with
* ps3_enable_spu() the flag would be set allowing pages to be mapped,
* and when the spu is disabled with ps3_disable_spu() the flag would be
* cleared and the mapped problem state pages would be unmapped.
*/
static void ps3_enable_spu(struct spu_context *ctx)
{
}
static void ps3_disable_spu(struct spu_context *ctx)
{
ctx->ops->runcntl_stop(ctx);
}
static const struct spu_management_ops spu_management_ps3_ops = {
.enumerate_spus = ps3_enumerate_spus,
.create_spu = ps3_create_spu,
.destroy_spu = ps3_destroy_spu,
.enable_spu = ps3_enable_spu,
.disable_spu = ps3_disable_spu,
.init_affinity = ps3_init_affinity,
};
/* spu_priv1_ops */
static void int_mask_and(struct spu *spu, int class, u64 mask)
{
u64 old_mask;
/* are these serialized by caller??? */
old_mask = spu_int_mask_get(spu, class);
spu_int_mask_set(spu, class, old_mask & mask);
}
static void int_mask_or(struct spu *spu, int class, u64 mask)
{
u64 old_mask;
old_mask = spu_int_mask_get(spu, class);
spu_int_mask_set(spu, class, old_mask | mask);
}
static void int_mask_set(struct spu *spu, int class, u64 mask)
{
spu_pdata(spu)->cache.masks[class] = mask;
lv1_set_spe_interrupt_mask(spu_pdata(spu)->spe_id, class,
spu_pdata(spu)->cache.masks[class]);
}
static u64 int_mask_get(struct spu *spu, int class)
{
return spu_pdata(spu)->cache.masks[class];
}
static void int_stat_clear(struct spu *spu, int class, u64 stat)
{
/* Note that MFC_DSISR will be cleared when class1[MF] is set. */
lv1_clear_spe_interrupt_status(spu_pdata(spu)->spe_id, class,
stat, 0);
}
static u64 int_stat_get(struct spu *spu, int class)
{
u64 stat;
lv1_get_spe_interrupt_status(spu_pdata(spu)->spe_id, class, &stat);
return stat;
}
static void cpu_affinity_set(struct spu *spu, int cpu)
{
/* No support. */
}
static u64 mfc_dar_get(struct spu *spu)
{
return in_be64(&spu_pdata(spu)->shadow->mfc_dar_RW);
}
static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
{
/* Nothing to do, cleared in int_stat_clear(). */
}
static u64 mfc_dsisr_get(struct spu *spu)
{
return in_be64(&spu_pdata(spu)->shadow->mfc_dsisr_RW);
}
static void mfc_sdr_setup(struct spu *spu)
{
/* Nothing to do. */
}
static void mfc_sr1_set(struct spu *spu, u64 sr1)
{
/* Check bits allowed by HV. */
static const u64 allowed = ~(MFC_STATE1_LOCAL_STORAGE_DECODE_MASK
| MFC_STATE1_PROBLEM_STATE_MASK);
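	/*
	 * Only the local storage decode and problem state bits may differ
	 * from the cached sr1 value; a change to any other bit is a bug.
	 */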
BUG_ON((sr1 & allowed) != (spu_pdata(spu)->cache.sr1 & allowed));
spu_pdata(spu)->cache.sr1 = sr1;
lv1_set_spe_privilege_state_area_1_register(
spu_pdata(spu)->spe_id,
offsetof(struct spu_priv1, mfc_sr1_RW),
spu_pdata(spu)->cache.sr1);
}
static u64 mfc_sr1_get(struct spu *spu)
{
return spu_pdata(spu)->cache.sr1;
}
static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
{
spu_pdata(spu)->cache.tclass_id = tclass_id;
lv1_set_spe_privilege_state_area_1_register(
spu_pdata(spu)->spe_id,
offsetof(struct spu_priv1, mfc_tclass_id_RW),
spu_pdata(spu)->cache.tclass_id);
}
static u64 mfc_tclass_id_get(struct spu *spu)
{
return spu_pdata(spu)->cache.tclass_id;
}
static void tlb_invalidate(struct spu *spu)
{
/* Nothing to do. */
}
static void resource_allocation_groupID_set(struct spu *spu, u64 id)
{
/* No support. */
}
static u64 resource_allocation_groupID_get(struct spu *spu)
{
return 0; /* No support. */
}
static void resource_allocation_enable_set(struct spu *spu, u64 enable)
{
/* No support. */
}
static u64 resource_allocation_enable_get(struct spu *spu)
{
return 0; /* No support. */
}
static const struct spu_priv1_ops spu_priv1_ps3_ops = {
.int_mask_and = int_mask_and,
.int_mask_or = int_mask_or,
.int_mask_set = int_mask_set,
.int_mask_get = int_mask_get,
.int_stat_clear = int_stat_clear,
.int_stat_get = int_stat_get,
.cpu_affinity_set = cpu_affinity_set,
.mfc_dar_get = mfc_dar_get,
.mfc_dsisr_set = mfc_dsisr_set,
.mfc_dsisr_get = mfc_dsisr_get,
.mfc_sdr_setup = mfc_sdr_setup,
.mfc_sr1_set = mfc_sr1_set,
.mfc_sr1_get = mfc_sr1_get,
.mfc_tclass_id_set = mfc_tclass_id_set,
.mfc_tclass_id_get = mfc_tclass_id_get,
.tlb_invalidate = tlb_invalidate,
.resource_allocation_groupID_set = resource_allocation_groupID_set,
.resource_allocation_groupID_get = resource_allocation_groupID_get,
.resource_allocation_enable_set = resource_allocation_enable_set,
.resource_allocation_enable_get = resource_allocation_enable_get,
};
void ps3_spu_set_platform(void)
{
spu_priv1_ops = &spu_priv1_ps3_ops;
spu_management_ops = &spu_management_ps3_ops;
}
| linux-master | arch/powerpc/platforms/ps3/spu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PS3 interrupt routines.
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/smp.h>
#include "platform.h"
#if defined(DEBUG)
#define DBG udbg_printf
#define FAIL udbg_printf
#else
#define DBG pr_devel
#define FAIL pr_debug
#endif
/**
* struct ps3_bmp - a per cpu irq status and mask bitmap structure
* @status: 256 bit status bitmap indexed by plug
* @unused_1: Alignment
* @mask: 256 bit mask bitmap indexed by plug
* @unused_2: Alignment
*
* The HV maintains per SMT thread mappings of HV outlet to HV plug on
* behalf of the guest. These mappings are implemented as 256 bit guest
* supplied bitmaps indexed by plug number. The addresses of the bitmaps
* are registered with the HV through lv1_configure_irq_state_bitmap().
* The HV requires that the 512 bits of status + mask not cross a page
* boundary. PS3_BMP_MINALIGN is used to define this minimal 64 byte
* alignment.
*
* The HV supports 256 plugs per thread, assigned as {0..255}, for a total
* of 512 plugs supported on a processor. To simplify the logic this
* implementation equates HV plug value to Linux virq value, constrains each
* interrupt to have a system wide unique plug number, and limits the range
* of the plug values to map into the first dword of the bitmaps. This
* gives a usable range of plug values of {NR_IRQS_LEGACY..63}. Note
* that there is no constraint on how many in this set an individual thread
* can acquire.
*
* The mask is declared as unsigned long so we can use set/clear_bit on it.
*/
#define PS3_BMP_MINALIGN 64
struct ps3_bmp {
struct {
u64 status;
u64 unused_1[3];
unsigned long mask;
u64 unused_2[3];
};
};
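/*
 * Illustrative note (not part of the original source): plug numbers are
 * mapped into the first 64-bit word of the bitmaps above with plug 0 at
 * the most significant bit, which is why the code below manipulates bit
 * (63 - plug).  A minimal sketch of that convention:
 */
static inline u64 ps3_plug_to_bmp_bit(unsigned int plug)
{
	return 1UL << (63 - plug);
}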
/**
* struct ps3_private - a per cpu data structure
* @bmp: ps3_bmp structure
* @bmp_lock: Synchronize access to bmp.
* @ipi_debug_brk_mask: Mask for debug break IPIs
* @ppe_id: HV logical_ppe_id
* @thread_id: HV thread_id
* @ipi_mask: Mask of IPI virqs
*/
struct ps3_private {
struct ps3_bmp bmp __attribute__ ((aligned (PS3_BMP_MINALIGN)));
spinlock_t bmp_lock;
u64 ppe_id;
u64 thread_id;
unsigned long ipi_debug_brk_mask;
unsigned long ipi_mask;
};
static DEFINE_PER_CPU(struct ps3_private, ps3_private);
/**
 * ps3_chip_mask - Mask (disable) an interrupt in ps3_bmp.
 * @d: The irq_data of the interrupt to mask.
 *
 * Clears the interrupt's bit in ps3_bmp.mask and calls
 * lv1_did_update_interrupt_mask().
*/
static void ps3_chip_mask(struct irq_data *d)
{
struct ps3_private *pd = irq_data_get_irq_chip_data(d);
unsigned long flags;
DBG("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
pd->thread_id, d->irq);
local_irq_save(flags);
clear_bit(63 - d->irq, &pd->bmp.mask);
lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id);
local_irq_restore(flags);
}
/**
 * ps3_chip_unmask - Unmask (enable) an interrupt in ps3_bmp.
 * @d: The irq_data of the interrupt to unmask.
 *
 * Sets the interrupt's bit in ps3_bmp.mask and calls
 * lv1_did_update_interrupt_mask().
*/
static void ps3_chip_unmask(struct irq_data *d)
{
struct ps3_private *pd = irq_data_get_irq_chip_data(d);
unsigned long flags;
DBG("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__,
pd->thread_id, d->irq);
local_irq_save(flags);
set_bit(63 - d->irq, &pd->bmp.mask);
lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id);
local_irq_restore(flags);
}
/**
* ps3_chip_eoi - HV end-of-interrupt.
 * @d: The irq_data of the interrupt.
 *
 * Calls lv1_end_of_interrupt_ext() for non-IPI interrupts; IPIs are
 * EOIed in ps3_get_irq().
*/
static void ps3_chip_eoi(struct irq_data *d)
{
const struct ps3_private *pd = irq_data_get_irq_chip_data(d);
/* non-IPIs are EOIed here. */
if (!test_bit(63 - d->irq, &pd->ipi_mask))
lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq);
}
/**
* ps3_irq_chip - Represents the ps3_bmp as a Linux struct irq_chip.
*/
static struct irq_chip ps3_irq_chip = {
.name = "ps3",
.irq_mask = ps3_chip_mask,
.irq_unmask = ps3_chip_unmask,
.irq_eoi = ps3_chip_eoi,
};
/**
* ps3_virq_setup - virq related setup.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @outlet: The HV outlet from the various create outlet routines.
* @virq: The assigned Linux virq.
*
* Calls irq_create_mapping() to get a virq and sets the chip data to
* ps3_private data.
*/
static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
unsigned int *virq)
{
int result;
struct ps3_private *pd;
/* This defines the default interrupt distribution policy. */
if (cpu == PS3_BINDING_CPU_ANY)
cpu = 0;
pd = &per_cpu(ps3_private, cpu);
*virq = irq_create_mapping(NULL, outlet);
if (!*virq) {
FAIL("%s:%d: irq_create_mapping failed: outlet %lu\n",
__func__, __LINE__, outlet);
result = -ENOMEM;
goto fail_create;
}
DBG("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__,
outlet, cpu, *virq);
result = irq_set_chip_data(*virq, pd);
if (result) {
FAIL("%s:%d: irq_set_chip_data failed\n",
__func__, __LINE__);
goto fail_set;
}
ps3_chip_mask(irq_get_irq_data(*virq));
return result;
fail_set:
irq_dispose_mapping(*virq);
fail_create:
return result;
}
/**
* ps3_virq_destroy - virq related teardown.
* @virq: The assigned Linux virq.
*
* Clears chip data and calls irq_dispose_mapping() for the virq.
*/
static int ps3_virq_destroy(unsigned int virq)
{
const struct ps3_private *pd = irq_get_chip_data(virq);
DBG("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
__LINE__, pd->ppe_id, pd->thread_id, virq);
irq_set_chip_data(virq, NULL);
irq_dispose_mapping(virq);
DBG("%s:%d <-\n", __func__, __LINE__);
return 0;
}
/**
* ps3_irq_plug_setup - Generic outlet and virq related setup.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @outlet: The HV outlet from the various create outlet routines.
* @virq: The assigned Linux virq.
*
* Sets up virq and connects the irq plug.
*/
int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet,
unsigned int *virq)
{
int result;
struct ps3_private *pd;
result = ps3_virq_setup(cpu, outlet, virq);
if (result) {
FAIL("%s:%d: ps3_virq_setup failed\n", __func__, __LINE__);
goto fail_setup;
}
pd = irq_get_chip_data(*virq);
/* Binds outlet to cpu + virq. */
result = lv1_connect_irq_plug_ext(pd->ppe_id, pd->thread_id, *virq,
outlet, 0);
if (result) {
FAIL("%s:%d: lv1_connect_irq_plug_ext failed: %s\n",
__func__, __LINE__, ps3_result(result));
result = -EPERM;
goto fail_connect;
}
return result;
fail_connect:
ps3_virq_destroy(*virq);
fail_setup:
return result;
}
EXPORT_SYMBOL_GPL(ps3_irq_plug_setup);
/**
* ps3_irq_plug_destroy - Generic outlet and virq related teardown.
* @virq: The assigned Linux virq.
*
* Disconnects the irq plug and tears down virq.
* Do not call for system bus event interrupts setup with
* ps3_sb_event_receive_port_setup().
*/
int ps3_irq_plug_destroy(unsigned int virq)
{
int result;
const struct ps3_private *pd = irq_get_chip_data(virq);
DBG("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__,
__LINE__, pd->ppe_id, pd->thread_id, virq);
ps3_chip_mask(irq_get_irq_data(virq));
result = lv1_disconnect_irq_plug_ext(pd->ppe_id, pd->thread_id, virq);
if (result)
FAIL("%s:%d: lv1_disconnect_irq_plug_ext failed: %s\n",
__func__, __LINE__, ps3_result(result));
ps3_virq_destroy(virq);
return result;
}
EXPORT_SYMBOL_GPL(ps3_irq_plug_destroy);
/**
* ps3_event_receive_port_setup - Setup an event receive port.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @virq: The assigned Linux virq.
*
* The virq can be used with lv1_connect_interrupt_event_receive_port() to
* arrange to receive interrupts from system-bus devices, or with
* ps3_send_event_locally() to signal events.
*/
int ps3_event_receive_port_setup(enum ps3_cpu_binding cpu, unsigned int *virq)
{
int result;
u64 outlet;
result = lv1_construct_event_receive_port(&outlet);
if (result) {
FAIL("%s:%d: lv1_construct_event_receive_port failed: %s\n",
__func__, __LINE__, ps3_result(result));
*virq = 0;
return result;
}
result = ps3_irq_plug_setup(cpu, outlet, virq);
BUG_ON(result);
return result;
}
EXPORT_SYMBOL_GPL(ps3_event_receive_port_setup);
/**
* ps3_event_receive_port_destroy - Destroy an event receive port.
* @virq: The assigned Linux virq.
*
* Since ps3_event_receive_port_destroy destroys the receive port outlet,
* SB devices need to call disconnect_interrupt_event_receive_port() before
* this.
*/
int ps3_event_receive_port_destroy(unsigned int virq)
{
int result;
DBG(" -> %s:%d virq %u\n", __func__, __LINE__, virq);
ps3_chip_mask(irq_get_irq_data(virq));
result = lv1_destruct_event_receive_port(virq_to_hw(virq));
if (result)
FAIL("%s:%d: lv1_destruct_event_receive_port failed: %s\n",
__func__, __LINE__, ps3_result(result));
/*
* Don't call ps3_virq_destroy() here since ps3_smp_cleanup_cpu()
* calls from interrupt context (smp_call_function) when kexecing.
*/
DBG(" <- %s:%d\n", __func__, __LINE__);
return result;
}
int ps3_send_event_locally(unsigned int virq)
{
return lv1_send_event_locally(virq_to_hw(virq));
}
/**
* ps3_sb_event_receive_port_setup - Setup a system bus event receive port.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @dev: The system bus device instance.
* @virq: The assigned Linux virq.
*
* An event irq represents a virtual device interrupt. The interrupt_id
 * corresponds to the software interrupt number.
*/
int ps3_sb_event_receive_port_setup(struct ps3_system_bus_device *dev,
enum ps3_cpu_binding cpu, unsigned int *virq)
{
/* this should go in system-bus.c */
int result;
result = ps3_event_receive_port_setup(cpu, virq);
if (result)
return result;
result = lv1_connect_interrupt_event_receive_port(dev->bus_id,
dev->dev_id, virq_to_hw(*virq), dev->interrupt_id);
if (result) {
FAIL("%s:%d: lv1_connect_interrupt_event_receive_port"
" failed: %s\n", __func__, __LINE__,
ps3_result(result));
ps3_event_receive_port_destroy(*virq);
*virq = 0;
return result;
}
DBG("%s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
dev->interrupt_id, *virq);
return 0;
}
EXPORT_SYMBOL(ps3_sb_event_receive_port_setup);
int ps3_sb_event_receive_port_destroy(struct ps3_system_bus_device *dev,
unsigned int virq)
{
/* this should go in system-bus.c */
int result;
DBG(" -> %s:%d: interrupt_id %u, virq %u\n", __func__, __LINE__,
dev->interrupt_id, virq);
result = lv1_disconnect_interrupt_event_receive_port(dev->bus_id,
dev->dev_id, virq_to_hw(virq), dev->interrupt_id);
if (result)
FAIL("%s:%d: lv1_disconnect_interrupt_event_receive_port"
" failed: %s\n", __func__, __LINE__,
ps3_result(result));
result = ps3_event_receive_port_destroy(virq);
BUG_ON(result);
/*
* ps3_event_receive_port_destroy() destroys the IRQ plug,
* so don't call ps3_irq_plug_destroy() here.
*/
result = ps3_virq_destroy(virq);
BUG_ON(result);
DBG(" <- %s:%d\n", __func__, __LINE__);
return result;
}
EXPORT_SYMBOL(ps3_sb_event_receive_port_destroy);
/**
* ps3_io_irq_setup - Setup a system bus io irq.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @interrupt_id: The device interrupt id read from the system repository.
* @virq: The assigned Linux virq.
*
* An io irq represents a non-virtualized device interrupt. interrupt_id
 * corresponds to the interrupt number of the interrupt controller.
*/
int ps3_io_irq_setup(enum ps3_cpu_binding cpu, unsigned int interrupt_id,
unsigned int *virq)
{
int result;
u64 outlet;
result = lv1_construct_io_irq_outlet(interrupt_id, &outlet);
if (result) {
FAIL("%s:%d: lv1_construct_io_irq_outlet failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = ps3_irq_plug_setup(cpu, outlet, virq);
BUG_ON(result);
return result;
}
EXPORT_SYMBOL_GPL(ps3_io_irq_setup);
int ps3_io_irq_destroy(unsigned int virq)
{
int result;
unsigned long outlet = virq_to_hw(virq);
ps3_chip_mask(irq_get_irq_data(virq));
/*
* lv1_destruct_io_irq_outlet() will destroy the IRQ plug,
* so call ps3_irq_plug_destroy() first.
*/
result = ps3_irq_plug_destroy(virq);
BUG_ON(result);
result = lv1_destruct_io_irq_outlet(outlet);
if (result)
FAIL("%s:%d: lv1_destruct_io_irq_outlet failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
EXPORT_SYMBOL_GPL(ps3_io_irq_destroy);
/**
* ps3_vuart_irq_setup - Setup the system virtual uart virq.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @virt_addr_bmp: The caller supplied virtual uart interrupt bitmap.
* @virq: The assigned Linux virq.
*
* The system supports only a single virtual uart, so multiple calls without
* freeing the interrupt will return a wrong state error.
*/
int ps3_vuart_irq_setup(enum ps3_cpu_binding cpu, void* virt_addr_bmp,
unsigned int *virq)
{
int result;
u64 outlet;
u64 lpar_addr;
BUG_ON(!is_kernel_addr((u64)virt_addr_bmp));
lpar_addr = ps3_mm_phys_to_lpar(__pa(virt_addr_bmp));
result = lv1_configure_virtual_uart_irq(lpar_addr, &outlet);
if (result) {
FAIL("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = ps3_irq_plug_setup(cpu, outlet, virq);
BUG_ON(result);
return result;
}
EXPORT_SYMBOL_GPL(ps3_vuart_irq_setup);
int ps3_vuart_irq_destroy(unsigned int virq)
{
int result;
ps3_chip_mask(irq_get_irq_data(virq));
result = lv1_deconfigure_virtual_uart_irq();
if (result) {
FAIL("%s:%d: lv1_configure_virtual_uart_irq failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = ps3_irq_plug_destroy(virq);
BUG_ON(result);
return result;
}
EXPORT_SYMBOL_GPL(ps3_vuart_irq_destroy);
/**
* ps3_spe_irq_setup - Setup an spe virq.
* @cpu: enum ps3_cpu_binding indicating the cpu the interrupt should be
* serviced on.
* @spe_id: The spe_id returned from lv1_construct_logical_spe().
* @class: The spe interrupt class {0,1,2}.
* @virq: The assigned Linux virq.
*
*/
int ps3_spe_irq_setup(enum ps3_cpu_binding cpu, unsigned long spe_id,
unsigned int class, unsigned int *virq)
{
int result;
u64 outlet;
BUG_ON(class > 2);
result = lv1_get_spe_irq_outlet(spe_id, class, &outlet);
if (result) {
FAIL("%s:%d: lv1_get_spe_irq_outlet failed: %s\n",
__func__, __LINE__, ps3_result(result));
return result;
}
result = ps3_irq_plug_setup(cpu, outlet, virq);
BUG_ON(result);
return result;
}
int ps3_spe_irq_destroy(unsigned int virq)
{
int result;
ps3_chip_mask(irq_get_irq_data(virq));
result = ps3_irq_plug_destroy(virq);
BUG_ON(result);
return result;
}
#define PS3_INVALID_OUTLET ((irq_hw_number_t)-1)
#define PS3_PLUG_MAX 63
#if defined(DEBUG)
static void _dump_64_bmp(const char *header, const u64 *p, unsigned cpu,
const char* func, int line)
{
pr_debug("%s:%d: %s %u {%04llx_%04llx_%04llx_%04llx}\n",
func, line, header, cpu,
*p >> 48, (*p >> 32) & 0xffff, (*p >> 16) & 0xffff,
*p & 0xffff);
}
static void __maybe_unused _dump_256_bmp(const char *header,
const u64 *p, unsigned cpu, const char* func, int line)
{
pr_debug("%s:%d: %s %u {%016llx:%016llx:%016llx:%016llx}\n",
func, line, header, cpu, p[0], p[1], p[2], p[3]);
}
#define dump_bmp(_x) _dump_bmp(_x, __func__, __LINE__)
static void _dump_bmp(struct ps3_private* pd, const char* func, int line)
{
unsigned long flags;
spin_lock_irqsave(&pd->bmp_lock, flags);
_dump_64_bmp("stat", &pd->bmp.status, pd->thread_id, func, line);
_dump_64_bmp("mask", (u64*)&pd->bmp.mask, pd->thread_id, func, line);
spin_unlock_irqrestore(&pd->bmp_lock, flags);
}
#define dump_mask(_x) _dump_mask(_x, __func__, __LINE__)
static void __maybe_unused _dump_mask(struct ps3_private *pd,
const char* func, int line)
{
unsigned long flags;
spin_lock_irqsave(&pd->bmp_lock, flags);
_dump_64_bmp("mask", (u64*)&pd->bmp.mask, pd->thread_id, func, line);
spin_unlock_irqrestore(&pd->bmp_lock, flags);
}
#else
static void dump_bmp(struct ps3_private* pd) {}
#endif /* defined(DEBUG) */
static int ps3_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hwirq)
{
DBG("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq,
virq);
irq_set_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq);
return 0;
}
static int ps3_host_match(struct irq_domain *h, struct device_node *np,
enum irq_domain_bus_token bus_token)
{
/* Match all */
return 1;
}
static const struct irq_domain_ops ps3_host_ops = {
.map = ps3_host_map,
.match = ps3_host_match,
};
void __init ps3_register_ipi_debug_brk(unsigned int cpu, unsigned int virq)
{
struct ps3_private *pd = &per_cpu(ps3_private, cpu);
set_bit(63 - virq, &pd->ipi_debug_brk_mask);
DBG("%s:%d: cpu %u, virq %u, mask %lxh\n", __func__, __LINE__,
cpu, virq, pd->ipi_debug_brk_mask);
}
void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq)
{
struct ps3_private *pd = &per_cpu(ps3_private, cpu);
set_bit(63 - virq, &pd->ipi_mask);
DBG("%s:%d: cpu %u, virq %u, ipi_mask %lxh\n", __func__, __LINE__,
cpu, virq, pd->ipi_mask);
}
static unsigned int ps3_get_irq(void)
{
struct ps3_private *pd = this_cpu_ptr(&ps3_private);
u64 x = (pd->bmp.status & pd->bmp.mask);
unsigned int plug;
/* check for ipi break first to stop this cpu ASAP */
if (x & pd->ipi_debug_brk_mask)
x &= pd->ipi_debug_brk_mask;
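	/*
	 * Plug n lives at bit (63 - n), so counting the leading zeros of
	 * status & mask yields the lowest numbered pending plug.  With no
	 * bits set cntlzd returns 64, which "& 0x3f" folds to 0 ("no plug").
	 */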
asm volatile("cntlzd %0,%1" : "=r" (plug) : "r" (x));
plug &= 0x3f;
if (unlikely(!plug)) {
DBG("%s:%d: no plug found: thread_id %llu\n", __func__,
__LINE__, pd->thread_id);
dump_bmp(&per_cpu(ps3_private, 0));
dump_bmp(&per_cpu(ps3_private, 1));
return 0;
}
#if defined(DEBUG)
if (unlikely(plug < NR_IRQS_LEGACY || plug > PS3_PLUG_MAX)) {
dump_bmp(&per_cpu(ps3_private, 0));
dump_bmp(&per_cpu(ps3_private, 1));
BUG();
}
#endif
/* IPIs are EOIed here. */
if (test_bit(63 - plug, &pd->ipi_mask))
lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, plug);
return plug;
}
void __init ps3_init_IRQ(void)
{
int result;
unsigned cpu;
struct irq_domain *host;
host = irq_domain_add_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL);
irq_set_default_host(host);
for_each_possible_cpu(cpu) {
struct ps3_private *pd = &per_cpu(ps3_private, cpu);
lv1_get_logical_ppe_id(&pd->ppe_id);
pd->thread_id = get_hard_smp_processor_id(cpu);
spin_lock_init(&pd->bmp_lock);
DBG("%s:%d: ppe_id %llu, thread_id %llu, bmp %lxh\n",
__func__, __LINE__, pd->ppe_id, pd->thread_id,
ps3_mm_phys_to_lpar(__pa(&pd->bmp)));
result = lv1_configure_irq_state_bitmap(pd->ppe_id,
pd->thread_id, ps3_mm_phys_to_lpar(__pa(&pd->bmp)));
if (result)
FAIL("%s:%d: lv1_configure_irq_state_bitmap failed:"
" %s\n", __func__, __LINE__,
ps3_result(result));
}
ppc_md.get_irq = ps3_get_irq;
}
void ps3_shutdown_IRQ(int cpu)
{
int result;
u64 ppe_id;
u64 thread_id = get_hard_smp_processor_id(cpu);
lv1_get_logical_ppe_id(&ppe_id);
result = lv1_configure_irq_state_bitmap(ppe_id, thread_id, 0);
DBG("%s:%d: lv1_configure_irq_state_bitmap (%llu:%llu/%d) %s\n", __func__,
__LINE__, ppe_id, thread_id, cpu, ps3_result(result));
}
| linux-master | arch/powerpc/platforms/ps3/interrupt.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PS3 pagetable management routines.
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006, 2007 Sony Corporation
*/
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/ps3fb.h>
#define PS3_VERBOSE_RESULT
#include "platform.h"
/**
 * enum ps3_lpar_vas_id - id of LPAR virtual address space.
 * @PS3_LPAR_VAS_ID_CURRENT: Currently selected virtual address space.
*
* Identify the target LPAR address space.
*/
enum ps3_lpar_vas_id {
PS3_LPAR_VAS_ID_CURRENT = 0,
};
static DEFINE_SPINLOCK(ps3_htab_lock);
static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn,
unsigned long pa, unsigned long rflags, unsigned long vflags,
int psize, int apsize, int ssize)
{
int result;
u64 hpte_v, hpte_r;
u64 inserted_index;
u64 evicted_v, evicted_r;
u64 hpte_v_array[4], hpte_rs;
unsigned long flags;
long ret = -1;
/*
* lv1_insert_htab_entry() will search for victim
* entry in both primary and secondary pte group
*/
vflags &= ~HPTE_V_SECONDARY;
hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize, apsize) | rflags;
spin_lock_irqsave(&ps3_htab_lock, flags);
	/* ask the HV to insert, evicting a victim with BOLTED == 0 if needed */
result = lv1_insert_htab_entry(PS3_LPAR_VAS_ID_CURRENT, hpte_group,
hpte_v, hpte_r,
HPTE_V_BOLTED, 0,
&inserted_index,
&evicted_v, &evicted_r);
if (result) {
		/* all entries bolted! */
pr_info("%s:result=%s vpn=%lx pa=%lx ix=%lx v=%llx r=%llx\n",
__func__, ps3_result(result), vpn, pa, hpte_group,
hpte_v, hpte_r);
BUG();
}
/*
* see if the entry is inserted into secondary pteg
*/
result = lv1_read_htab_entries(PS3_LPAR_VAS_ID_CURRENT,
inserted_index & ~0x3UL,
&hpte_v_array[0], &hpte_v_array[1],
&hpte_v_array[2], &hpte_v_array[3],
&hpte_rs);
BUG_ON(result);
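	/*
	 * The generic hash MMU code expects the slot number within the PTE
	 * group, with bit 3 set when the entry landed in the secondary group.
	 */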
if (hpte_v_array[inserted_index % 4] & HPTE_V_SECONDARY)
ret = (inserted_index & 7) | (1 << 3);
else
ret = inserted_index & 7;
spin_unlock_irqrestore(&ps3_htab_lock, flags);
return ret;
}
static long ps3_hpte_remove(unsigned long hpte_group)
{
panic("ps3_hpte_remove() not implemented");
return 0;
}
static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
unsigned long vpn, int psize, int apsize,
int ssize, unsigned long inv_flags)
{
int result;
u64 hpte_v, want_v, hpte_rs;
u64 hpte_v_array[4];
unsigned long flags;
long ret;
want_v = hpte_encode_avpn(vpn, psize, ssize);
spin_lock_irqsave(&ps3_htab_lock, flags);
result = lv1_read_htab_entries(PS3_LPAR_VAS_ID_CURRENT, slot & ~0x3UL,
&hpte_v_array[0], &hpte_v_array[1],
&hpte_v_array[2], &hpte_v_array[3],
&hpte_rs);
if (result) {
pr_info("%s: result=%s read vpn=%lx slot=%lx psize=%d\n",
__func__, ps3_result(result), vpn, slot, psize);
BUG();
}
hpte_v = hpte_v_array[slot % 4];
/*
* As lv1_read_htab_entries() does not give us the RPN, we can
* not synthesize the new hpte_r value here, and therefore can
* not update the hpte with lv1_insert_htab_entry(), so we
* instead invalidate it and ask the caller to update it via
* ps3_hpte_insert() by returning a -1 value.
*/
if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
/* not found */
ret = -1;
} else {
/* entry found, just invalidate it */
result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT,
slot, 0, 0);
ret = -1;
}
spin_unlock_irqrestore(&ps3_htab_lock, flags);
return ret;
}
static void ps3_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
int psize, int ssize)
{
pr_info("ps3_hpte_updateboltedpp() not implemented");
}
static void ps3_hpte_invalidate(unsigned long slot, unsigned long vpn,
int psize, int apsize, int ssize, int local)
{
unsigned long flags;
int result;
spin_lock_irqsave(&ps3_htab_lock, flags);
result = lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, slot, 0, 0);
if (result) {
pr_info("%s: result=%s vpn=%lx slot=%lx psize=%d\n",
__func__, ps3_result(result), vpn, slot, psize);
BUG();
}
spin_unlock_irqrestore(&ps3_htab_lock, flags);
}
/* Called during kexec sequence with MMU off */
static notrace void ps3_hpte_clear(void)
{
unsigned long hpte_count = (1UL << ppc64_pft_size) >> 4;
u64 i;
for (i = 0; i < hpte_count; i++)
lv1_write_htab_entry(PS3_LPAR_VAS_ID_CURRENT, i, 0, 0);
ps3_mm_shutdown();
ps3_mm_vas_destroy();
}
void __init ps3_hpte_init(unsigned long htab_size)
{
mmu_hash_ops.hpte_invalidate = ps3_hpte_invalidate;
mmu_hash_ops.hpte_updatepp = ps3_hpte_updatepp;
mmu_hash_ops.hpte_updateboltedpp = ps3_hpte_updateboltedpp;
mmu_hash_ops.hpte_insert = ps3_hpte_insert;
mmu_hash_ops.hpte_remove = ps3_hpte_remove;
mmu_hash_ops.hpte_clear_all = ps3_hpte_clear;
ppc64_pft_size = __ilog2(htab_size);
}
| linux-master | arch/powerpc/platforms/ps3/htab.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PS3 system bus driver.
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/firmware.h>
#include <asm/cell-regs.h>
#include "platform.h"
static struct device ps3_system_bus = {
.init_name = "ps3_system",
};
/* FIXME: need device usage counters! */
static struct {
struct mutex mutex;
int sb_11; /* usb 0 */
int sb_12; /* usb 0 */
int gpu;
} usage_hack;
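/*
 * Reference counts for HV devices that are opened by more than one driver
 * (the two USB controllers and the GPU).  ps3_open_hv_device_sb() and
 * ps3_open_hv_device_gpu() issue the lv1 open call only on the first open,
 * and the matching close helpers issue the lv1 close only on the last close.
 */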
static int ps3_is_device(struct ps3_system_bus_device *dev, u64 bus_id,
u64 dev_id)
{
return dev->bus_id == bus_id && dev->dev_id == dev_id;
}
static int ps3_open_hv_device_sb(struct ps3_system_bus_device *dev)
{
int result;
BUG_ON(!dev->bus_id);
mutex_lock(&usage_hack.mutex);
if (ps3_is_device(dev, 1, 1)) {
usage_hack.sb_11++;
if (usage_hack.sb_11 > 1) {
result = 0;
goto done;
}
}
if (ps3_is_device(dev, 1, 2)) {
usage_hack.sb_12++;
if (usage_hack.sb_12 > 1) {
result = 0;
goto done;
}
}
result = lv1_open_device(dev->bus_id, dev->dev_id, 0);
if (result) {
pr_warn("%s:%d: lv1_open_device dev=%u.%u(%s) failed: %s\n",
__func__, __LINE__, dev->match_id, dev->match_sub_id,
dev_name(&dev->core), ps3_result(result));
result = -EPERM;
}
done:
mutex_unlock(&usage_hack.mutex);
return result;
}
static int ps3_close_hv_device_sb(struct ps3_system_bus_device *dev)
{
int result;
BUG_ON(!dev->bus_id);
mutex_lock(&usage_hack.mutex);
if (ps3_is_device(dev, 1, 1)) {
usage_hack.sb_11--;
if (usage_hack.sb_11) {
result = 0;
goto done;
}
}
if (ps3_is_device(dev, 1, 2)) {
usage_hack.sb_12--;
if (usage_hack.sb_12) {
result = 0;
goto done;
}
}
result = lv1_close_device(dev->bus_id, dev->dev_id);
BUG_ON(result);
done:
mutex_unlock(&usage_hack.mutex);
return result;
}
static int ps3_open_hv_device_gpu(struct ps3_system_bus_device *dev)
{
int result;
mutex_lock(&usage_hack.mutex);
usage_hack.gpu++;
if (usage_hack.gpu > 1) {
result = 0;
goto done;
}
result = lv1_gpu_open(0);
if (result) {
pr_warn("%s:%d: lv1_gpu_open failed: %s\n", __func__,
__LINE__, ps3_result(result));
result = -EPERM;
}
done:
mutex_unlock(&usage_hack.mutex);
return result;
}
static int ps3_close_hv_device_gpu(struct ps3_system_bus_device *dev)
{
int result;
mutex_lock(&usage_hack.mutex);
usage_hack.gpu--;
if (usage_hack.gpu) {
result = 0;
goto done;
}
result = lv1_gpu_close();
BUG_ON(result);
done:
mutex_unlock(&usage_hack.mutex);
return result;
}
int ps3_open_hv_device(struct ps3_system_bus_device *dev)
{
BUG_ON(!dev);
pr_debug("%s:%d: match_id: %u\n", __func__, __LINE__, dev->match_id);
switch (dev->match_id) {
case PS3_MATCH_ID_EHCI:
case PS3_MATCH_ID_OHCI:
case PS3_MATCH_ID_GELIC:
case PS3_MATCH_ID_STOR_DISK:
case PS3_MATCH_ID_STOR_ROM:
case PS3_MATCH_ID_STOR_FLASH:
return ps3_open_hv_device_sb(dev);
case PS3_MATCH_ID_SOUND:
case PS3_MATCH_ID_GPU:
return ps3_open_hv_device_gpu(dev);
case PS3_MATCH_ID_AV_SETTINGS:
case PS3_MATCH_ID_SYSTEM_MANAGER:
pr_debug("%s:%d: unsupported match_id: %u\n", __func__,
__LINE__, dev->match_id);
pr_debug("%s:%d: bus_id: %llu\n", __func__, __LINE__,
dev->bus_id);
BUG();
return -EINVAL;
default:
break;
}
pr_debug("%s:%d: unknown match_id: %u\n", __func__, __LINE__,
dev->match_id);
BUG();
return -ENODEV;
}
EXPORT_SYMBOL_GPL(ps3_open_hv_device);
int ps3_close_hv_device(struct ps3_system_bus_device *dev)
{
BUG_ON(!dev);
pr_debug("%s:%d: match_id: %u\n", __func__, __LINE__, dev->match_id);
switch (dev->match_id) {
case PS3_MATCH_ID_EHCI:
case PS3_MATCH_ID_OHCI:
case PS3_MATCH_ID_GELIC:
case PS3_MATCH_ID_STOR_DISK:
case PS3_MATCH_ID_STOR_ROM:
case PS3_MATCH_ID_STOR_FLASH:
return ps3_close_hv_device_sb(dev);
case PS3_MATCH_ID_SOUND:
case PS3_MATCH_ID_GPU:
return ps3_close_hv_device_gpu(dev);
case PS3_MATCH_ID_AV_SETTINGS:
case PS3_MATCH_ID_SYSTEM_MANAGER:
pr_debug("%s:%d: unsupported match_id: %u\n", __func__,
__LINE__, dev->match_id);
pr_debug("%s:%d: bus_id: %llu\n", __func__, __LINE__,
dev->bus_id);
BUG();
return -EINVAL;
default:
break;
}
pr_debug("%s:%d: unknown match_id: %u\n", __func__, __LINE__,
dev->match_id);
BUG();
return -ENODEV;
}
EXPORT_SYMBOL_GPL(ps3_close_hv_device);
#define dump_mmio_region(_a) _dump_mmio_region(_a, __func__, __LINE__)
static void _dump_mmio_region(const struct ps3_mmio_region* r,
const char* func, int line)
{
pr_debug("%s:%d: dev %llu:%llu\n", func, line, r->dev->bus_id,
r->dev->dev_id);
pr_debug("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
pr_debug("%s:%d: len %lxh\n", func, line, r->len);
pr_debug("%s:%d: lpar_addr %lxh\n", func, line, r->lpar_addr);
}
static int ps3_sb_mmio_region_create(struct ps3_mmio_region *r)
{
int result;
u64 lpar_addr;
result = lv1_map_device_mmio_region(r->dev->bus_id, r->dev->dev_id,
r->bus_addr, r->len, r->page_size, &lpar_addr);
r->lpar_addr = lpar_addr;
if (result) {
pr_debug("%s:%d: lv1_map_device_mmio_region failed: %s\n",
__func__, __LINE__, ps3_result(result));
r->lpar_addr = 0;
}
dump_mmio_region(r);
return result;
}
static int ps3_ioc0_mmio_region_create(struct ps3_mmio_region *r)
{
/* device specific; do nothing currently */
return 0;
}
int ps3_mmio_region_create(struct ps3_mmio_region *r)
{
return r->mmio_ops->create(r);
}
EXPORT_SYMBOL_GPL(ps3_mmio_region_create);
static int ps3_sb_free_mmio_region(struct ps3_mmio_region *r)
{
int result;
dump_mmio_region(r);
result = lv1_unmap_device_mmio_region(r->dev->bus_id, r->dev->dev_id,
r->lpar_addr);
if (result)
pr_debug("%s:%d: lv1_unmap_device_mmio_region failed: %s\n",
__func__, __LINE__, ps3_result(result));
r->lpar_addr = 0;
return result;
}
static int ps3_ioc0_free_mmio_region(struct ps3_mmio_region *r)
{
/* device specific; do nothing currently */
return 0;
}
int ps3_free_mmio_region(struct ps3_mmio_region *r)
{
return r->mmio_ops->free(r);
}
EXPORT_SYMBOL_GPL(ps3_free_mmio_region);
static const struct ps3_mmio_region_ops ps3_mmio_sb_region_ops = {
.create = ps3_sb_mmio_region_create,
.free = ps3_sb_free_mmio_region
};
static const struct ps3_mmio_region_ops ps3_mmio_ioc0_region_ops = {
.create = ps3_ioc0_mmio_region_create,
.free = ps3_ioc0_free_mmio_region
};
int ps3_mmio_region_init(struct ps3_system_bus_device *dev,
struct ps3_mmio_region *r, unsigned long bus_addr, unsigned long len,
enum ps3_mmio_page_size page_size)
{
r->dev = dev;
r->bus_addr = bus_addr;
r->len = len;
r->page_size = page_size;
switch (dev->dev_type) {
case PS3_DEVICE_TYPE_SB:
r->mmio_ops = &ps3_mmio_sb_region_ops;
break;
case PS3_DEVICE_TYPE_IOC0:
r->mmio_ops = &ps3_mmio_ioc0_region_ops;
break;
default:
BUG();
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL_GPL(ps3_mmio_region_init);
static int ps3_system_bus_match(struct device *_dev,
struct device_driver *_drv)
{
int result;
struct ps3_system_bus_driver *drv = ps3_drv_to_system_bus_drv(_drv);
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
if (!dev->match_sub_id)
result = dev->match_id == drv->match_id;
else
result = dev->match_sub_id == drv->match_sub_id &&
dev->match_id == drv->match_id;
if (result)
pr_info("%s:%d: dev=%u.%u(%s), drv=%u.%u(%s): match\n",
__func__, __LINE__,
dev->match_id, dev->match_sub_id, dev_name(&dev->core),
drv->match_id, drv->match_sub_id, drv->core.name);
else
pr_debug("%s:%d: dev=%u.%u(%s), drv=%u.%u(%s): miss\n",
__func__, __LINE__,
dev->match_id, dev->match_sub_id, dev_name(&dev->core),
drv->match_id, drv->match_sub_id, drv->core.name);
return result;
}
static int ps3_system_bus_probe(struct device *_dev)
{
int result = 0;
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
struct ps3_system_bus_driver *drv;
BUG_ON(!dev);
dev_dbg(_dev, "%s:%d\n", __func__, __LINE__);
drv = ps3_system_bus_dev_to_system_bus_drv(dev);
BUG_ON(!drv);
if (drv->probe)
result = drv->probe(dev);
else
pr_debug("%s:%d: %s no probe method\n", __func__, __LINE__,
dev_name(&dev->core));
pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev_name(&dev->core));
return result;
}
static void ps3_system_bus_remove(struct device *_dev)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
struct ps3_system_bus_driver *drv;
BUG_ON(!dev);
dev_dbg(_dev, "%s:%d\n", __func__, __LINE__);
drv = ps3_system_bus_dev_to_system_bus_drv(dev);
BUG_ON(!drv);
if (drv->remove)
drv->remove(dev);
else
dev_dbg(&dev->core, "%s:%d %s: no remove method\n",
__func__, __LINE__, drv->core.name);
pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev_name(&dev->core));
}
static void ps3_system_bus_shutdown(struct device *_dev)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
struct ps3_system_bus_driver *drv;
BUG_ON(!dev);
dev_dbg(&dev->core, " -> %s:%d: match_id %d\n", __func__, __LINE__,
dev->match_id);
if (!dev->core.driver) {
dev_dbg(&dev->core, "%s:%d: no driver bound\n", __func__,
__LINE__);
return;
}
drv = ps3_system_bus_dev_to_system_bus_drv(dev);
BUG_ON(!drv);
dev_dbg(&dev->core, "%s:%d: %s -> %s\n", __func__, __LINE__,
dev_name(&dev->core), drv->core.name);
if (drv->shutdown)
drv->shutdown(dev);
else if (drv->remove) {
dev_dbg(&dev->core, "%s:%d %s: no shutdown, calling remove\n",
__func__, __LINE__, drv->core.name);
drv->remove(dev);
} else {
dev_dbg(&dev->core, "%s:%d %s: no shutdown method\n",
__func__, __LINE__, drv->core.name);
BUG();
}
dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
}
static int ps3_system_bus_uevent(const struct device *_dev, struct kobj_uevent_env *env)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
if (add_uevent_var(env, "MODALIAS=ps3:%d:%d", dev->match_id,
dev->match_sub_id))
return -ENOMEM;
return 0;
}
static ssize_t modalias_show(struct device *_dev, struct device_attribute *a,
char *buf)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
int len = snprintf(buf, PAGE_SIZE, "ps3:%d:%d\n", dev->match_id,
dev->match_sub_id);
return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
static DEVICE_ATTR_RO(modalias);
static struct attribute *ps3_system_bus_dev_attrs[] = {
&dev_attr_modalias.attr,
NULL,
};
ATTRIBUTE_GROUPS(ps3_system_bus_dev);
static struct bus_type ps3_system_bus_type = {
.name = "ps3_system_bus",
.match = ps3_system_bus_match,
.uevent = ps3_system_bus_uevent,
.probe = ps3_system_bus_probe,
.remove = ps3_system_bus_remove,
.shutdown = ps3_system_bus_shutdown,
.dev_groups = ps3_system_bus_dev_groups,
};
static int __init ps3_system_bus_init(void)
{
int result;
if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
return -ENODEV;
pr_debug(" -> %s:%d\n", __func__, __LINE__);
mutex_init(&usage_hack.mutex);
result = device_register(&ps3_system_bus);
BUG_ON(result);
result = bus_register(&ps3_system_bus_type);
BUG_ON(result);
pr_debug(" <- %s:%d\n", __func__, __LINE__);
return result;
}
core_initcall(ps3_system_bus_init);
/* Allocates a contiguous real buffer and creates mappings over it.
* Returns the virtual address of the buffer and sets dma_handle
* to the dma address (mapping) of the first page.
*/
static void * ps3_alloc_coherent(struct device *_dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
int result;
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
unsigned long virt_addr;
flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
flag |= __GFP_ZERO;
virt_addr = __get_free_pages(flag, get_order(size));
if (!virt_addr) {
pr_debug("%s:%d: get_free_pages failed\n", __func__, __LINE__);
goto clean_none;
}
result = ps3_dma_map(dev->d_region, virt_addr, size, dma_handle,
CBE_IOPTE_PP_W | CBE_IOPTE_PP_R |
CBE_IOPTE_SO_RW | CBE_IOPTE_M);
if (result) {
pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
__func__, __LINE__, result);
BUG_ON("check region type");
goto clean_alloc;
}
return (void*)virt_addr;
clean_alloc:
free_pages(virt_addr, get_order(size));
clean_none:
	*dma_handle = 0;
return NULL;
}
static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
ps3_dma_unmap(dev->d_region, dma_handle, size);
free_pages((unsigned long)vaddr, get_order(size));
}
/* Creates TCEs for a user provided buffer. The user buffer must be
* contiguous real kernel storage (not vmalloc). The address passed here
* comprises a page address and offset into that page. The dma_addr_t
* returned will point to the same byte within the page as was passed in.
*/
static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction direction,
unsigned long attrs)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
int result;
dma_addr_t bus_addr;
void *ptr = page_address(page) + offset;
result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
&bus_addr,
CBE_IOPTE_PP_R | CBE_IOPTE_PP_W |
CBE_IOPTE_SO_RW | CBE_IOPTE_M);
if (result) {
pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
__func__, __LINE__, result);
}
return bus_addr;
}
static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
unsigned long attrs)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
int result;
dma_addr_t bus_addr;
u64 iopte_flag;
void *ptr = page_address(page) + offset;
iopte_flag = CBE_IOPTE_M;
switch (direction) {
case DMA_BIDIRECTIONAL:
iopte_flag |= CBE_IOPTE_PP_R | CBE_IOPTE_PP_W | CBE_IOPTE_SO_RW;
break;
case DMA_TO_DEVICE:
iopte_flag |= CBE_IOPTE_PP_R | CBE_IOPTE_SO_R;
break;
case DMA_FROM_DEVICE:
iopte_flag |= CBE_IOPTE_PP_W | CBE_IOPTE_SO_RW;
break;
default:
		/* cannot happen */
BUG();
}
result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
&bus_addr, iopte_flag);
if (result) {
pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
__func__, __LINE__, result);
}
return bus_addr;
}
static void ps3_unmap_page(struct device *_dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction, unsigned long attrs)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
int result;
result = ps3_dma_unmap(dev->d_region, dma_addr, size);
if (result) {
pr_debug("%s:%d: ps3_dma_unmap failed (%d)\n",
__func__, __LINE__, result);
}
}
static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction, unsigned long attrs)
{
#if defined(CONFIG_PS3_DYNAMIC_DMA)
BUG_ON("do");
return -EPERM;
#else
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
struct scatterlist *sg;
int i;
for_each_sg(sgl, sg, nents, i) {
int result = ps3_dma_map(dev->d_region, sg_phys(sg),
sg->length, &sg->dma_address, 0);
if (result) {
pr_debug("%s:%d: ps3_dma_map failed (%d)\n",
__func__, __LINE__, result);
return -EINVAL;
}
sg->dma_length = sg->length;
}
return nents;
#endif
}
static int ps3_ioc0_map_sg(struct device *_dev, struct scatterlist *sg,
int nents,
enum dma_data_direction direction,
unsigned long attrs)
{
BUG();
return -EINVAL;
}
static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction, unsigned long attrs)
{
#if defined(CONFIG_PS3_DYNAMIC_DMA)
BUG_ON("do");
#endif
}
static void ps3_ioc0_unmap_sg(struct device *_dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
BUG();
}
static int ps3_dma_supported(struct device *_dev, u64 mask)
{
return mask >= DMA_BIT_MASK(32);
}
static const struct dma_map_ops ps3_sb_dma_ops = {
.alloc = ps3_alloc_coherent,
.free = ps3_free_coherent,
.map_sg = ps3_sb_map_sg,
.unmap_sg = ps3_sb_unmap_sg,
.dma_supported = ps3_dma_supported,
.map_page = ps3_sb_map_page,
.unmap_page = ps3_unmap_page,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
};
static const struct dma_map_ops ps3_ioc0_dma_ops = {
.alloc = ps3_alloc_coherent,
.free = ps3_free_coherent,
.map_sg = ps3_ioc0_map_sg,
.unmap_sg = ps3_ioc0_unmap_sg,
.dma_supported = ps3_dma_supported,
.map_page = ps3_ioc0_map_page,
.unmap_page = ps3_unmap_page,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
};
/**
* ps3_system_bus_release_device - remove a device from the system bus
*/
static void ps3_system_bus_release_device(struct device *_dev)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
kfree(dev);
}
/**
* ps3_system_bus_device_register - add a device to the system bus
*
* ps3_system_bus_device_register() expects the dev object to be allocated
* dynamically by the caller. The system bus takes ownership of the dev
* object and frees the object in ps3_system_bus_release_device().
*/
int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
{
int result;
static unsigned int dev_ioc0_count;
static unsigned int dev_sb_count;
static unsigned int dev_vuart_count;
static unsigned int dev_lpm_count;
if (!dev->core.parent)
dev->core.parent = &ps3_system_bus;
dev->core.bus = &ps3_system_bus_type;
dev->core.release = ps3_system_bus_release_device;
switch (dev->dev_type) {
case PS3_DEVICE_TYPE_IOC0:
dev->core.dma_ops = &ps3_ioc0_dma_ops;
dev_set_name(&dev->core, "ioc0_%02x", ++dev_ioc0_count);
break;
case PS3_DEVICE_TYPE_SB:
dev->core.dma_ops = &ps3_sb_dma_ops;
dev_set_name(&dev->core, "sb_%02x", ++dev_sb_count);
break;
case PS3_DEVICE_TYPE_VUART:
dev_set_name(&dev->core, "vuart_%02x", ++dev_vuart_count);
break;
case PS3_DEVICE_TYPE_LPM:
dev_set_name(&dev->core, "lpm_%02x", ++dev_lpm_count);
break;
default:
BUG();
}
dev->core.of_node = NULL;
set_dev_node(&dev->core, 0);
pr_debug("%s:%d add %s\n", __func__, __LINE__, dev_name(&dev->core));
result = device_register(&dev->core);
return result;
}
EXPORT_SYMBOL_GPL(ps3_system_bus_device_register);
int ps3_system_bus_driver_register(struct ps3_system_bus_driver *drv)
{
int result;
pr_debug(" -> %s:%d: %s\n", __func__, __LINE__, drv->core.name);
if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
return -ENODEV;
drv->core.bus = &ps3_system_bus_type;
result = driver_register(&drv->core);
pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, drv->core.name);
return result;
}
EXPORT_SYMBOL_GPL(ps3_system_bus_driver_register);
void ps3_system_bus_driver_unregister(struct ps3_system_bus_driver *drv)
{
pr_debug(" -> %s:%d: %s\n", __func__, __LINE__, drv->core.name);
driver_unregister(&drv->core);
pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, drv->core.name);
}
EXPORT_SYMBOL_GPL(ps3_system_bus_driver_unregister);
| linux-master | arch/powerpc/platforms/ps3/system-bus.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* udbg debug output routine via GELIC UDP broadcasts
*
* Copyright (C) 2007 Sony Computer Entertainment Inc.
* Copyright 2006, 2007 Sony Corporation
* Copyright (C) 2010 Hector Martin <[email protected]>
* Copyright (C) 2011 Andre Heider <[email protected]>
*/
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <asm/io.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#define GELIC_BUS_ID 1
#define GELIC_DEVICE_ID 0
#define GELIC_DEBUG_PORT 18194
#define GELIC_MAX_MESSAGE_SIZE 1000
#define GELIC_LV1_GET_MAC_ADDRESS 1
#define GELIC_LV1_GET_VLAN_ID 4
#define GELIC_LV1_VLAN_TX_ETHERNET_0 2
#define GELIC_DESCR_DMA_STAT_MASK 0xf0000000
#define GELIC_DESCR_DMA_CARDOWNED 0xa0000000
#define GELIC_DESCR_TX_DMA_IKE 0x00080000
#define GELIC_DESCR_TX_DMA_NO_CHKSUM 0x00000000
#define GELIC_DESCR_TX_DMA_FRAME_TAIL 0x00040000
#define GELIC_DESCR_DMA_CMD_NO_CHKSUM (GELIC_DESCR_DMA_CARDOWNED | \
GELIC_DESCR_TX_DMA_IKE | \
GELIC_DESCR_TX_DMA_NO_CHKSUM)
static u64 bus_addr;
struct gelic_descr {
/* as defined by the hardware */
__be32 buf_addr;
__be32 buf_size;
__be32 next_descr_addr;
__be32 dmac_cmd_status;
__be32 result_size;
__be32 valid_size; /* all zeroes for tx */
__be32 data_status;
__be32 data_error; /* all zeroes for tx */
} __attribute__((aligned(32)));
struct debug_block {
struct gelic_descr descr;
u8 pkt[1520];
} __packed;
static __iomem struct ethhdr *h_eth;
static __iomem struct vlan_hdr *h_vlan;
static __iomem struct iphdr *h_ip;
static __iomem struct udphdr *h_udp;
static __iomem char *pmsg;
static __iomem char *pmsgc;
static __iomem struct debug_block dbg __attribute__((aligned(32)));
static int header_size;
static void map_dma_mem(int bus_id, int dev_id, void *start, size_t len,
u64 *real_bus_addr)
{
s64 result;
u64 real_addr = ((u64)start) & 0x0fffffffffffffffUL;
u64 real_end = real_addr + len;
u64 map_start = real_addr & ~0xfff;
u64 map_end = (real_end + 0xfff) & ~0xfff;
u64 bus_addr = 0;
u64 flags = 0xf800000000000000UL;
result = lv1_allocate_device_dma_region(bus_id, dev_id,
map_end - map_start, 12, 0,
&bus_addr);
if (result)
lv1_panic(0);
result = lv1_map_device_dma_region(bus_id, dev_id, map_start,
bus_addr, map_end - map_start,
flags);
if (result)
lv1_panic(0);
*real_bus_addr = bus_addr + real_addr - map_start;
}
static int unmap_dma_mem(int bus_id, int dev_id, u64 bus_addr, size_t len)
{
s64 result;
u64 real_bus_addr;
real_bus_addr = bus_addr & ~0xfff;
len += bus_addr - real_bus_addr;
len = (len + 0xfff) & ~0xfff;
result = lv1_unmap_device_dma_region(bus_id, dev_id, real_bus_addr,
len);
if (result)
return result;
return lv1_free_device_dma_region(bus_id, dev_id, real_bus_addr);
}
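/*
 * gelic_debug_init() below prebuilds a single broadcast debug frame in
 * dbg.pkt: an Ethernet header, an optional 802.1Q VLAN tag (when the HV
 * reports a VLAN id for ethernet tx), an IPv4 header and a UDP header,
 * followed by the message text accumulated by ps3gelic_udbg_putc().
 */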
static void __init gelic_debug_init(void)
{
s64 result;
u64 v2;
u64 mac;
u64 vlan_id;
result = lv1_open_device(GELIC_BUS_ID, GELIC_DEVICE_ID, 0);
if (result)
lv1_panic(0);
map_dma_mem(GELIC_BUS_ID, GELIC_DEVICE_ID, &dbg, sizeof(dbg),
&bus_addr);
memset(&dbg, 0, sizeof(dbg));
dbg.descr.buf_addr = bus_addr + offsetof(struct debug_block, pkt);
wmb();
result = lv1_net_control(GELIC_BUS_ID, GELIC_DEVICE_ID,
GELIC_LV1_GET_MAC_ADDRESS, 0, 0, 0,
&mac, &v2);
if (result)
lv1_panic(0);
mac <<= 16;
h_eth = (struct ethhdr *)dbg.pkt;
eth_broadcast_addr(h_eth->h_dest);
memcpy(&h_eth->h_source, &mac, ETH_ALEN);
header_size = sizeof(struct ethhdr);
result = lv1_net_control(GELIC_BUS_ID, GELIC_DEVICE_ID,
GELIC_LV1_GET_VLAN_ID,
GELIC_LV1_VLAN_TX_ETHERNET_0, 0, 0,
&vlan_id, &v2);
if (!result) {
		h_eth->h_proto = ETH_P_8021Q;
header_size += sizeof(struct vlan_hdr);
h_vlan = (struct vlan_hdr *)(h_eth + 1);
h_vlan->h_vlan_TCI = vlan_id;
h_vlan->h_vlan_encapsulated_proto = ETH_P_IP;
h_ip = (struct iphdr *)(h_vlan + 1);
} else {
		h_eth->h_proto = ETH_P_IP;
h_ip = (struct iphdr *)(h_eth + 1);
}
header_size += sizeof(struct iphdr);
h_ip->version = 4;
h_ip->ihl = 5;
h_ip->ttl = 10;
h_ip->protocol = 0x11;
h_ip->saddr = 0x00000000;
h_ip->daddr = 0xffffffff;
header_size += sizeof(struct udphdr);
h_udp = (struct udphdr *)(h_ip + 1);
h_udp->source = GELIC_DEBUG_PORT;
h_udp->dest = GELIC_DEBUG_PORT;
pmsgc = pmsg = (char *)(h_udp + 1);
}
static void gelic_debug_shutdown(void)
{
if (bus_addr)
unmap_dma_mem(GELIC_BUS_ID, GELIC_DEVICE_ID,
bus_addr, sizeof(dbg));
lv1_close_device(GELIC_BUS_ID, GELIC_DEVICE_ID);
}
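/*
 * Fill in the IP and UDP lengths, recompute the IP header checksum and hand
 * the descriptor to the card, then busy-wait until the card releases it.
 * Only the first five 16-bit words of the IP header are summed: the
 * remaining words (checksum, all-zero source address, all-ones destination
 * address) do not change the one's-complement result.
 */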
static void gelic_sendbuf(int msgsize)
{
u16 *p;
u32 sum;
int i;
dbg.descr.buf_size = header_size + msgsize;
h_ip->tot_len = msgsize + sizeof(struct udphdr) +
sizeof(struct iphdr);
h_udp->len = msgsize + sizeof(struct udphdr);
h_ip->check = 0;
sum = 0;
p = (u16 *)h_ip;
for (i = 0; i < 5; i++)
sum += *p++;
h_ip->check = ~(sum + (sum >> 16));
dbg.descr.dmac_cmd_status = GELIC_DESCR_DMA_CMD_NO_CHKSUM |
GELIC_DESCR_TX_DMA_FRAME_TAIL;
dbg.descr.result_size = 0;
dbg.descr.data_status = 0;
wmb();
lv1_net_start_tx_dma(GELIC_BUS_ID, GELIC_DEVICE_ID, bus_addr, 0);
while ((dbg.descr.dmac_cmd_status & GELIC_DESCR_DMA_STAT_MASK) ==
GELIC_DESCR_DMA_CARDOWNED)
cpu_relax();
}
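/*
 * Characters accumulate in the UDP payload; the packet is flushed on a
 * newline or once GELIC_MAX_MESSAGE_SIZE bytes have been buffered.
 */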
static void ps3gelic_udbg_putc(char ch)
{
*pmsgc++ = ch;
if (ch == '\n' || (pmsgc-pmsg) >= GELIC_MAX_MESSAGE_SIZE) {
gelic_sendbuf(pmsgc-pmsg);
pmsgc = pmsg;
}
}
void __init udbg_init_ps3gelic(void)
{
gelic_debug_init();
udbg_putc = ps3gelic_udbg_putc;
}
void udbg_shutdown_ps3gelic(void)
{
udbg_putc = NULL;
gelic_debug_shutdown();
}
EXPORT_SYMBOL(udbg_shutdown_ps3gelic);
| linux-master | arch/powerpc/platforms/ps3/gelic_udbg.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PS3 flash memory os area.
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*/
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/slab.h>
#include "platform.h"
enum {
	OS_AREA_SEGMENT_SIZE = 0x200,
};
enum os_area_ldr_format {
HEADER_LDR_FORMAT_RAW = 0,
HEADER_LDR_FORMAT_GZIP = 1,
};
#define OS_AREA_HEADER_MAGIC_NUM "cell_ext_os_area"
/**
* struct os_area_header - os area header segment.
* @magic_num: Always 'cell_ext_os_area'.
* @hdr_version: Header format version number.
* @db_area_offset: Starting segment number of other os database area.
* @ldr_area_offset: Starting segment number of bootloader image area.
* @ldr_format: HEADER_LDR_FORMAT flag.
* @ldr_size: Size of bootloader image in bytes.
*
* Note that the docs refer to area offsets. These are offsets in units of
* segments from the start of the os area (top of the header). These are
 * better thought of as segment numbers. The ldr area of the os area is
 * reserved for the os (bootloader) image.
*/
struct os_area_header {
u8 magic_num[16];
u32 hdr_version;
u32 db_area_offset;
u32 ldr_area_offset;
u32 _reserved_1;
u32 ldr_format;
u32 ldr_size;
u32 _reserved_2[6];
};
enum os_area_boot_flag {
PARAM_BOOT_FLAG_GAME_OS = 0,
PARAM_BOOT_FLAG_OTHER_OS = 1,
};
enum os_area_ctrl_button {
PARAM_CTRL_BUTTON_O_IS_YES = 0,
PARAM_CTRL_BUTTON_X_IS_YES = 1,
};
/**
* struct os_area_params - os area params segment.
* @boot_flag: User preference of operating system, PARAM_BOOT_FLAG flag.
* @num_params: Number of params in this (params) segment.
* @rtc_diff: Difference in seconds between 1970 and the ps3 rtc value.
* @av_multi_out: User preference of AV output, PARAM_AV_MULTI_OUT flag.
* @ctrl_button: User preference of controller button config, PARAM_CTRL_BUTTON
* flag.
* @static_ip_addr: User preference of static IP address.
* @network_mask: User preference of static network mask.
* @default_gateway: User preference of static default gateway.
* @dns_primary: User preference of static primary dns server.
* @dns_secondary: User preference of static secondary dns server.
*
* The ps3 rtc maintains a read-only value that approximates seconds since
* 2000-01-01 00:00:00 UTC.
*
* User preference of zero for static_ip_addr means use dhcp.
*/
struct os_area_params {
u32 boot_flag;
u32 _reserved_1[3];
u32 num_params;
u32 _reserved_2[3];
/* param 0 */
s64 rtc_diff;
u8 av_multi_out;
u8 ctrl_button;
u8 _reserved_3[6];
/* param 1 */
u8 static_ip_addr[4];
u8 network_mask[4];
u8 default_gateway[4];
u8 _reserved_4[4];
/* param 2 */
u8 dns_primary[4];
u8 dns_secondary[4];
u8 _reserved_5[8];
};
#define OS_AREA_DB_MAGIC_NUM "-db-"
/**
* struct os_area_db - Shared flash memory database.
* @magic_num: Always '-db-'.
* @version: os_area_db format version number.
* @index_64: byte offset of the database id index for 64 bit variables.
* @count_64: number of usable 64 bit index entries
* @index_32: byte offset of the database id index for 32 bit variables.
* @count_32: number of usable 32 bit index entries
* @index_16: byte offset of the database id index for 16 bit variables.
* @count_16: number of usable 16 bit index entries
*
* Flash rom storage for exclusive use by guests running in the other os lpar.
* The current system configuration allocates 1K (two segments) for other os
* use.
*/
struct os_area_db {
u8 magic_num[4];
u16 version;
u16 _reserved_1;
u16 index_64;
u16 count_64;
u16 index_32;
u16 count_32;
u16 index_16;
u16 count_16;
u32 _reserved_2;
u8 _db_data[1000];
};
/**
* enum os_area_db_owner - Data owners.
*/
enum os_area_db_owner {
OS_AREA_DB_OWNER_ANY = -1,
OS_AREA_DB_OWNER_NONE = 0,
OS_AREA_DB_OWNER_PROTOTYPE = 1,
OS_AREA_DB_OWNER_LINUX = 2,
OS_AREA_DB_OWNER_PETITBOOT = 3,
OS_AREA_DB_OWNER_MAX = 32,
};
enum os_area_db_key {
OS_AREA_DB_KEY_ANY = -1,
OS_AREA_DB_KEY_NONE = 0,
OS_AREA_DB_KEY_RTC_DIFF = 1,
OS_AREA_DB_KEY_VIDEO_MODE = 2,
OS_AREA_DB_KEY_MAX = 8,
};
struct os_area_db_id {
int owner;
int key;
};
static const struct os_area_db_id os_area_db_id_empty = {
.owner = OS_AREA_DB_OWNER_NONE,
.key = OS_AREA_DB_KEY_NONE
};
static const struct os_area_db_id os_area_db_id_any = {
.owner = OS_AREA_DB_OWNER_ANY,
.key = OS_AREA_DB_KEY_ANY
};
static const struct os_area_db_id os_area_db_id_rtc_diff = {
.owner = OS_AREA_DB_OWNER_LINUX,
.key = OS_AREA_DB_KEY_RTC_DIFF
};
#define SECONDS_FROM_1970_TO_2000 946684800LL
/**
* struct saved_params - Static working copies of data from the PS3 'os area'.
*
* The order of preference we use for the rtc_diff source:
* 1) The database value.
* 2) The game os value.
* 3) The number of seconds from 1970 to 2000.
*/
static struct saved_params {
unsigned int valid;
s64 rtc_diff;
unsigned int av_multi_out;
} saved_params;
static struct property property_rtc_diff = {
.name = "linux,rtc_diff",
.length = sizeof(saved_params.rtc_diff),
.value = &saved_params.rtc_diff,
};
static struct property property_av_multi_out = {
.name = "linux,av_multi_out",
.length = sizeof(saved_params.av_multi_out),
.value = &saved_params.av_multi_out,
};
static DEFINE_MUTEX(os_area_flash_mutex);
static const struct ps3_os_area_flash_ops *os_area_flash_ops;
void ps3_os_area_flash_register(const struct ps3_os_area_flash_ops *ops)
{
mutex_lock(&os_area_flash_mutex);
os_area_flash_ops = ops;
mutex_unlock(&os_area_flash_mutex);
}
EXPORT_SYMBOL_GPL(ps3_os_area_flash_register);
static ssize_t os_area_flash_read(void *buf, size_t count, loff_t pos)
{
ssize_t res = -ENODEV;
mutex_lock(&os_area_flash_mutex);
if (os_area_flash_ops)
res = os_area_flash_ops->read(buf, count, pos);
mutex_unlock(&os_area_flash_mutex);
return res;
}
static ssize_t os_area_flash_write(const void *buf, size_t count, loff_t pos)
{
ssize_t res = -ENODEV;
mutex_lock(&os_area_flash_mutex);
if (os_area_flash_ops)
res = os_area_flash_ops->write(buf, count, pos);
mutex_unlock(&os_area_flash_mutex);
return res;
}
/**
* os_area_set_property - Add or overwrite a saved_params value to the device tree.
*
* Overwrites an existing property.
*/
static void os_area_set_property(struct device_node *node,
struct property *prop)
{
int result;
struct property *tmp = of_find_property(node, prop->name, NULL);
if (tmp) {
pr_debug("%s:%d found %s\n", __func__, __LINE__, prop->name);
of_remove_property(node, tmp);
}
result = of_add_property(node, prop);
if (result)
pr_debug("%s:%d of_set_property failed\n", __func__,
__LINE__);
}
/**
* os_area_get_property - Get a saved_params value from the device tree.
*
*/
static void __init os_area_get_property(struct device_node *node,
struct property *prop)
{
const struct property *tmp = of_find_property(node, prop->name, NULL);
if (tmp) {
BUG_ON(prop->length != tmp->length);
memcpy(prop->value, tmp->value, prop->length);
} else
pr_debug("%s:%d not found %s\n", __func__, __LINE__,
prop->name);
}
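/*
 * Render a fixed-size byte field as a NUL-terminated string for debug
 * output, replacing unprintable bytes with '.'. The body is empty unless
 * DEBUG is defined.
 */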
static void dump_field(char *s, const u8 *field, int size_of_field)
{
#if defined(DEBUG)
int i;
for (i = 0; i < size_of_field; i++)
s[i] = isprint(field[i]) ? field[i] : '.';
s[i] = 0;
#endif
}
#define dump_header(_a) _dump_header(_a, __func__, __LINE__)
static void _dump_header(const struct os_area_header *h, const char *func,
int line)
{
char str[sizeof(h->magic_num) + 1];
dump_field(str, h->magic_num, sizeof(h->magic_num));
pr_debug("%s:%d: h.magic_num: '%s'\n", func, line,
str);
pr_debug("%s:%d: h.hdr_version: %u\n", func, line,
h->hdr_version);
pr_debug("%s:%d: h.db_area_offset: %u\n", func, line,
h->db_area_offset);
pr_debug("%s:%d: h.ldr_area_offset: %u\n", func, line,
h->ldr_area_offset);
pr_debug("%s:%d: h.ldr_format: %u\n", func, line,
h->ldr_format);
pr_debug("%s:%d: h.ldr_size: %xh\n", func, line,
h->ldr_size);
}
#define dump_params(_a) _dump_params(_a, __func__, __LINE__)
static void _dump_params(const struct os_area_params *p, const char *func,
int line)
{
pr_debug("%s:%d: p.boot_flag: %u\n", func, line, p->boot_flag);
pr_debug("%s:%d: p.num_params: %u\n", func, line, p->num_params);
pr_debug("%s:%d: p.rtc_diff %lld\n", func, line, p->rtc_diff);
pr_debug("%s:%d: p.av_multi_out %u\n", func, line, p->av_multi_out);
pr_debug("%s:%d: p.ctrl_button: %u\n", func, line, p->ctrl_button);
pr_debug("%s:%d: p.static_ip_addr: %u.%u.%u.%u\n", func, line,
p->static_ip_addr[0], p->static_ip_addr[1],
p->static_ip_addr[2], p->static_ip_addr[3]);
pr_debug("%s:%d: p.network_mask: %u.%u.%u.%u\n", func, line,
p->network_mask[0], p->network_mask[1],
p->network_mask[2], p->network_mask[3]);
pr_debug("%s:%d: p.default_gateway: %u.%u.%u.%u\n", func, line,
p->default_gateway[0], p->default_gateway[1],
p->default_gateway[2], p->default_gateway[3]);
pr_debug("%s:%d: p.dns_primary: %u.%u.%u.%u\n", func, line,
p->dns_primary[0], p->dns_primary[1],
p->dns_primary[2], p->dns_primary[3]);
pr_debug("%s:%d: p.dns_secondary: %u.%u.%u.%u\n", func, line,
p->dns_secondary[0], p->dns_secondary[1],
p->dns_secondary[2], p->dns_secondary[3]);
}
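/*
 * Sanity check an os area header: magic number, header version and segment
 * offsets. Returns 0 if the header looks valid.
 */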
static int verify_header(const struct os_area_header *header)
{
if (memcmp(header->magic_num, OS_AREA_HEADER_MAGIC_NUM,
sizeof(header->magic_num))) {
pr_debug("%s:%d magic_num failed\n", __func__, __LINE__);
return -1;
}
if (header->hdr_version < 1) {
pr_debug("%s:%d hdr_version failed\n", __func__, __LINE__);
return -1;
}
if (header->db_area_offset > header->ldr_area_offset) {
pr_debug("%s:%d offsets failed\n", __func__, __LINE__);
return -1;
}
return 0;
}
static int db_verify(const struct os_area_db *db)
{
if (memcmp(db->magic_num, OS_AREA_DB_MAGIC_NUM,
sizeof(db->magic_num))) {
pr_debug("%s:%d magic_num failed\n", __func__, __LINE__);
return -EINVAL;
}
if (db->version != 1) {
pr_debug("%s:%d version failed\n", __func__, __LINE__);
return -EINVAL;
}
return 0;
}
struct db_index {
uint8_t owner:5;
uint8_t key:3;
};
struct db_iterator {
const struct os_area_db *db;
struct os_area_db_id match_id;
struct db_index *idx;
struct db_index *last_idx;
union {
uint64_t *value_64;
uint32_t *value_32;
uint16_t *value_16;
};
};
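/* Round val up to the next multiple of size (size must be a power of two). */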
static unsigned int db_align_up(unsigned int val, unsigned int size)
{
return (val + (size - 1)) & (~(size - 1));
}
/**
* db_for_each_64 - Iterator for 64 bit entries.
*
 * A NULL value for match_id can be used to match all entries.
* OS_AREA_DB_OWNER_ANY and OS_AREA_DB_KEY_ANY can be used to match all.
*/
static int db_for_each_64(const struct os_area_db *db,
const struct os_area_db_id *match_id, struct db_iterator *i)
{
next:
if (!i->db) {
i->db = db;
i->match_id = match_id ? *match_id : os_area_db_id_any;
i->idx = (void *)db + db->index_64;
i->last_idx = i->idx + db->count_64;
i->value_64 = (void *)db + db->index_64
+ db_align_up(db->count_64, 8);
} else {
i->idx++;
i->value_64++;
}
if (i->idx >= i->last_idx) {
pr_debug("%s:%d: reached end\n", __func__, __LINE__);
return 0;
}
if (i->match_id.owner != OS_AREA_DB_OWNER_ANY
&& i->match_id.owner != (int)i->idx->owner)
goto next;
if (i->match_id.key != OS_AREA_DB_KEY_ANY
&& i->match_id.key != (int)i->idx->key)
goto next;
return 1;
}
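/* Zero the index entry and value of every 64 bit entry matching id. */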
static int db_delete_64(struct os_area_db *db, const struct os_area_db_id *id)
{
struct db_iterator i;
for (i.db = NULL; db_for_each_64(db, id, &i); ) {
pr_debug("%s:%d: got (%d:%d) %llxh\n", __func__, __LINE__,
i.idx->owner, i.idx->key,
(unsigned long long)*i.value_64);
i.idx->owner = 0;
i.idx->key = 0;
*i.value_64 = 0;
}
return 0;
}
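/*
 * Store a 64 bit value for id: existing entries for id are deleted first,
 * then the first empty index slot is claimed. Returns -1 for a bad id or a
 * full database.
 */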
static int db_set_64(struct os_area_db *db, const struct os_area_db_id *id,
uint64_t value)
{
struct db_iterator i;
pr_debug("%s:%d: (%d:%d) <= %llxh\n", __func__, __LINE__,
id->owner, id->key, (unsigned long long)value);
if (!id->owner || id->owner == OS_AREA_DB_OWNER_ANY
|| id->key == OS_AREA_DB_KEY_ANY) {
pr_debug("%s:%d: bad id: (%d:%d)\n", __func__,
__LINE__, id->owner, id->key);
return -1;
}
db_delete_64(db, id);
i.db = NULL;
if (db_for_each_64(db, &os_area_db_id_empty, &i)) {
pr_debug("%s:%d: got (%d:%d) %llxh\n", __func__, __LINE__,
i.idx->owner, i.idx->key,
(unsigned long long)*i.value_64);
i.idx->owner = id->owner;
i.idx->key = id->key;
*i.value_64 = value;
pr_debug("%s:%d: set (%d:%d) <= %llxh\n", __func__, __LINE__,
i.idx->owner, i.idx->key,
(unsigned long long)*i.value_64);
return 0;
}
pr_debug("%s:%d: database full.\n",
__func__, __LINE__);
return -1;
}
static int __init db_get_64(const struct os_area_db *db,
const struct os_area_db_id *id, uint64_t *value)
{
struct db_iterator i;
i.db = NULL;
if (db_for_each_64(db, id, &i)) {
*value = *i.value_64;
pr_debug("%s:%d: found %lld\n", __func__, __LINE__,
(long long int)*i.value_64);
return 0;
}
pr_debug("%s:%d: not found\n", __func__, __LINE__);
return -1;
}
static int __init db_get_rtc_diff(const struct os_area_db *db, int64_t *rtc_diff)
{
	return db_get_64(db, &os_area_db_id_rtc_diff, (uint64_t *)rtc_diff);
}
#define dump_db(a) _dump_db(a, __func__, __LINE__)
static void _dump_db(const struct os_area_db *db, const char *func,
int line)
{
char str[sizeof(db->magic_num) + 1];
dump_field(str, db->magic_num, sizeof(db->magic_num));
pr_debug("%s:%d: db.magic_num: '%s'\n", func, line,
str);
pr_debug("%s:%d: db.version: %u\n", func, line,
db->version);
pr_debug("%s:%d: db.index_64: %u\n", func, line,
db->index_64);
pr_debug("%s:%d: db.count_64: %u\n", func, line,
db->count_64);
pr_debug("%s:%d: db.index_32: %u\n", func, line,
db->index_32);
pr_debug("%s:%d: db.count_32: %u\n", func, line,
db->count_32);
pr_debug("%s:%d: db.index_16: %u\n", func, line,
db->index_16);
pr_debug("%s:%d: db.count_16: %u\n", func, line,
db->count_16);
}
static void os_area_db_init(struct os_area_db *db)
{
enum {
HEADER_SIZE = offsetof(struct os_area_db, _db_data),
INDEX_64_COUNT = 64,
VALUES_64_COUNT = 57,
INDEX_32_COUNT = 64,
VALUES_32_COUNT = 57,
INDEX_16_COUNT = 64,
VALUES_16_COUNT = 57,
};
memset(db, 0, sizeof(struct os_area_db));
memcpy(db->magic_num, OS_AREA_DB_MAGIC_NUM, sizeof(db->magic_num));
db->version = 1;
db->index_64 = HEADER_SIZE;
db->count_64 = VALUES_64_COUNT;
db->index_32 = HEADER_SIZE
+ INDEX_64_COUNT * sizeof(struct db_index)
+ VALUES_64_COUNT * sizeof(u64);
db->count_32 = VALUES_32_COUNT;
db->index_16 = HEADER_SIZE
+ INDEX_64_COUNT * sizeof(struct db_index)
+ VALUES_64_COUNT * sizeof(u64)
+ INDEX_32_COUNT * sizeof(struct db_index)
+ VALUES_32_COUNT * sizeof(u32);
db->count_16 = VALUES_16_COUNT;
/* Rules to check db layout. */
BUILD_BUG_ON(sizeof(struct db_index) != 1);
BUILD_BUG_ON(sizeof(struct os_area_db) != 2 * OS_AREA_SEGMENT_SIZE);
BUILD_BUG_ON(INDEX_64_COUNT & 0x7);
BUILD_BUG_ON(VALUES_64_COUNT > INDEX_64_COUNT);
BUILD_BUG_ON(INDEX_32_COUNT & 0x7);
BUILD_BUG_ON(VALUES_32_COUNT > INDEX_32_COUNT);
BUILD_BUG_ON(INDEX_16_COUNT & 0x7);
BUILD_BUG_ON(VALUES_16_COUNT > INDEX_16_COUNT);
BUILD_BUG_ON(HEADER_SIZE
+ INDEX_64_COUNT * sizeof(struct db_index)
+ VALUES_64_COUNT * sizeof(u64)
+ INDEX_32_COUNT * sizeof(struct db_index)
+ VALUES_32_COUNT * sizeof(u32)
+ INDEX_16_COUNT * sizeof(struct db_index)
+ VALUES_16_COUNT * sizeof(u16)
> sizeof(struct os_area_db));
}
/**
* update_flash_db - Helper for os_area_queue_work_handler.
*
*/
static int update_flash_db(void)
{
const unsigned int buf_len = 8 * OS_AREA_SEGMENT_SIZE;
struct os_area_header *header;
ssize_t count;
int error;
loff_t pos;
	struct os_area_db *db;
/* Read in header and db from flash. */
header = kmalloc(buf_len, GFP_KERNEL);
if (!header)
return -ENOMEM;
count = os_area_flash_read(header, buf_len, 0);
if (count < 0) {
pr_debug("%s: os_area_flash_read failed %zd\n", __func__,
count);
error = count;
goto fail;
}
pos = header->db_area_offset * OS_AREA_SEGMENT_SIZE;
if (count < OS_AREA_SEGMENT_SIZE || verify_header(header) ||
count < pos) {
pr_debug("%s: verify_header failed\n", __func__);
dump_header(header);
error = -EINVAL;
goto fail;
}
/* Now got a good db offset and some maybe good db data. */
db = (void *)header + pos;
error = db_verify(db);
if (error) {
pr_notice("%s: Verify of flash database failed, formatting.\n",
__func__);
dump_db(db);
os_area_db_init(db);
}
/* Now got good db data. */
db_set_64(db, &os_area_db_id_rtc_diff, saved_params.rtc_diff);
count = os_area_flash_write(db, sizeof(struct os_area_db), pos);
if (count < 0 || count < sizeof(struct os_area_db)) {
pr_debug("%s: os_area_flash_write failed %zd\n", __func__,
count);
error = count < 0 ? count : -EIO;
}
fail:
kfree(header);
return error;
}
/**
* os_area_queue_work_handler - Asynchronous write handler.
*
* An asynchronous write for flash memory and the device tree. Do not
* call directly, use os_area_queue_work().
*/
static void os_area_queue_work_handler(struct work_struct *work)
{
struct device_node *node;
int error;
pr_debug(" -> %s:%d\n", __func__, __LINE__);
node = of_find_node_by_path("/");
if (node) {
os_area_set_property(node, &property_rtc_diff);
of_node_put(node);
} else
pr_debug("%s:%d of_find_node_by_path failed\n",
__func__, __LINE__);
error = update_flash_db();
if (error)
pr_warn("%s: Could not update FLASH ROM\n", __func__);
pr_debug(" <- %s:%d\n", __func__, __LINE__);
}
static void os_area_queue_work(void)
{
static DECLARE_WORK(q, os_area_queue_work_handler);
wmb();
schedule_work(&q);
}
/**
* ps3_os_area_save_params - Copy data from os area mirror to @saved_params.
*
* For the convenience of the guest the HV makes a copy of the os area in
* flash to a high address in the boot memory region and then puts that RAM
* address and the byte count into the repository for retrieval by the guest.
* We copy the data we want into a static variable and allow the memory setup
* by the HV to be claimed by the memblock manager.
*
* The os area mirror will not be available to a second stage kernel, and
* the header verify will fail. In this case, the saved_params values will
* be set from flash memory or the passed in device tree in ps3_os_area_init().
*/
void __init ps3_os_area_save_params(void)
{
int result;
u64 lpar_addr;
unsigned int size;
struct os_area_header *header;
struct os_area_params *params;
struct os_area_db *db;
pr_debug(" -> %s:%d\n", __func__, __LINE__);
result = ps3_repository_read_boot_dat_info(&lpar_addr, &size);
if (result) {
pr_debug("%s:%d ps3_repository_read_boot_dat_info failed\n",
__func__, __LINE__);
return;
}
header = (struct os_area_header *)__va(lpar_addr);
params = (struct os_area_params *)__va(lpar_addr
+ OS_AREA_SEGMENT_SIZE);
result = verify_header(header);
if (result) {
/* Second stage kernels exit here. */
pr_debug("%s:%d verify_header failed\n", __func__, __LINE__);
dump_header(header);
return;
}
db = (struct os_area_db *)__va(lpar_addr
+ header->db_area_offset * OS_AREA_SEGMENT_SIZE);
dump_header(header);
dump_params(params);
dump_db(db);
result = db_verify(db) || db_get_rtc_diff(db, &saved_params.rtc_diff);
if (result)
saved_params.rtc_diff = params->rtc_diff ? params->rtc_diff
: SECONDS_FROM_1970_TO_2000;
saved_params.av_multi_out = params->av_multi_out;
saved_params.valid = 1;
memset(header, 0, sizeof(*header));
pr_debug(" <- %s:%d\n", __func__, __LINE__);
}
/**
* ps3_os_area_init - Setup os area device tree properties as needed.
*/
void __init ps3_os_area_init(void)
{
struct device_node *node;
pr_debug(" -> %s:%d\n", __func__, __LINE__);
node = of_find_node_by_path("/");
if (!saved_params.valid && node) {
/* Second stage kernels should have a dt entry. */
os_area_get_property(node, &property_rtc_diff);
os_area_get_property(node, &property_av_multi_out);
}
	if (!saved_params.rtc_diff)
saved_params.rtc_diff = SECONDS_FROM_1970_TO_2000;
if (node) {
os_area_set_property(node, &property_rtc_diff);
os_area_set_property(node, &property_av_multi_out);
of_node_put(node);
} else
pr_debug("%s:%d of_find_node_by_path failed\n",
__func__, __LINE__);
pr_debug(" <- %s:%d\n", __func__, __LINE__);
}
/**
* ps3_os_area_get_rtc_diff - Returns the rtc diff value.
*/
u64 ps3_os_area_get_rtc_diff(void)
{
return saved_params.rtc_diff;
}
EXPORT_SYMBOL_GPL(ps3_os_area_get_rtc_diff);
/**
* ps3_os_area_set_rtc_diff - Set the rtc diff value.
*
* An asynchronous write is needed to support writing updates from
* the timer interrupt context.
*/
void ps3_os_area_set_rtc_diff(u64 rtc_diff)
{
if (saved_params.rtc_diff != rtc_diff) {
saved_params.rtc_diff = rtc_diff;
os_area_queue_work();
}
}
EXPORT_SYMBOL_GPL(ps3_os_area_set_rtc_diff);
/**
* ps3_os_area_get_av_multi_out - Returns the default video mode.
*/
enum ps3_param_av_multi_out ps3_os_area_get_av_multi_out(void)
{
return saved_params.av_multi_out;
}
EXPORT_SYMBOL_GPL(ps3_os_area_get_av_multi_out);
| linux-master | arch/powerpc/platforms/ps3/os-area.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PS3 platform setup routines.
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/time.h>
#include <asm/iommu.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/ps3gpu.h>
#include "platform.h"
#if defined(DEBUG)
#define DBG udbg_printf
#else
#define DBG pr_debug
#endif
/* mutex synchronizing GPU accesses and video mode changes */
DEFINE_MUTEX(ps3_gpu_mutex);
EXPORT_SYMBOL_GPL(ps3_gpu_mutex);
static union ps3_firmware_version ps3_firmware_version;
static char ps3_firmware_version_str[16];
void ps3_get_firmware_version(union ps3_firmware_version *v)
{
*v = ps3_firmware_version;
}
EXPORT_SYMBOL_GPL(ps3_get_firmware_version);
int ps3_compare_firmware_version(u16 major, u16 minor, u16 rev)
{
union ps3_firmware_version x;
x.pad = 0;
x.major = major;
x.minor = minor;
x.rev = rev;
return (ps3_firmware_version.raw > x.raw) -
(ps3_firmware_version.raw < x.raw);
}
EXPORT_SYMBOL_GPL(ps3_compare_firmware_version);
static void ps3_power_save(void)
{
/*
* lv1_pause() puts the PPE thread into inactive state until an
* irq on an unmasked plug exists. MSR[EE] has no effect.
* flags: 0 = wake on DEC interrupt, 1 = ignore DEC interrupt.
*/
lv1_pause(0);
}
static void __noreturn ps3_restart(char *cmd)
{
DBG("%s:%d cmd '%s'\n", __func__, __LINE__, cmd);
smp_send_stop();
ps3_sys_manager_restart(); /* never returns */
}
static void ps3_power_off(void)
{
DBG("%s:%d\n", __func__, __LINE__);
smp_send_stop();
ps3_sys_manager_power_off(); /* never returns */
}
static void __noreturn ps3_halt(void)
{
DBG("%s:%d\n", __func__, __LINE__);
smp_send_stop();
ps3_sys_manager_halt(); /* never returns */
}
static void ps3_panic(char *str)
{
DBG("%s:%d %s\n", __func__, __LINE__, str);
smp_send_stop();
printk("\n");
printk(" System does not reboot automatically.\n");
printk(" Please press POWER button.\n");
printk("\n");
panic_flush_kmsg_end();
	while (1)
lv1_pause(1);
}
#if defined(CONFIG_FB_PS3) || defined(CONFIG_FB_PS3_MODULE) || \
defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE)
static void __init prealloc(struct ps3_prealloc *p)
{
if (!p->size)
return;
p->address = memblock_alloc(p->size, p->align);
if (!p->address)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, p->size, p->align);
printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size,
p->address);
}
#endif
#if defined(CONFIG_FB_PS3) || defined(CONFIG_FB_PS3_MODULE)
struct ps3_prealloc ps3fb_videomemory = {
.name = "ps3fb videomemory",
.size = CONFIG_FB_PS3_DEFAULT_SIZE_M*1024*1024,
.align = 1024*1024 /* the GPU requires 1 MiB alignment */
};
EXPORT_SYMBOL_GPL(ps3fb_videomemory);
#define prealloc_ps3fb_videomemory() prealloc(&ps3fb_videomemory)
static int __init early_parse_ps3fb(char *p)
{
if (!p)
return 1;
ps3fb_videomemory.size = ALIGN(memparse(p, &p),
ps3fb_videomemory.align);
return 0;
}
early_param("ps3fb", early_parse_ps3fb);
#else
#define prealloc_ps3fb_videomemory() do { } while (0)
#endif
#if defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE)
struct ps3_prealloc ps3flash_bounce_buffer = {
.name = "ps3flash bounce buffer",
.size = 256*1024,
.align = 256*1024
};
EXPORT_SYMBOL_GPL(ps3flash_bounce_buffer);
#define prealloc_ps3flash_bounce_buffer() prealloc(&ps3flash_bounce_buffer)
static int __init early_parse_ps3flash(char *p)
{
if (!p)
return 1;
if (!strcmp(p, "off"))
ps3flash_bounce_buffer.size = 0;
return 0;
}
early_param("ps3flash", early_parse_ps3flash);
#else
#define prealloc_ps3flash_bounce_buffer() do { } while (0)
#endif
static int ps3_set_dabr(unsigned long dabr, unsigned long dabrx)
{
/* Have to set at least one bit in the DABRX */
if (dabrx == 0 && dabr == 0)
dabrx = DABRX_USER;
/* hypervisor only allows us to set BTI, Kernel and user */
dabrx &= DABRX_BTI | DABRX_KERNEL | DABRX_USER;
return lv1_set_dabr(dabr, dabrx) ? -1 : 0;
}
static ssize_t ps3_fw_version_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%s", ps3_firmware_version_str);
}
static int __init ps3_setup_sysfs(void)
{
static struct kobj_attribute attr = __ATTR(fw-version, S_IRUGO,
ps3_fw_version_show, NULL);
static struct kobject *kobj;
int result;
kobj = kobject_create_and_add("ps3", firmware_kobj);
if (!kobj) {
pr_warn("%s:%d: kobject_create_and_add failed.\n", __func__,
__LINE__);
return -ENOMEM;
}
result = sysfs_create_file(kobj, &attr.attr);
if (result) {
pr_warn("%s:%d: sysfs_create_file failed.\n", __func__,
__LINE__);
kobject_put(kobj);
return -ENOMEM;
}
return 0;
}
core_initcall(ps3_setup_sysfs);
static void __init ps3_setup_arch(void)
{
u64 tmp;
DBG(" -> %s:%d\n", __func__, __LINE__);
lv1_get_version_info(&ps3_firmware_version.raw, &tmp);
snprintf(ps3_firmware_version_str, sizeof(ps3_firmware_version_str),
"%u.%u.%u", ps3_firmware_version.major,
ps3_firmware_version.minor, ps3_firmware_version.rev);
printk(KERN_INFO "PS3 firmware version %s\n", ps3_firmware_version_str);
ps3_spu_set_platform();
#ifdef CONFIG_SMP
smp_init_ps3();
#endif
prealloc_ps3fb_videomemory();
prealloc_ps3flash_bounce_buffer();
ppc_md.power_save = ps3_power_save;
ps3_os_area_init();
DBG(" <- %s:%d\n", __func__, __LINE__);
}
static void __init ps3_progress(char *s, unsigned short hex)
{
printk("*** %04x : %s\n", hex, s ? s : "");
}
void __init ps3_early_mm_init(void)
{
unsigned long htab_size;
ps3_mm_init();
ps3_mm_vas_create(&htab_size);
ps3_hpte_init(htab_size);
}
static int __init ps3_probe(void)
{
DBG(" -> %s:%d\n", __func__, __LINE__);
ps3_os_area_save_params();
pm_power_off = ps3_power_off;
DBG(" <- %s:%d\n", __func__, __LINE__);
return 1;
}
#if defined(CONFIG_KEXEC_CORE)
static void ps3_kexec_cpu_down(int crash_shutdown, int secondary)
{
int cpu = smp_processor_id();
DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
ps3_smp_cleanup_cpu(cpu);
ps3_shutdown_IRQ(cpu);
DBG(" <- %s:%d\n", __func__, __LINE__);
}
#endif
define_machine(ps3) {
.name = "PS3",
.compatible = "sony,ps3",
.probe = ps3_probe,
.setup_arch = ps3_setup_arch,
.init_IRQ = ps3_init_IRQ,
.panic = ps3_panic,
.get_boot_time = ps3_get_boot_time,
.set_dabr = ps3_set_dabr,
.calibrate_decr = ps3_calibrate_decr,
.progress = ps3_progress,
.restart = ps3_restart,
.halt = ps3_halt,
#if defined(CONFIG_KEXEC_CORE)
.kexec_cpu_down = ps3_kexec_cpu_down,
#endif
};
| linux-master | arch/powerpc/platforms/ps3/setup.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PS3 time and rtc routines.
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#include <asm/ps3.h>
#include "platform.h"
void __init ps3_calibrate_decr(void)
{
int result;
u64 tmp;
result = ps3_repository_read_be_tb_freq(0, &tmp);
BUG_ON(result);
ppc_tb_freq = tmp;
ppc_proc_freq = ppc_tb_freq * 40;
}
static u64 read_rtc(void)
{
int result;
u64 rtc_val;
u64 tb_val;
result = lv1_get_rtc(&rtc_val, &tb_val);
BUG_ON(result);
return rtc_val;
}
time64_t __init ps3_get_boot_time(void)
{
return read_rtc() + ps3_os_area_get_rtc_diff();
}
static int __init ps3_rtc_init(void)
{
struct platform_device *pdev;
if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
return -ENODEV;
pdev = platform_device_register_simple("rtc-ps3", -1, NULL, 0);
return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(ps3_rtc_init);
| linux-master | arch/powerpc/platforms/ps3/time.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PS3 hvcall exports for modules.
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*/
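/*
 * Redefining LV1_CALL() before including <asm/lv1call.h> turns each hvcall
 * listed there into a declaration plus EXPORT_SYMBOL() of its low-level
 * _lv1_xxx stub, making the hvcalls available to modules.
 */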
#define LV1_CALL(name, in, out, num) \
extern s64 _lv1_##name(LV1_##in##_IN_##out##_OUT_ARG_DECL); \
EXPORT_SYMBOL(_lv1_##name);
#include <asm/lv1call.h>
| linux-master | arch/powerpc/platforms/ps3/exports.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PS3 address space management.
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*/
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <asm/cell-regs.h>
#include <asm/firmware.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/setup.h>
#include "platform.h"
#if defined(DEBUG)
#define DBG udbg_printf
#else
#define DBG pr_devel
#endif
enum {
#if defined(CONFIG_PS3_DYNAMIC_DMA)
USE_DYNAMIC_DMA = 1,
#else
USE_DYNAMIC_DMA = 0,
#endif
};
enum {
PAGE_SHIFT_4K = 12U,
PAGE_SHIFT_64K = 16U,
PAGE_SHIFT_16M = 24U,
};
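/*
 * Pack two page size exponents into the page-size argument expected by
 * lv1_construct_virtual_address_space().
 */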
static unsigned long __init make_page_sizes(unsigned long a, unsigned long b)
{
return (a << 56) | (b << 48);
}
enum {
	ALLOCATE_MEMORY_TRY_ALT_UNIT = 0x04,
	ALLOCATE_MEMORY_ADDR_ZERO = 0x08,
};
/* valid htab sizes are {18,19,20} = 256K, 512K, 1M */
enum {
HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
};
/*============================================================================*/
/* virtual address space routines */
/*============================================================================*/
/**
* struct mem_region - memory region structure
* @base: base address
* @size: size in bytes
* @offset: difference between base and rm.size
* @destroy: flag if region should be destroyed upon shutdown
*/
struct mem_region {
u64 base;
u64 size;
unsigned long offset;
int destroy;
};
/**
* struct map - address space state variables holder
* @total: total memory available as reported by HV
 * @vas_id: HV virtual address space id
* @htab_size: htab size in bytes
*
* The HV virtual address space (vas) allows for hotplug memory regions.
* Memory regions can be created and destroyed in the vas at runtime.
* @rm: real mode (bootmem) region
* @r1: highmem region(s)
*
* ps3 addresses
* virt_addr: a cpu 'translated' effective address
* phys_addr: an address in what Linux thinks is the physical address space
* lpar_addr: an address in the HV virtual address space
* bus_addr: an io controller 'translated' address on a device bus
*/
struct map {
u64 total;
u64 vas_id;
u64 htab_size;
struct mem_region rm;
struct mem_region r1;
};
#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
static void __maybe_unused _debug_dump_map(const struct map *m,
const char *func, int line)
{
DBG("%s:%d: map.total = %llxh\n", func, line, m->total);
DBG("%s:%d: map.rm.size = %llxh\n", func, line, m->rm.size);
DBG("%s:%d: map.vas_id = %llu\n", func, line, m->vas_id);
DBG("%s:%d: map.htab_size = %llxh\n", func, line, m->htab_size);
DBG("%s:%d: map.r1.base = %llxh\n", func, line, m->r1.base);
DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
DBG("%s:%d: map.r1.size = %llxh\n", func, line, m->r1.size);
}
static struct map map;
/**
* ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
* @phys_addr: linux physical address
*/
unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
{
BUG_ON(is_kernel_addr(phys_addr));
return (phys_addr < map.rm.size || phys_addr >= map.total)
? phys_addr : phys_addr + map.r1.offset;
}
EXPORT_SYMBOL(ps3_mm_phys_to_lpar);
/**
* ps3_mm_vas_create - create the virtual address space
*/
void __init ps3_mm_vas_create(unsigned long *htab_size)
{
int result;
u64 start_address;
u64 size;
u64 access_right;
u64 max_page_size;
u64 flags;
result = lv1_query_logical_partition_address_region_info(0,
&start_address, &size, &access_right, &max_page_size,
&flags);
if (result) {
DBG("%s:%d: lv1_query_logical_partition_address_region_info "
"failed: %s\n", __func__, __LINE__,
ps3_result(result));
goto fail;
}
if (max_page_size < PAGE_SHIFT_16M) {
DBG("%s:%d: bad max_page_size %llxh\n", __func__, __LINE__,
max_page_size);
goto fail;
}
BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);
result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
&map.vas_id, &map.htab_size);
if (result) {
DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
__func__, __LINE__, ps3_result(result));
goto fail;
}
result = lv1_select_virtual_address_space(map.vas_id);
if (result) {
DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
__func__, __LINE__, ps3_result(result));
goto fail;
}
*htab_size = map.htab_size;
debug_dump_map(&map);
return;
fail:
panic("ps3_mm_vas_create failed");
}
/**
 * ps3_mm_vas_destroy - Destroy the HV virtual address space.
*
* called during kexec sequence with MMU off.
*/
notrace void ps3_mm_vas_destroy(void)
{
int result;
if (map.vas_id) {
result = lv1_select_virtual_address_space(0);
result += lv1_destruct_virtual_address_space(map.vas_id);
if (result) {
lv1_panic(0);
}
map.vas_id = 0;
}
}
static int __init ps3_mm_get_repository_highmem(struct mem_region *r)
{
int result;
/* Assume a single highmem region. */
result = ps3_repository_read_highmem_info(0, &r->base, &r->size);
if (result)
goto zero_region;
if (!r->base || !r->size) {
result = -1;
goto zero_region;
}
r->offset = r->base - map.rm.size;
DBG("%s:%d: Found high region in repository: %llxh %llxh\n",
__func__, __LINE__, r->base, r->size);
return 0;
zero_region:
DBG("%s:%d: No high region in repository.\n", __func__, __LINE__);
r->size = r->base = r->offset = 0;
return result;
}
static int ps3_mm_set_repository_highmem(const struct mem_region *r)
{
/* Assume a single highmem region. */
return r ? ps3_repository_write_highmem_info(0, r->base, r->size) :
ps3_repository_write_highmem_info(0, 0, 0);
}
/**
* ps3_mm_region_create - create a memory region in the vas
* @r: pointer to a struct mem_region to accept initialized values
* @size: requested region size
*
* This implementation creates the region with the vas large page size.
* @size is rounded down to a multiple of the vas large page size.
*/
static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
{
int result;
u64 muid;
r->size = ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);
DBG("%s:%d requested %lxh\n", __func__, __LINE__, size);
DBG("%s:%d actual %llxh\n", __func__, __LINE__, r->size);
DBG("%s:%d difference %llxh (%lluMB)\n", __func__, __LINE__,
size - r->size, (size - r->size) / 1024 / 1024);
if (r->size == 0) {
DBG("%s:%d: size == 0\n", __func__, __LINE__);
result = -1;
goto zero_region;
}
result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);
if (result || r->base < map.rm.size) {
DBG("%s:%d: lv1_allocate_memory failed: %s\n",
__func__, __LINE__, ps3_result(result));
goto zero_region;
}
r->destroy = 1;
r->offset = r->base - map.rm.size;
return result;
zero_region:
r->size = r->base = r->offset = 0;
return result;
}
/**
* ps3_mm_region_destroy - destroy a memory region
* @r: pointer to struct mem_region
*/
static void ps3_mm_region_destroy(struct mem_region *r)
{
int result;
if (!r->destroy) {
return;
}
if (r->base) {
result = lv1_release_memory(r->base);
if (result) {
lv1_panic(0);
}
r->size = r->base = r->offset = 0;
map.total = map.rm.size;
}
ps3_mm_set_repository_highmem(NULL);
}
/*============================================================================*/
/* dma routines */
/*============================================================================*/
/**
* dma_sb_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
* @r: pointer to dma region structure
* @lpar_addr: HV lpar address
*/
static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r,
unsigned long lpar_addr)
{
if (lpar_addr >= map.rm.size)
lpar_addr -= map.r1.offset;
BUG_ON(lpar_addr < r->offset);
BUG_ON(lpar_addr >= r->offset + r->len);
return r->bus_addr + lpar_addr - r->offset;
}
#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
const char *func, int line)
{
DBG("%s:%d: dev %llu:%llu\n", func, line, r->dev->bus_id,
r->dev->dev_id);
DBG("%s:%d: page_size %u\n", func, line, r->page_size);
DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
DBG("%s:%d: len %lxh\n", func, line, r->len);
DBG("%s:%d: offset %lxh\n", func, line, r->offset);
}
/**
* dma_chunk - A chunk of dma pages mapped by the io controller.
* @region - The dma region that owns this chunk.
* @lpar_addr: Starting lpar address of the area to map.
* @bus_addr: Starting ioc bus address of the area to map.
* @len: Length in bytes of the area to map.
* @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
* list of all chunks owned by the region.
*
* This implementation uses a very simple dma page manager
* based on the dma_chunk structure. This scheme assumes
* that all drivers use very well behaved dma ops.
*/
struct dma_chunk {
struct ps3_dma_region *region;
unsigned long lpar_addr;
unsigned long bus_addr;
unsigned long len;
struct list_head link;
unsigned int usage_count;
};
#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
static void _dma_dump_chunk(const struct dma_chunk *c, const char *func,
int line)
{
DBG("%s:%d: r.dev %llu:%llu\n", func, line,
c->region->dev->bus_id, c->region->dev->dev_id);
DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr);
DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size);
DBG("%s:%d: r.len %lxh\n", func, line, c->region->len);
DBG("%s:%d: r.offset %lxh\n", func, line, c->region->offset);
DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr);
DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr);
DBG("%s:%d: c.len %lxh\n", func, line, c->len);
}
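/*
 * Find an already mapped chunk that fully covers the page-aligned bus
 * address range. A range that only partially overlaps an existing chunk is
 * not handled and triggers a BUG().
 */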
static struct dma_chunk *dma_find_chunk(struct ps3_dma_region *r,
unsigned long bus_addr, unsigned long len)
{
struct dma_chunk *c;
unsigned long aligned_bus = ALIGN_DOWN(bus_addr, 1 << r->page_size);
unsigned long aligned_len = ALIGN(len+bus_addr-aligned_bus,
1 << r->page_size);
list_for_each_entry(c, &r->chunk_list.head, link) {
/* intersection */
if (aligned_bus >= c->bus_addr &&
aligned_bus + aligned_len <= c->bus_addr + c->len)
return c;
/* below */
if (aligned_bus + aligned_len <= c->bus_addr)
continue;
/* above */
if (aligned_bus >= c->bus_addr + c->len)
continue;
/* we don't handle the multi-chunk case for now */
dma_dump_chunk(c);
BUG();
}
return NULL;
}
static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r,
unsigned long lpar_addr, unsigned long len)
{
struct dma_chunk *c;
unsigned long aligned_lpar = ALIGN_DOWN(lpar_addr, 1 << r->page_size);
unsigned long aligned_len = ALIGN(len + lpar_addr - aligned_lpar,
1 << r->page_size);
list_for_each_entry(c, &r->chunk_list.head, link) {
/* intersection */
if (c->lpar_addr <= aligned_lpar &&
aligned_lpar < c->lpar_addr + c->len) {
if (aligned_lpar + aligned_len <= c->lpar_addr + c->len)
return c;
else {
dma_dump_chunk(c);
BUG();
}
}
/* below */
if (aligned_lpar + aligned_len <= c->lpar_addr) {
continue;
}
/* above */
if (c->lpar_addr + c->len <= aligned_lpar) {
continue;
}
}
return NULL;
}
static int dma_sb_free_chunk(struct dma_chunk *c)
{
int result = 0;
if (c->bus_addr) {
result = lv1_unmap_device_dma_region(c->region->dev->bus_id,
c->region->dev->dev_id, c->bus_addr, c->len);
BUG_ON(result);
}
kfree(c);
return result;
}
static int dma_ioc0_free_chunk(struct dma_chunk *c)
{
int result = 0;
int iopage;
unsigned long offset;
struct ps3_dma_region *r = c->region;
DBG("%s:start\n", __func__);
for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) {
offset = (1 << r->page_size) * iopage;
/* put INVALID entry */
result = lv1_put_iopte(0,
c->bus_addr + offset,
c->lpar_addr + offset,
r->ioid,
0);
DBG("%s: bus=%#lx, lpar=%#lx, ioid=%d\n", __func__,
c->bus_addr + offset,
c->lpar_addr + offset,
r->ioid);
if (result) {
DBG("%s:%d: lv1_put_iopte failed: %s\n", __func__,
__LINE__, ps3_result(result));
}
}
kfree(c);
DBG("%s:end\n", __func__);
return result;
}
/**
* dma_sb_map_pages - Maps dma pages into the io controller bus address space.
* @r: Pointer to a struct ps3_dma_region.
* @phys_addr: Starting physical address of the area to map.
* @len: Length in bytes of the area to map.
 * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
*
* This is the lowest level dma mapping routine, and is the one that will
* make the HV call to add the pages into the io controller address space.
*/
static int dma_sb_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
unsigned long len, struct dma_chunk **c_out, u64 iopte_flag)
{
int result;
struct dma_chunk *c;
c = kzalloc(sizeof(*c), GFP_ATOMIC);
if (!c) {
result = -ENOMEM;
goto fail_alloc;
}
c->region = r;
c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr);
c->len = len;
BUG_ON(iopte_flag != 0xf800000000000000UL);
result = lv1_map_device_dma_region(c->region->dev->bus_id,
c->region->dev->dev_id, c->lpar_addr,
c->bus_addr, c->len, iopte_flag);
if (result) {
DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
__func__, __LINE__, ps3_result(result));
goto fail_map;
}
list_add(&c->link, &r->chunk_list.head);
*c_out = c;
return 0;
fail_map:
kfree(c);
fail_alloc:
*c_out = NULL;
DBG(" <- %s:%d\n", __func__, __LINE__);
return result;
}
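/*
 * IOC0 variant of dma_sb_map_pages(): a bus address range is taken directly
 * after the most recently mapped chunk and an iopte is installed for each
 * page. On failure, the ioptes already installed for this chunk are
 * invalidated again.
 */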
static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
unsigned long len, struct dma_chunk **c_out,
u64 iopte_flag)
{
int result;
struct dma_chunk *c, *last;
int iopage, pages;
unsigned long offset;
DBG(KERN_ERR "%s: phy=%#lx, lpar%#lx, len=%#lx\n", __func__,
phys_addr, ps3_mm_phys_to_lpar(phys_addr), len);
c = kzalloc(sizeof(*c), GFP_ATOMIC);
if (!c) {
result = -ENOMEM;
goto fail_alloc;
}
c->region = r;
c->len = len;
c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
/* allocate IO address */
if (list_empty(&r->chunk_list.head)) {
/* first one */
c->bus_addr = r->bus_addr;
} else {
/* derive from last bus addr*/
last = list_entry(r->chunk_list.head.next,
struct dma_chunk, link);
c->bus_addr = last->bus_addr + last->len;
DBG("%s: last bus=%#lx, len=%#lx\n", __func__,
last->bus_addr, last->len);
}
/* FIXME: check whether length exceeds region size */
/* build ioptes for the area */
pages = len >> r->page_size;
DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#llx\n", __func__,
r->page_size, r->len, pages, iopte_flag);
for (iopage = 0; iopage < pages; iopage++) {
offset = (1 << r->page_size) * iopage;
result = lv1_put_iopte(0,
c->bus_addr + offset,
c->lpar_addr + offset,
r->ioid,
iopte_flag);
if (result) {
pr_warn("%s:%d: lv1_put_iopte failed: %s\n",
__func__, __LINE__, ps3_result(result));
goto fail_map;
}
DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__,
iopage, c->bus_addr + offset, c->lpar_addr + offset,
r->ioid);
}
/* be sure that last allocated one is inserted at head */
list_add(&c->link, &r->chunk_list.head);
*c_out = c;
DBG("%s: end\n", __func__);
return 0;
fail_map:
	for (iopage--; 0 <= iopage; iopage--) {
		offset = (1 << r->page_size) * iopage;
		lv1_put_iopte(0,
			      c->bus_addr + offset,
			      c->lpar_addr + offset,
			      r->ioid,
			      0);
	}
kfree(c);
fail_alloc:
*c_out = NULL;
return result;
}
/**
* dma_sb_region_create - Create a device dma region.
* @r: Pointer to a struct ps3_dma_region.
*
* This is the lowest level dma region create routine, and is the one that
* will make the HV call to create the region.
*/
static int dma_sb_region_create(struct ps3_dma_region *r)
{
int result;
u64 bus_addr;
DBG(" -> %s:%d:\n", __func__, __LINE__);
BUG_ON(!r);
if (!r->dev->bus_id) {
pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
r->dev->bus_id, r->dev->dev_id);
return 0;
}
DBG("%s:%u: len = 0x%lx, page_size = %u, offset = 0x%lx\n", __func__,
__LINE__, r->len, r->page_size, r->offset);
BUG_ON(!r->len);
BUG_ON(!r->page_size);
BUG_ON(!r->region_ops);
INIT_LIST_HEAD(&r->chunk_list.head);
spin_lock_init(&r->chunk_list.lock);
result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id,
roundup_pow_of_two(r->len), r->page_size, r->region_type,
&bus_addr);
r->bus_addr = bus_addr;
if (result) {
DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
__func__, __LINE__, ps3_result(result));
r->len = r->bus_addr = 0;
}
return result;
}
static int dma_ioc0_region_create(struct ps3_dma_region *r)
{
int result;
u64 bus_addr;
INIT_LIST_HEAD(&r->chunk_list.head);
spin_lock_init(&r->chunk_list.lock);
result = lv1_allocate_io_segment(0,
r->len,
r->page_size,
&bus_addr);
r->bus_addr = bus_addr;
if (result) {
DBG("%s:%d: lv1_allocate_io_segment failed: %s\n",
__func__, __LINE__, ps3_result(result));
r->len = r->bus_addr = 0;
}
DBG("%s: len=%#lx, pg=%d, bus=%#lx\n", __func__,
r->len, r->page_size, r->bus_addr);
return result;
}
/**
* dma_region_free - Free a device dma region.
* @r: Pointer to a struct ps3_dma_region.
*
* This is the lowest level dma region free routine, and is the one that
* will make the HV call to free the region.
*/
static int dma_sb_region_free(struct ps3_dma_region *r)
{
int result;
struct dma_chunk *c;
struct dma_chunk *tmp;
BUG_ON(!r);
if (!r->dev->bus_id) {
pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
r->dev->bus_id, r->dev->dev_id);
return 0;
}
list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
list_del(&c->link);
dma_sb_free_chunk(c);
}
result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id,
r->bus_addr);
if (result)
DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
__func__, __LINE__, ps3_result(result));
r->bus_addr = 0;
return result;
}
static int dma_ioc0_region_free(struct ps3_dma_region *r)
{
int result;
struct dma_chunk *c, *n;
DBG("%s: start\n", __func__);
list_for_each_entry_safe(c, n, &r->chunk_list.head, link) {
list_del(&c->link);
dma_ioc0_free_chunk(c);
}
result = lv1_release_io_segment(0, r->bus_addr);
if (result)
DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
__func__, __LINE__, ps3_result(result));
r->bus_addr = 0;
DBG("%s: end\n", __func__);
return result;
}
/**
* dma_sb_map_area - Map an area of memory into a device dma region.
* @r: Pointer to a struct ps3_dma_region.
* @virt_addr: Starting virtual address of the area to map.
* @len: Length in bytes of the area to map.
* @bus_addr: A pointer to return the starting ioc bus address of the area to
* map.
*
* This is the common dma mapping routine.
*/
static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
unsigned long len, dma_addr_t *bus_addr,
u64 iopte_flag)
{
int result;
unsigned long flags;
struct dma_chunk *c;
unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
: virt_addr;
unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size);
unsigned long aligned_len = ALIGN(len + phys_addr - aligned_phys,
1 << r->page_size);
*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
if (!USE_DYNAMIC_DMA) {
unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
DBG(" -> %s:%d\n", __func__, __LINE__);
DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
virt_addr);
DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
phys_addr);
DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
lpar_addr);
DBG("%s:%d len %lxh\n", __func__, __LINE__, len);
DBG("%s:%d bus_addr %llxh (%lxh)\n", __func__, __LINE__,
*bus_addr, len);
}
spin_lock_irqsave(&r->chunk_list.lock, flags);
c = dma_find_chunk(r, *bus_addr, len);
if (c) {
DBG("%s:%d: reusing mapped chunk", __func__, __LINE__);
dma_dump_chunk(c);
c->usage_count++;
spin_unlock_irqrestore(&r->chunk_list.lock, flags);
return 0;
}
result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag);
if (result) {
*bus_addr = 0;
DBG("%s:%d: dma_sb_map_pages failed (%d)\n",
__func__, __LINE__, result);
spin_unlock_irqrestore(&r->chunk_list.lock, flags);
return result;
}
c->usage_count = 1;
spin_unlock_irqrestore(&r->chunk_list.lock, flags);
return result;
}
static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
unsigned long len, dma_addr_t *bus_addr,
u64 iopte_flag)
{
int result;
unsigned long flags;
struct dma_chunk *c;
unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
: virt_addr;
unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size);
unsigned long aligned_len = ALIGN(len + phys_addr - aligned_phys,
1 << r->page_size);
DBG(KERN_ERR "%s: vaddr=%#lx, len=%#lx\n", __func__,
virt_addr, len);
DBG(KERN_ERR "%s: ph=%#lx a_ph=%#lx a_l=%#lx\n", __func__,
phys_addr, aligned_phys, aligned_len);
spin_lock_irqsave(&r->chunk_list.lock, flags);
c = dma_find_chunk_lpar(r, ps3_mm_phys_to_lpar(phys_addr), len);
if (c) {
/* FIXME */
BUG();
*bus_addr = c->bus_addr + phys_addr - aligned_phys;
c->usage_count++;
spin_unlock_irqrestore(&r->chunk_list.lock, flags);
return 0;
}
result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c,
iopte_flag);
if (result) {
*bus_addr = 0;
DBG("%s:%d: dma_ioc0_map_pages failed (%d)\n",
__func__, __LINE__, result);
spin_unlock_irqrestore(&r->chunk_list.lock, flags);
return result;
}
*bus_addr = c->bus_addr + phys_addr - aligned_phys;
DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#llx\n", __func__,
virt_addr, phys_addr, aligned_phys, *bus_addr);
c->usage_count = 1;
spin_unlock_irqrestore(&r->chunk_list.lock, flags);
return result;
}
/**
* dma_sb_unmap_area - Unmap an area of memory from a device dma region.
* @r: Pointer to a struct ps3_dma_region.
* @bus_addr: The starting ioc bus address of the area to unmap.
* @len: Length in bytes of the area to unmap.
*
* This is the common dma unmap routine.
*/
static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr,
unsigned long len)
{
unsigned long flags;
struct dma_chunk *c;
spin_lock_irqsave(&r->chunk_list.lock, flags);
c = dma_find_chunk(r, bus_addr, len);
if (!c) {
unsigned long aligned_bus = ALIGN_DOWN(bus_addr,
1 << r->page_size);
unsigned long aligned_len = ALIGN(len + bus_addr
- aligned_bus, 1 << r->page_size);
DBG("%s:%d: not found: bus_addr %llxh\n",
__func__, __LINE__, bus_addr);
DBG("%s:%d: not found: len %lxh\n",
__func__, __LINE__, len);
DBG("%s:%d: not found: aligned_bus %lxh\n",
__func__, __LINE__, aligned_bus);
DBG("%s:%d: not found: aligned_len %lxh\n",
__func__, __LINE__, aligned_len);
BUG();
}
c->usage_count--;
if (!c->usage_count) {
list_del(&c->link);
dma_sb_free_chunk(c);
}
spin_unlock_irqrestore(&r->chunk_list.lock, flags);
return 0;
}
static int dma_ioc0_unmap_area(struct ps3_dma_region *r,
dma_addr_t bus_addr, unsigned long len)
{
unsigned long flags;
struct dma_chunk *c;
DBG("%s: start a=%#llx l=%#lx\n", __func__, bus_addr, len);
spin_lock_irqsave(&r->chunk_list.lock, flags);
c = dma_find_chunk(r, bus_addr, len);
if (!c) {
unsigned long aligned_bus = ALIGN_DOWN(bus_addr,
1 << r->page_size);
unsigned long aligned_len = ALIGN(len + bus_addr
- aligned_bus,
1 << r->page_size);
DBG("%s:%d: not found: bus_addr %llxh\n",
__func__, __LINE__, bus_addr);
DBG("%s:%d: not found: len %lxh\n",
__func__, __LINE__, len);
DBG("%s:%d: not found: aligned_bus %lxh\n",
__func__, __LINE__, aligned_bus);
DBG("%s:%d: not found: aligned_len %lxh\n",
__func__, __LINE__, aligned_len);
BUG();
}
c->usage_count--;
if (!c->usage_count) {
list_del(&c->link);
dma_ioc0_free_chunk(c);
}
spin_unlock_irqrestore(&r->chunk_list.lock, flags);
DBG("%s: end\n", __func__);
return 0;
}
/**
* dma_sb_region_create_linear - Setup a linear dma mapping for a device.
* @r: Pointer to a struct ps3_dma_region.
*
* This routine creates an HV dma region for the device and maps all available
* ram into the io controller bus address space.
*/
static int dma_sb_region_create_linear(struct ps3_dma_region *r)
{
int result;
unsigned long virt_addr, len;
dma_addr_t tmp;
if (r->len > 16*1024*1024) { /* FIXME: need proper fix */
/* force 16M dma pages for linear mapping */
if (r->page_size != PS3_DMA_16M) {
pr_info("%s:%d: forcing 16M pages for linear map\n",
__func__, __LINE__);
r->page_size = PS3_DMA_16M;
r->len = ALIGN(r->len, 1 << r->page_size);
}
}
result = dma_sb_region_create(r);
BUG_ON(result);
if (r->offset < map.rm.size) {
/* Map (part of) 1st RAM chunk */
virt_addr = map.rm.base + r->offset;
len = map.rm.size - r->offset;
if (len > r->len)
len = r->len;
result = dma_sb_map_area(r, virt_addr, len, &tmp,
CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
CBE_IOPTE_M);
BUG_ON(result);
}
if (r->offset + r->len > map.rm.size) {
/* Map (part of) 2nd RAM chunk */
virt_addr = map.rm.size;
len = r->len;
if (r->offset >= map.rm.size)
virt_addr += r->offset - map.rm.size;
else
len -= map.rm.size - r->offset;
result = dma_sb_map_area(r, virt_addr, len, &tmp,
CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
CBE_IOPTE_M);
BUG_ON(result);
}
return result;
}
/**
* dma_sb_region_free_linear - Free a linear dma mapping for a device.
* @r: Pointer to a struct ps3_dma_region.
*
* This routine will unmap all mapped areas and free the HV dma region.
*/
static int dma_sb_region_free_linear(struct ps3_dma_region *r)
{
int result;
dma_addr_t bus_addr;
unsigned long len, lpar_addr;
if (r->offset < map.rm.size) {
/* Unmap (part of) 1st RAM chunk */
lpar_addr = map.rm.base + r->offset;
len = map.rm.size - r->offset;
if (len > r->len)
len = r->len;
bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
result = dma_sb_unmap_area(r, bus_addr, len);
BUG_ON(result);
}
if (r->offset + r->len > map.rm.size) {
/* Unmap (part of) 2nd RAM chunk */
lpar_addr = map.r1.base;
len = r->len;
if (r->offset >= map.rm.size)
lpar_addr += r->offset - map.rm.size;
else
len -= map.rm.size - r->offset;
bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
result = dma_sb_unmap_area(r, bus_addr, len);
BUG_ON(result);
}
result = dma_sb_region_free(r);
BUG_ON(result);
return result;
}
/**
* dma_sb_map_area_linear - Map an area of memory into a device dma region.
* @r: Pointer to a struct ps3_dma_region.
* @virt_addr: Starting virtual address of the area to map.
* @len: Length in bytes of the area to map.
* @bus_addr: A pointer to return the starting ioc bus address of the area to
* map.
*
* This routine just returns the corresponding bus address. Actual mapping
* occurs in dma_region_create_linear().
*/
static int dma_sb_map_area_linear(struct ps3_dma_region *r,
unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr,
u64 iopte_flag)
{
unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
: virt_addr;
*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
return 0;
}
/**
* dma_unmap_area_linear - Unmap an area of memory from a device dma region.
* @r: Pointer to a struct ps3_dma_region.
* @bus_addr: The starting ioc bus address of the area to unmap.
* @len: Length in bytes of the area to unmap.
*
* This routine does nothing. Unmapping occurs in dma_sb_region_free_linear().
*/
static int dma_sb_unmap_area_linear(struct ps3_dma_region *r,
dma_addr_t bus_addr, unsigned long len)
{
return 0;
};
static const struct ps3_dma_region_ops ps3_dma_sb_region_ops = {
.create = dma_sb_region_create,
.free = dma_sb_region_free,
.map = dma_sb_map_area,
.unmap = dma_sb_unmap_area
};
static const struct ps3_dma_region_ops ps3_dma_sb_region_linear_ops = {
.create = dma_sb_region_create_linear,
.free = dma_sb_region_free_linear,
.map = dma_sb_map_area_linear,
.unmap = dma_sb_unmap_area_linear
};
static const struct ps3_dma_region_ops ps3_dma_ioc0_region_ops = {
.create = dma_ioc0_region_create,
.free = dma_ioc0_region_free,
.map = dma_ioc0_map_area,
.unmap = dma_ioc0_unmap_area
};
int ps3_dma_region_init(struct ps3_system_bus_device *dev,
struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
enum ps3_dma_region_type region_type, void *addr, unsigned long len)
{
unsigned long lpar_addr;
int result;
lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;
r->dev = dev;
r->page_size = page_size;
r->region_type = region_type;
r->offset = lpar_addr;
if (r->offset >= map.rm.size)
r->offset -= map.r1.offset;
r->len = len ? len : ALIGN(map.total, 1 << r->page_size);
dev->core.dma_mask = &r->dma_mask;
result = dma_set_mask_and_coherent(&dev->core, DMA_BIT_MASK(32));
if (result < 0) {
dev_err(&dev->core, "%s:%d: dma_set_mask_and_coherent failed: %d\n",
__func__, __LINE__, result);
return result;
}
switch (dev->dev_type) {
case PS3_DEVICE_TYPE_SB:
r->region_ops = (USE_DYNAMIC_DMA)
? &ps3_dma_sb_region_ops
: &ps3_dma_sb_region_linear_ops;
break;
case PS3_DEVICE_TYPE_IOC0:
r->region_ops = &ps3_dma_ioc0_region_ops;
break;
default:
BUG();
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(ps3_dma_region_init);
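/*
 * Example (editor's sketch, not part of the original source): the usual
 * calling pattern for the exported DMA region API above, as seen in the
 * PS3 device setup code.  The 'layout' wrapper and its field names are
 * assumptions for illustration; the region is created later, either by
 * the driver or by the system-bus core, via ps3_dma_region_create().
 *
 *	struct layout {
 *		struct ps3_system_bus_device dev;
 *		struct ps3_dma_region d_region;
 *	} *p = kzalloc(sizeof(*p), GFP_KERNEL);
 *
 *	p->dev.d_region = &p->d_region;
 *	result = ps3_dma_region_init(&p->dev, p->dev.d_region, PS3_DMA_64K,
 *				     PS3_DMA_OTHER, NULL, 0);
 */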
int ps3_dma_region_create(struct ps3_dma_region *r)
{
BUG_ON(!r);
BUG_ON(!r->region_ops);
BUG_ON(!r->region_ops->create);
return r->region_ops->create(r);
}
EXPORT_SYMBOL(ps3_dma_region_create);
int ps3_dma_region_free(struct ps3_dma_region *r)
{
BUG_ON(!r);
BUG_ON(!r->region_ops);
BUG_ON(!r->region_ops->free);
return r->region_ops->free(r);
}
EXPORT_SYMBOL(ps3_dma_region_free);
int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
unsigned long len, dma_addr_t *bus_addr,
u64 iopte_flag)
{
return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag);
}
int ps3_dma_unmap(struct ps3_dma_region *r, dma_addr_t bus_addr,
unsigned long len)
{
return r->region_ops->unmap(r, bus_addr, len);
}
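/*
 * Example (editor's sketch, not part of the original source): mapping and
 * unmapping a kernel buffer through an already created region.  The iopte
 * flags are the read/write flags used elsewhere in this file; 'r', 'buf'
 * and 'len' are assumed to come from the calling driver.
 *
 *	dma_addr_t bus_addr;
 *	int result;
 *
 *	result = ps3_dma_map(r, (unsigned long)buf, len, &bus_addr,
 *			     CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M);
 *	if (!result) {
 *		...use bus_addr for device I/O...
 *		result = ps3_dma_unmap(r, bus_addr, len);
 *	}
 */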
/*============================================================================*/
/* system startup routines */
/*============================================================================*/
/**
* ps3_mm_init - initialize the address space state variables
*/
void __init ps3_mm_init(void)
{
int result;
DBG(" -> %s:%d\n", __func__, __LINE__);
result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
&map.total);
if (result)
panic("ps3_repository_read_mm_info() failed");
map.rm.offset = map.rm.base;
map.vas_id = map.htab_size = 0;
/* this implementation assumes map.rm.base is zero */
BUG_ON(map.rm.base);
BUG_ON(!map.rm.size);
/* Check if we got the highmem region from an earlier boot step */
if (ps3_mm_get_repository_highmem(&map.r1)) {
result = ps3_mm_region_create(&map.r1, map.total - map.rm.size);
if (!result)
ps3_mm_set_repository_highmem(&map.r1);
}
/* correct map.total for the real total amount of memory we use */
map.total = map.rm.size + map.r1.size;
if (!map.r1.size) {
DBG("%s:%d: No highmem region found\n", __func__, __LINE__);
} else {
DBG("%s:%d: Adding highmem region: %llxh %llxh\n",
__func__, __LINE__, map.rm.size,
map.total - map.rm.size);
memblock_add(map.rm.size, map.total - map.rm.size);
}
DBG(" <- %s:%d\n", __func__, __LINE__);
}
/**
* ps3_mm_shutdown - final cleanup of address space
*
* called during kexec sequence with MMU off.
*/
notrace void ps3_mm_shutdown(void)
{
ps3_mm_region_destroy(&map.r1);
}
| linux-master | arch/powerpc/platforms/ps3/mm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PS3 repository routines.
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*/
#include <asm/lv1call.h>
#include "platform.h"
enum ps3_vendor_id {
PS3_VENDOR_ID_NONE = 0,
PS3_VENDOR_ID_SONY = 0x8000000000000000UL,
};
enum ps3_lpar_id {
PS3_LPAR_ID_CURRENT = 0,
PS3_LPAR_ID_PME = 1,
};
#define dump_field(_a, _b) _dump_field(_a, _b, __func__, __LINE__)
static void _dump_field(const char *hdr, u64 n, const char *func, int line)
{
#if defined(DEBUG)
char s[16];
const char *const in = (const char *)&n;
unsigned int i;
for (i = 0; i < 8; i++)
s[i] = (in[i] <= 126 && in[i] >= 32) ? in[i] : '.';
s[i] = 0;
pr_devel("%s:%d: %s%016llx : %s\n", func, line, hdr, n, s);
#endif
}
#define dump_node_name(_a, _b, _c, _d, _e) \
_dump_node_name(_a, _b, _c, _d, _e, __func__, __LINE__)
static void _dump_node_name(unsigned int lpar_id, u64 n1, u64 n2, u64 n3,
u64 n4, const char *func, int line)
{
pr_devel("%s:%d: lpar: %u\n", func, line, lpar_id);
_dump_field("n1: ", n1, func, line);
_dump_field("n2: ", n2, func, line);
_dump_field("n3: ", n3, func, line);
_dump_field("n4: ", n4, func, line);
}
#define dump_node(_a, _b, _c, _d, _e, _f, _g) \
_dump_node(_a, _b, _c, _d, _e, _f, _g, __func__, __LINE__)
static void _dump_node(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4,
u64 v1, u64 v2, const char *func, int line)
{
pr_devel("%s:%d: lpar: %u\n", func, line, lpar_id);
_dump_field("n1: ", n1, func, line);
_dump_field("n2: ", n2, func, line);
_dump_field("n3: ", n3, func, line);
_dump_field("n4: ", n4, func, line);
pr_devel("%s:%d: v1: %016llx\n", func, line, v1);
pr_devel("%s:%d: v2: %016llx\n", func, line, v2);
}
/**
* make_first_field - Make the first field of a repository node name.
* @text: Text portion of the field.
* @index: Numeric index portion of the field. Use zero for 'don't care'.
*
* This routine sets the vendor id to zero (non-vendor specific).
* Returns field value.
*/
static u64 make_first_field(const char *text, u64 index)
{
u64 n = 0;
memcpy((char *)&n, text, strnlen(text, sizeof(n)));
return PS3_VENDOR_ID_NONE + (n >> 32) + index;
}
/**
* make_field - Make subsequent fields of a repository node name.
* @text: Text portion of the field. Use "" for 'don't care'.
* @index: Numeric index portion of the field. Use zero for 'don't care'.
*
* Returns field value.
*/
static u64 make_field(const char *text, u64 index)
{
u64 n = 0;
memcpy((char *)&n, text, strnlen(text, sizeof(n)));
return n + index;
}
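/*
 * Worked example (editor's note, not part of the original source): the
 * helpers above pack up to eight ASCII characters into the big-endian u64
 * and add the numeric index.  On this big-endian platform,
 * make_field("bus", 2) yields 0x6275730000000000ULL + 2, while
 * make_first_field("bus", 2) keeps only the upper 32 bits of the packed
 * text (0x62757300) before adding the vendor id (zero here) and the index.
 */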
/**
* read_node - Read a repository node from raw fields.
* @n1: First field of node name.
* @n2: Second field of node name. Use zero for 'don't care'.
* @n3: Third field of node name. Use zero for 'don't care'.
* @n4: Fourth field of node name. Use zero for 'don't care'.
* @v1: First repository value (high word).
* @v2: Second repository value (low word). Optional parameter, use zero
* for 'don't care'.
*/
static int read_node(unsigned int lpar_id, u64 n1, u64 n2, u64 n3, u64 n4,
u64 *_v1, u64 *_v2)
{
int result;
u64 v1;
u64 v2;
if (lpar_id == PS3_LPAR_ID_CURRENT) {
u64 id;
lv1_get_logical_partition_id(&id);
lpar_id = id;
}
result = lv1_read_repository_node(lpar_id, n1, n2, n3, n4, &v1,
&v2);
if (result) {
pr_warn("%s:%d: lv1_read_repository_node failed: %s\n",
__func__, __LINE__, ps3_result(result));
dump_node_name(lpar_id, n1, n2, n3, n4);
return -ENOENT;
}
dump_node(lpar_id, n1, n2, n3, n4, v1, v2);
if (_v1)
*_v1 = v1;
if (_v2)
*_v2 = v2;
if (v1 && !_v1)
pr_devel("%s:%d: warning: discarding non-zero v1: %016llx\n",
__func__, __LINE__, v1);
if (v2 && !_v2)
pr_devel("%s:%d: warning: discarding non-zero v2: %016llx\n",
__func__, __LINE__, v2);
return 0;
}
int ps3_repository_read_bus_str(unsigned int bus_index, const char *bus_str,
u64 *value)
{
return read_node(PS3_LPAR_ID_PME,
make_first_field("bus", bus_index),
make_field(bus_str, 0),
0, 0,
value, NULL);
}
int ps3_repository_read_bus_id(unsigned int bus_index, u64 *bus_id)
{
return read_node(PS3_LPAR_ID_PME, make_first_field("bus", bus_index),
make_field("id", 0), 0, 0, bus_id, NULL);
}
int ps3_repository_read_bus_type(unsigned int bus_index,
enum ps3_bus_type *bus_type)
{
int result;
u64 v1 = 0;
result = read_node(PS3_LPAR_ID_PME,
make_first_field("bus", bus_index),
make_field("type", 0),
0, 0,
&v1, NULL);
*bus_type = v1;
return result;
}
int ps3_repository_read_bus_num_dev(unsigned int bus_index,
unsigned int *num_dev)
{
int result;
u64 v1 = 0;
result = read_node(PS3_LPAR_ID_PME,
make_first_field("bus", bus_index),
make_field("num_dev", 0),
0, 0,
&v1, NULL);
*num_dev = v1;
return result;
}
int ps3_repository_read_dev_str(unsigned int bus_index,
unsigned int dev_index, const char *dev_str, u64 *value)
{
return read_node(PS3_LPAR_ID_PME,
make_first_field("bus", bus_index),
make_field("dev", dev_index),
make_field(dev_str, 0),
0,
value, NULL);
}
int ps3_repository_read_dev_id(unsigned int bus_index, unsigned int dev_index,
u64 *dev_id)
{
return read_node(PS3_LPAR_ID_PME, make_first_field("bus", bus_index),
make_field("dev", dev_index), make_field("id", 0), 0,
dev_id, NULL);
}
int ps3_repository_read_dev_type(unsigned int bus_index,
unsigned int dev_index, enum ps3_dev_type *dev_type)
{
int result;
u64 v1 = 0;
result = read_node(PS3_LPAR_ID_PME,
make_first_field("bus", bus_index),
make_field("dev", dev_index),
make_field("type", 0),
0,
&v1, NULL);
*dev_type = v1;
return result;
}
int ps3_repository_read_dev_intr(unsigned int bus_index,
unsigned int dev_index, unsigned int intr_index,
enum ps3_interrupt_type *intr_type, unsigned int *interrupt_id)
{
int result;
u64 v1 = 0;
u64 v2 = 0;
result = read_node(PS3_LPAR_ID_PME,
make_first_field("bus", bus_index),
make_field("dev", dev_index),
make_field("intr", intr_index),
0,
&v1, &v2);
*intr_type = v1;
*interrupt_id = v2;
return result;
}
int ps3_repository_read_dev_reg_type(unsigned int bus_index,
unsigned int dev_index, unsigned int reg_index,
enum ps3_reg_type *reg_type)
{
int result;
u64 v1 = 0;
result = read_node(PS3_LPAR_ID_PME,
make_first_field("bus", bus_index),
make_field("dev", dev_index),
make_field("reg", reg_index),
make_field("type", 0),
&v1, NULL);
*reg_type = v1;
return result;
}
int ps3_repository_read_dev_reg_addr(unsigned int bus_index,
unsigned int dev_index, unsigned int reg_index, u64 *bus_addr, u64 *len)
{
return read_node(PS3_LPAR_ID_PME,
make_first_field("bus", bus_index),
make_field("dev", dev_index),
make_field("reg", reg_index),
make_field("data", 0),
bus_addr, len);
}
int ps3_repository_read_dev_reg(unsigned int bus_index,
unsigned int dev_index, unsigned int reg_index,
enum ps3_reg_type *reg_type, u64 *bus_addr, u64 *len)
{
int result = ps3_repository_read_dev_reg_type(bus_index, dev_index,
reg_index, reg_type);
return result ? result
: ps3_repository_read_dev_reg_addr(bus_index, dev_index,
reg_index, bus_addr, len);
}
int ps3_repository_find_device(struct ps3_repository_device *repo)
{
int result;
struct ps3_repository_device tmp = *repo;
unsigned int num_dev;
BUG_ON(repo->bus_index > 10);
BUG_ON(repo->dev_index > 10);
result = ps3_repository_read_bus_num_dev(tmp.bus_index, &num_dev);
if (result) {
pr_devel("%s:%d read_bus_num_dev failed\n", __func__, __LINE__);
return result;
}
pr_devel("%s:%d: bus_type %u, bus_index %u, bus_id %llu, num_dev %u\n",
__func__, __LINE__, tmp.bus_type, tmp.bus_index, tmp.bus_id,
num_dev);
if (tmp.dev_index >= num_dev) {
pr_devel("%s:%d: no device found\n", __func__, __LINE__);
return -ENODEV;
}
result = ps3_repository_read_dev_type(tmp.bus_index, tmp.dev_index,
&tmp.dev_type);
if (result) {
pr_devel("%s:%d read_dev_type failed\n", __func__, __LINE__);
return result;
}
result = ps3_repository_read_dev_id(tmp.bus_index, tmp.dev_index,
&tmp.dev_id);
if (result) {
pr_devel("%s:%d ps3_repository_read_dev_id failed\n", __func__,
__LINE__);
return result;
}
pr_devel("%s:%d: found: dev_type %u, dev_index %u, dev_id %llu\n",
__func__, __LINE__, tmp.dev_type, tmp.dev_index, tmp.dev_id);
*repo = tmp;
return 0;
}
int ps3_repository_find_device_by_id(struct ps3_repository_device *repo,
u64 bus_id, u64 dev_id)
{
int result = -ENODEV;
struct ps3_repository_device tmp;
unsigned int num_dev;
pr_devel(" -> %s:%u: find device by id %llu:%llu\n", __func__, __LINE__,
bus_id, dev_id);
for (tmp.bus_index = 0; tmp.bus_index < 10; tmp.bus_index++) {
result = ps3_repository_read_bus_id(tmp.bus_index,
&tmp.bus_id);
if (result) {
pr_devel("%s:%u read_bus_id(%u) failed\n", __func__,
__LINE__, tmp.bus_index);
return result;
}
if (tmp.bus_id == bus_id)
goto found_bus;
pr_devel("%s:%u: skip, bus_id %llu\n", __func__, __LINE__,
tmp.bus_id);
}
pr_devel(" <- %s:%u: bus not found\n", __func__, __LINE__);
return result;
found_bus:
result = ps3_repository_read_bus_type(tmp.bus_index, &tmp.bus_type);
if (result) {
pr_devel("%s:%u read_bus_type(%u) failed\n", __func__,
__LINE__, tmp.bus_index);
return result;
}
result = ps3_repository_read_bus_num_dev(tmp.bus_index, &num_dev);
if (result) {
pr_devel("%s:%u read_bus_num_dev failed\n", __func__,
__LINE__);
return result;
}
for (tmp.dev_index = 0; tmp.dev_index < num_dev; tmp.dev_index++) {
result = ps3_repository_read_dev_id(tmp.bus_index,
tmp.dev_index,
&tmp.dev_id);
if (result) {
pr_devel("%s:%u read_dev_id(%u:%u) failed\n", __func__,
__LINE__, tmp.bus_index, tmp.dev_index);
return result;
}
if (tmp.dev_id == dev_id)
goto found_dev;
pr_devel("%s:%u: skip, dev_id %llu\n", __func__, __LINE__,
tmp.dev_id);
}
pr_devel(" <- %s:%u: dev not found\n", __func__, __LINE__);
return result;
found_dev:
result = ps3_repository_read_dev_type(tmp.bus_index, tmp.dev_index,
&tmp.dev_type);
if (result) {
pr_devel("%s:%u read_dev_type failed\n", __func__, __LINE__);
return result;
}
pr_devel(" <- %s:%u: found: type (%u:%u) index (%u:%u) id (%llu:%llu)\n",
__func__, __LINE__, tmp.bus_type, tmp.dev_type, tmp.bus_index,
tmp.dev_index, tmp.bus_id, tmp.dev_id);
*repo = tmp;
return 0;
}
int __init ps3_repository_find_devices(enum ps3_bus_type bus_type,
int (*callback)(const struct ps3_repository_device *repo))
{
int result = 0;
struct ps3_repository_device repo;
pr_devel(" -> %s:%d: find bus_type %u\n", __func__, __LINE__, bus_type);
repo.bus_type = bus_type;
result = ps3_repository_find_bus(repo.bus_type, 0, &repo.bus_index);
if (result) {
pr_devel(" <- %s:%u: bus not found\n", __func__, __LINE__);
return result;
}
result = ps3_repository_read_bus_id(repo.bus_index, &repo.bus_id);
if (result) {
pr_devel("%s:%d read_bus_id(%u) failed\n", __func__, __LINE__,
repo.bus_index);
return result;
}
for (repo.dev_index = 0; ; repo.dev_index++) {
result = ps3_repository_find_device(&repo);
if (result == -ENODEV) {
result = 0;
break;
} else if (result)
break;
result = callback(&repo);
if (result) {
pr_devel("%s:%d: abort at callback\n", __func__,
__LINE__);
break;
}
}
pr_devel(" <- %s:%d\n", __func__, __LINE__);
return result;
}
int __init ps3_repository_find_bus(enum ps3_bus_type bus_type, unsigned int from,
unsigned int *bus_index)
{
unsigned int i;
enum ps3_bus_type type;
int error;
for (i = from; i < 10; i++) {
error = ps3_repository_read_bus_type(i, &type);
if (error) {
pr_devel("%s:%d read_bus_type failed\n",
__func__, __LINE__);
*bus_index = UINT_MAX;
return error;
}
if (type == bus_type) {
*bus_index = i;
return 0;
}
}
*bus_index = UINT_MAX;
return -ENODEV;
}
int ps3_repository_find_interrupt(const struct ps3_repository_device *repo,
enum ps3_interrupt_type intr_type, unsigned int *interrupt_id)
{
int result = 0;
unsigned int res_index;
pr_devel("%s:%d: find intr_type %u\n", __func__, __LINE__, intr_type);
*interrupt_id = UINT_MAX;
for (res_index = 0; res_index < 10; res_index++) {
enum ps3_interrupt_type t;
unsigned int id;
result = ps3_repository_read_dev_intr(repo->bus_index,
repo->dev_index, res_index, &t, &id);
if (result) {
pr_devel("%s:%d read_dev_intr failed\n",
__func__, __LINE__);
return result;
}
if (t == intr_type) {
*interrupt_id = id;
break;
}
}
if (res_index == 10)
return -ENODEV;
pr_devel("%s:%d: found intr_type %u at res_index %u\n",
__func__, __LINE__, intr_type, res_index);
return result;
}
int ps3_repository_find_reg(const struct ps3_repository_device *repo,
enum ps3_reg_type reg_type, u64 *bus_addr, u64 *len)
{
int result = 0;
unsigned int res_index;
pr_devel("%s:%d: find reg_type %u\n", __func__, __LINE__, reg_type);
*bus_addr = *len = 0;
for (res_index = 0; res_index < 10; res_index++) {
enum ps3_reg_type t;
u64 a;
u64 l;
result = ps3_repository_read_dev_reg(repo->bus_index,
repo->dev_index, res_index, &t, &a, &l);
if (result) {
pr_devel("%s:%d read_dev_reg failed\n",
__func__, __LINE__);
return result;
}
if (t == reg_type) {
*bus_addr = a;
*len = l;
break;
}
}
if (res_index == 10)
return -ENODEV;
pr_devel("%s:%d: found reg_type %u at res_index %u\n",
__func__, __LINE__, reg_type, res_index);
return result;
}
int ps3_repository_read_stor_dev_port(unsigned int bus_index,
unsigned int dev_index, u64 *port)
{
return read_node(PS3_LPAR_ID_PME,
make_first_field("bus", bus_index),
make_field("dev", dev_index),
make_field("port", 0),
0, port, NULL);
}
int ps3_repository_read_stor_dev_blk_size(unsigned int bus_index,
unsigned int dev_index, u64 *blk_size)
{
return read_node(PS3_LPAR_ID_PME,
make_first_field("bus", bus_index),
make_field("dev", dev_index),
make_field("blk_size", 0),
0, blk_size, NULL);
}
int ps3_repository_read_stor_dev_num_blocks(unsigned int bus_index,
unsigned int dev_index, u64 *num_blocks)
{
return read_node(PS3_LPAR_ID_PME,
make_first_field("bus", bus_index),
make_field("dev", dev_index),
make_field("n_blocks", 0),
0, num_blocks, NULL);
}
int ps3_repository_read_stor_dev_num_regions(unsigned int bus_index,
unsigned int dev_index, unsigned int *num_regions)
{
int result;
u64 v1 = 0;
result = read_node(PS3_LPAR_ID_PME,
make_first_field("bus", bus_index),
make_field("dev", dev_index),
make_field("n_regs", 0),
0, &v1, NULL);
*num_regions = v1;
return result;
}
int ps3_repository_read_stor_dev_region_id(unsigned int bus_index,
unsigned int dev_index, unsigned int region_index,
unsigned int *region_id)
{
int result;
u64 v1 = 0;
result = read_node(PS3_LPAR_ID_PME,
make_first_field("bus", bus_index),
make_field("dev", dev_index),
make_field("region", region_index),
make_field("id", 0),
&v1, NULL);
*region_id = v1;
return result;
}
int ps3_repository_read_stor_dev_region_size(unsigned int bus_index,
unsigned int dev_index, unsigned int region_index, u64 *region_size)
{
return read_node(PS3_LPAR_ID_PME,
make_first_field("bus", bus_index),
make_field("dev", dev_index),
make_field("region", region_index),
make_field("size", 0),
region_size, NULL);
}
int ps3_repository_read_stor_dev_region_start(unsigned int bus_index,
unsigned int dev_index, unsigned int region_index, u64 *region_start)
{
return read_node(PS3_LPAR_ID_PME,
make_first_field("bus", bus_index),
make_field("dev", dev_index),
make_field("region", region_index),
make_field("start", 0),
region_start, NULL);
}
int ps3_repository_read_stor_dev_info(unsigned int bus_index,
unsigned int dev_index, u64 *port, u64 *blk_size,
u64 *num_blocks, unsigned int *num_regions)
{
int result;
result = ps3_repository_read_stor_dev_port(bus_index, dev_index, port);
if (result)
return result;
result = ps3_repository_read_stor_dev_blk_size(bus_index, dev_index,
blk_size);
if (result)
return result;
result = ps3_repository_read_stor_dev_num_blocks(bus_index, dev_index,
num_blocks);
if (result)
return result;
result = ps3_repository_read_stor_dev_num_regions(bus_index, dev_index,
num_regions);
return result;
}
int ps3_repository_read_stor_dev_region(unsigned int bus_index,
unsigned int dev_index, unsigned int region_index,
unsigned int *region_id, u64 *region_start, u64 *region_size)
{
int result;
result = ps3_repository_read_stor_dev_region_id(bus_index, dev_index,
region_index, region_id);
if (result)
return result;
result = ps3_repository_read_stor_dev_region_start(bus_index, dev_index,
region_index, region_start);
if (result)
return result;
result = ps3_repository_read_stor_dev_region_size(bus_index, dev_index,
region_index, region_size);
return result;
}
/**
* ps3_repository_read_num_pu - Number of logical PU processors for this lpar.
*/
int ps3_repository_read_num_pu(u64 *num_pu)
{
*num_pu = 0;
return read_node(PS3_LPAR_ID_CURRENT,
make_first_field("bi", 0),
make_field("pun", 0),
0, 0,
num_pu, NULL);
}
/**
* ps3_repository_read_pu_id - Read the logical PU id.
* @pu_index: Zero based index.
* @pu_id: The logical PU id.
*/
int ps3_repository_read_pu_id(unsigned int pu_index, u64 *pu_id)
{
return read_node(PS3_LPAR_ID_CURRENT,
make_first_field("bi", 0),
make_field("pu", pu_index),
0, 0,
pu_id, NULL);
}
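/*
 * Example (editor's sketch, not part of the original source): enumerating
 * the logical PUs of the current lpar with the two accessors above.
 *
 *	u64 num_pu, pu_id;
 *	unsigned int i;
 *
 *	if (!ps3_repository_read_num_pu(&num_pu))
 *		for (i = 0; i < num_pu; i++)
 *			if (!ps3_repository_read_pu_id(i, &pu_id))
 *				pr_debug("pu %u: id %llu\n", i, pu_id);
 */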
int ps3_repository_read_rm_size(unsigned int ppe_id, u64 *rm_size)
{
return read_node(PS3_LPAR_ID_CURRENT,
make_first_field("bi", 0),
make_field("pu", 0),
ppe_id,
make_field("rm_size", 0),
rm_size, NULL);
}
int ps3_repository_read_region_total(u64 *region_total)
{
return read_node(PS3_LPAR_ID_CURRENT,
make_first_field("bi", 0),
make_field("rgntotal", 0),
0, 0,
region_total, NULL);
}
/**
* ps3_repository_read_mm_info - Read mm info for single pu system.
* @rm_base: Real mode memory base address.
* @rm_size: Real mode memory size.
* @region_total: Maximum memory region size.
*/
int ps3_repository_read_mm_info(u64 *rm_base, u64 *rm_size, u64 *region_total)
{
int result;
u64 ppe_id;
lv1_get_logical_ppe_id(&ppe_id);
*rm_base = 0;
result = ps3_repository_read_rm_size(ppe_id, rm_size);
return result ? result
: ps3_repository_read_region_total(region_total);
}
/**
* ps3_repository_read_highmem_region_count - Read the number of highmem regions
*
* Bootloaders must arrange the repository nodes such that regions are indexed
* with a region_index from 0 to region_count-1.
*/
int ps3_repository_read_highmem_region_count(unsigned int *region_count)
{
int result;
u64 v1 = 0;
result = read_node(PS3_LPAR_ID_CURRENT,
make_first_field("highmem", 0),
make_field("region", 0),
make_field("count", 0),
0,
&v1, NULL);
*region_count = v1;
return result;
}
int ps3_repository_read_highmem_base(unsigned int region_index,
u64 *highmem_base)
{
return read_node(PS3_LPAR_ID_CURRENT,
make_first_field("highmem", 0),
make_field("region", region_index),
make_field("base", 0),
0,
highmem_base, NULL);
}
int ps3_repository_read_highmem_size(unsigned int region_index,
u64 *highmem_size)
{
return read_node(PS3_LPAR_ID_CURRENT,
make_first_field("highmem", 0),
make_field("region", region_index),
make_field("size", 0),
0,
highmem_size, NULL);
}
/**
* ps3_repository_read_highmem_info - Read high memory region info
* @region_index: Region index, {0,..,region_count-1}.
* @highmem_base: High memory base address.
* @highmem_size: High memory size.
*
* Bootloaders that preallocate highmem regions must place the
* region info into the repository at these well known nodes.
*/
int ps3_repository_read_highmem_info(unsigned int region_index,
u64 *highmem_base, u64 *highmem_size)
{
int result;
*highmem_base = 0;
result = ps3_repository_read_highmem_base(region_index, highmem_base);
return result ? result
: ps3_repository_read_highmem_size(region_index, highmem_size);
}
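/*
 * Example (editor's sketch, not part of the original source): walking the
 * bootloader-provided highmem regions using the region count convention
 * documented above.
 *
 *	unsigned int count, i;
 *	u64 base, size;
 *
 *	if (!ps3_repository_read_highmem_region_count(&count))
 *		for (i = 0; i < count; i++)
 *			if (!ps3_repository_read_highmem_info(i, &base, &size))
 *				pr_debug("highmem %u: %llxh %llxh\n",
 *					 i, base, size);
 */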
/**
* ps3_repository_read_num_spu_reserved - Number of physical spus reserved.
 * @num_spu_reserved: Number of physical spus reserved.
*/
int ps3_repository_read_num_spu_reserved(unsigned int *num_spu_reserved)
{
int result;
u64 v1 = 0;
result = read_node(PS3_LPAR_ID_CURRENT,
make_first_field("bi", 0),
make_field("spun", 0),
0, 0,
&v1, NULL);
*num_spu_reserved = v1;
return result;
}
/**
* ps3_repository_read_num_spu_resource_id - Number of spu resource reservations.
* @num_resource_id: Number of spu resource ids.
*/
int ps3_repository_read_num_spu_resource_id(unsigned int *num_resource_id)
{
int result;
u64 v1 = 0;
result = read_node(PS3_LPAR_ID_CURRENT,
make_first_field("bi", 0),
make_field("spursvn", 0),
0, 0,
&v1, NULL);
*num_resource_id = v1;
return result;
}
/**
* ps3_repository_read_spu_resource_id - spu resource reservation id value.
* @res_index: Resource reservation index.
* @resource_type: Resource reservation type.
* @resource_id: Resource reservation id.
*/
int ps3_repository_read_spu_resource_id(unsigned int res_index,
enum ps3_spu_resource_type *resource_type, unsigned int *resource_id)
{
int result;
u64 v1 = 0;
u64 v2 = 0;
result = read_node(PS3_LPAR_ID_CURRENT,
make_first_field("bi", 0),
make_field("spursv", 0),
res_index,
0,
&v1, &v2);
*resource_type = v1;
*resource_id = v2;
return result;
}
static int ps3_repository_read_boot_dat_address(u64 *address)
{
return read_node(PS3_LPAR_ID_CURRENT,
make_first_field("bi", 0),
make_field("boot_dat", 0),
make_field("address", 0),
0,
address, NULL);
}
int ps3_repository_read_boot_dat_size(unsigned int *size)
{
int result;
u64 v1 = 0;
result = read_node(PS3_LPAR_ID_CURRENT,
make_first_field("bi", 0),
make_field("boot_dat", 0),
make_field("size", 0),
0,
&v1, NULL);
*size = v1;
return result;
}
int __init ps3_repository_read_vuart_av_port(unsigned int *port)
{
int result;
u64 v1 = 0;
result = read_node(PS3_LPAR_ID_CURRENT,
make_first_field("bi", 0),
make_field("vir_uart", 0),
make_field("port", 0),
make_field("avset", 0),
&v1, NULL);
*port = v1;
return result;
}
int __init ps3_repository_read_vuart_sysmgr_port(unsigned int *port)
{
int result;
u64 v1 = 0;
result = read_node(PS3_LPAR_ID_CURRENT,
make_first_field("bi", 0),
make_field("vir_uart", 0),
make_field("port", 0),
make_field("sysmgr", 0),
&v1, NULL);
*port = v1;
return result;
}
/**
* ps3_repository_read_boot_dat_info - Get address and size of cell_ext_os_area.
 * @lpar_addr: lpar address of cell_ext_os_area
* @size: size of cell_ext_os_area
*/
int ps3_repository_read_boot_dat_info(u64 *lpar_addr, unsigned int *size)
{
int result;
*size = 0;
result = ps3_repository_read_boot_dat_address(lpar_addr);
return result ? result
: ps3_repository_read_boot_dat_size(size);
}
/**
* ps3_repository_read_num_be - Number of physical BE processors in the system.
*/
int ps3_repository_read_num_be(unsigned int *num_be)
{
int result;
u64 v1 = 0;
result = read_node(PS3_LPAR_ID_PME,
make_first_field("ben", 0),
0,
0,
0,
&v1, NULL);
*num_be = v1;
return result;
}
/**
* ps3_repository_read_be_node_id - Read the physical BE processor node id.
* @be_index: Zero based index.
* @node_id: The BE processor node id.
*/
int ps3_repository_read_be_node_id(unsigned int be_index, u64 *node_id)
{
return read_node(PS3_LPAR_ID_PME,
make_first_field("be", be_index),
0,
0,
0,
node_id, NULL);
}
/**
* ps3_repository_read_be_id - Read the physical BE processor id.
* @node_id: The BE processor node id.
* @be_id: The BE processor id.
*/
int ps3_repository_read_be_id(u64 node_id, u64 *be_id)
{
return read_node(PS3_LPAR_ID_PME,
make_first_field("be", 0),
node_id,
0,
0,
be_id, NULL);
}
int __init ps3_repository_read_tb_freq(u64 node_id, u64 *tb_freq)
{
return read_node(PS3_LPAR_ID_PME,
make_first_field("be", 0),
node_id,
make_field("clock", 0),
0,
tb_freq, NULL);
}
int __init ps3_repository_read_be_tb_freq(unsigned int be_index, u64 *tb_freq)
{
int result;
u64 node_id;
*tb_freq = 0;
result = ps3_repository_read_be_node_id(be_index, &node_id);
return result ? result
: ps3_repository_read_tb_freq(node_id, tb_freq);
}
int ps3_repository_read_lpm_privileges(unsigned int be_index, u64 *lpar,
u64 *rights)
{
int result;
u64 node_id;
*lpar = 0;
*rights = 0;
result = ps3_repository_read_be_node_id(be_index, &node_id);
return result ? result
: read_node(PS3_LPAR_ID_PME,
make_first_field("be", 0),
node_id,
make_field("lpm", 0),
make_field("priv", 0),
lpar, rights);
}
#if defined(CONFIG_PS3_REPOSITORY_WRITE)
static int create_node(u64 n1, u64 n2, u64 n3, u64 n4, u64 v1, u64 v2)
{
int result;
dump_node(0, n1, n2, n3, n4, v1, v2);
result = lv1_create_repository_node(n1, n2, n3, n4, v1, v2);
if (result) {
pr_devel("%s:%d: lv1_create_repository_node failed: %s\n",
__func__, __LINE__, ps3_result(result));
return -ENOENT;
}
return 0;
}
static int delete_node(u64 n1, u64 n2, u64 n3, u64 n4)
{
int result;
dump_node(0, n1, n2, n3, n4, 0, 0);
result = lv1_delete_repository_node(n1, n2, n3, n4);
if (result) {
pr_devel("%s:%d: lv1_delete_repository_node failed: %s\n",
__func__, __LINE__, ps3_result(result));
return -ENOENT;
}
return 0;
}
static int write_node(u64 n1, u64 n2, u64 n3, u64 n4, u64 v1, u64 v2)
{
int result;
result = create_node(n1, n2, n3, n4, v1, v2);
if (!result)
return 0;
result = lv1_write_repository_node(n1, n2, n3, n4, v1, v2);
if (result) {
pr_devel("%s:%d: lv1_write_repository_node failed: %s\n",
__func__, __LINE__, ps3_result(result));
return -ENOENT;
}
return 0;
}
int ps3_repository_write_highmem_region_count(unsigned int region_count)
{
int result;
u64 v1 = (u64)region_count;
result = write_node(
make_first_field("highmem", 0),
make_field("region", 0),
make_field("count", 0),
0,
v1, 0);
return result;
}
int ps3_repository_write_highmem_base(unsigned int region_index,
u64 highmem_base)
{
return write_node(
make_first_field("highmem", 0),
make_field("region", region_index),
make_field("base", 0),
0,
highmem_base, 0);
}
int ps3_repository_write_highmem_size(unsigned int region_index,
u64 highmem_size)
{
return write_node(
make_first_field("highmem", 0),
make_field("region", region_index),
make_field("size", 0),
0,
highmem_size, 0);
}
int ps3_repository_write_highmem_info(unsigned int region_index,
u64 highmem_base, u64 highmem_size)
{
int result;
result = ps3_repository_write_highmem_base(region_index, highmem_base);
return result ? result
: ps3_repository_write_highmem_size(region_index, highmem_size);
}
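/*
 * Example (editor's sketch, not part of the original source): the write
 * helpers above are meant for a kexec-style boot stage that hands a single
 * preallocated highmem region to the next kernel.  Assuming 'base' and
 * 'size' describe that region, a minimal use would be:
 *
 *	result = ps3_repository_write_highmem_region_count(1);
 *	if (!result)
 *		result = ps3_repository_write_highmem_info(0, base, size);
 */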
static int ps3_repository_delete_highmem_base(unsigned int region_index)
{
return delete_node(
make_first_field("highmem", 0),
make_field("region", region_index),
make_field("base", 0),
0);
}
static int ps3_repository_delete_highmem_size(unsigned int region_index)
{
return delete_node(
make_first_field("highmem", 0),
make_field("region", region_index),
make_field("size", 0),
0);
}
int ps3_repository_delete_highmem_info(unsigned int region_index)
{
int result;
result = ps3_repository_delete_highmem_base(region_index);
result += ps3_repository_delete_highmem_size(region_index);
return result ? -1 : 0;
}
#endif /* defined(CONFIG_PS3_REPOSITORY_WRITE) */
#if defined(DEBUG)
int __init ps3_repository_dump_resource_info(const struct ps3_repository_device *repo)
{
int result = 0;
unsigned int res_index;
pr_devel(" -> %s:%d: (%u:%u)\n", __func__, __LINE__,
repo->bus_index, repo->dev_index);
for (res_index = 0; res_index < 10; res_index++) {
enum ps3_interrupt_type intr_type;
unsigned int interrupt_id;
result = ps3_repository_read_dev_intr(repo->bus_index,
repo->dev_index, res_index, &intr_type, &interrupt_id);
if (result) {
if (result != LV1_NO_ENTRY)
pr_devel("%s:%d ps3_repository_read_dev_intr"
" (%u:%u) failed\n", __func__, __LINE__,
repo->bus_index, repo->dev_index);
break;
}
pr_devel("%s:%d (%u:%u) intr_type %u, interrupt_id %u\n",
__func__, __LINE__, repo->bus_index, repo->dev_index,
intr_type, interrupt_id);
}
for (res_index = 0; res_index < 10; res_index++) {
enum ps3_reg_type reg_type;
u64 bus_addr;
u64 len;
result = ps3_repository_read_dev_reg(repo->bus_index,
			repo->dev_index, res_index, &reg_type, &bus_addr, &len);
if (result) {
if (result != LV1_NO_ENTRY)
pr_devel("%s:%d ps3_repository_read_dev_reg"
" (%u:%u) failed\n", __func__, __LINE__,
repo->bus_index, repo->dev_index);
break;
}
pr_devel("%s:%d (%u:%u) reg_type %u, bus_addr %llxh, len %llxh\n",
__func__, __LINE__, repo->bus_index, repo->dev_index,
reg_type, bus_addr, len);
}
pr_devel(" <- %s:%d\n", __func__, __LINE__);
return result;
}
static int __init dump_stor_dev_info(struct ps3_repository_device *repo)
{
int result = 0;
unsigned int num_regions, region_index;
u64 port, blk_size, num_blocks;
pr_devel(" -> %s:%d: (%u:%u)\n", __func__, __LINE__,
repo->bus_index, repo->dev_index);
result = ps3_repository_read_stor_dev_info(repo->bus_index,
repo->dev_index, &port, &blk_size, &num_blocks, &num_regions);
if (result) {
pr_devel("%s:%d ps3_repository_read_stor_dev_info"
" (%u:%u) failed\n", __func__, __LINE__,
repo->bus_index, repo->dev_index);
goto out;
}
pr_devel("%s:%d (%u:%u): port %llu, blk_size %llu, num_blocks "
"%llu, num_regions %u\n",
__func__, __LINE__, repo->bus_index, repo->dev_index,
port, blk_size, num_blocks, num_regions);
for (region_index = 0; region_index < num_regions; region_index++) {
unsigned int region_id;
u64 region_start, region_size;
result = ps3_repository_read_stor_dev_region(repo->bus_index,
			repo->dev_index, region_index, &region_id,
			&region_start, &region_size);
if (result) {
pr_devel("%s:%d ps3_repository_read_stor_dev_region"
" (%u:%u) failed\n", __func__, __LINE__,
repo->bus_index, repo->dev_index);
break;
}
pr_devel("%s:%d (%u:%u) region_id %u, start %lxh, size %lxh\n",
__func__, __LINE__, repo->bus_index, repo->dev_index,
region_id, (unsigned long)region_start,
(unsigned long)region_size);
}
out:
pr_devel(" <- %s:%d\n", __func__, __LINE__);
return result;
}
static int __init dump_device_info(struct ps3_repository_device *repo,
unsigned int num_dev)
{
int result = 0;
pr_devel(" -> %s:%d: bus_%u\n", __func__, __LINE__, repo->bus_index);
for (repo->dev_index = 0; repo->dev_index < num_dev;
repo->dev_index++) {
result = ps3_repository_read_dev_type(repo->bus_index,
repo->dev_index, &repo->dev_type);
if (result) {
pr_devel("%s:%d ps3_repository_read_dev_type"
" (%u:%u) failed\n", __func__, __LINE__,
repo->bus_index, repo->dev_index);
break;
}
result = ps3_repository_read_dev_id(repo->bus_index,
repo->dev_index, &repo->dev_id);
if (result) {
pr_devel("%s:%d ps3_repository_read_dev_id"
" (%u:%u) failed\n", __func__, __LINE__,
repo->bus_index, repo->dev_index);
continue;
}
pr_devel("%s:%d (%u:%u): dev_type %u, dev_id %lu\n", __func__,
__LINE__, repo->bus_index, repo->dev_index,
repo->dev_type, (unsigned long)repo->dev_id);
ps3_repository_dump_resource_info(repo);
if (repo->bus_type == PS3_BUS_TYPE_STORAGE)
dump_stor_dev_info(repo);
}
pr_devel(" <- %s:%d\n", __func__, __LINE__);
return result;
}
int __init ps3_repository_dump_bus_info(void)
{
int result = 0;
struct ps3_repository_device repo;
pr_devel(" -> %s:%d\n", __func__, __LINE__);
memset(&repo, 0, sizeof(repo));
for (repo.bus_index = 0; repo.bus_index < 10; repo.bus_index++) {
unsigned int num_dev;
result = ps3_repository_read_bus_type(repo.bus_index,
&repo.bus_type);
if (result) {
pr_devel("%s:%d read_bus_type(%u) failed\n",
__func__, __LINE__, repo.bus_index);
break;
}
result = ps3_repository_read_bus_id(repo.bus_index,
&repo.bus_id);
if (result) {
pr_devel("%s:%d read_bus_id(%u) failed\n",
__func__, __LINE__, repo.bus_index);
continue;
}
if (repo.bus_index != repo.bus_id)
pr_devel("%s:%d bus_index != bus_id\n",
__func__, __LINE__);
result = ps3_repository_read_bus_num_dev(repo.bus_index,
&num_dev);
if (result) {
pr_devel("%s:%d read_bus_num_dev(%u) failed\n",
__func__, __LINE__, repo.bus_index);
continue;
}
pr_devel("%s:%d bus_%u: bus_type %u, bus_id %lu, num_dev %u\n",
__func__, __LINE__, repo.bus_index, repo.bus_type,
(unsigned long)repo.bus_id, num_dev);
dump_device_info(&repo, num_dev);
}
pr_devel(" <- %s:%d\n", __func__, __LINE__);
return result;
}
#endif /* defined(DEBUG) */
| linux-master | arch/powerpc/platforms/ps3/repository.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PS3 device registration routines.
*
* Copyright (C) 2007 Sony Computer Entertainment Inc.
* Copyright 2007 Sony Corp.
*/
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/rcuwait.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#include <asm/ps3stor.h>
#include "platform.h"
static int __init ps3_register_lpm_devices(void)
{
int result;
u64 tmp1;
u64 tmp2;
struct ps3_system_bus_device *dev;
pr_debug(" -> %s:%d\n", __func__, __LINE__);
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
dev->match_id = PS3_MATCH_ID_LPM;
dev->dev_type = PS3_DEVICE_TYPE_LPM;
/* The current lpm driver only supports a single BE processor. */
result = ps3_repository_read_be_node_id(0, &dev->lpm.node_id);
if (result) {
pr_debug("%s:%d: ps3_repository_read_be_node_id failed \n",
__func__, __LINE__);
goto fail_read_repo;
}
result = ps3_repository_read_lpm_privileges(dev->lpm.node_id, &tmp1,
&dev->lpm.rights);
if (result) {
pr_debug("%s:%d: ps3_repository_read_lpm_privileges failed\n",
__func__, __LINE__);
goto fail_read_repo;
}
lv1_get_logical_partition_id(&tmp2);
if (tmp1 != tmp2) {
pr_debug("%s:%d: wrong lpar\n",
__func__, __LINE__);
result = -ENODEV;
goto fail_rights;
}
if (!(dev->lpm.rights & PS3_LPM_RIGHTS_USE_LPM)) {
pr_debug("%s:%d: don't have rights to use lpm\n",
__func__, __LINE__);
result = -EPERM;
goto fail_rights;
}
pr_debug("%s:%d: pu_id %llu, rights %llu(%llxh)\n",
__func__, __LINE__, dev->lpm.pu_id, dev->lpm.rights,
dev->lpm.rights);
result = ps3_repository_read_pu_id(0, &dev->lpm.pu_id);
if (result) {
pr_debug("%s:%d: ps3_repository_read_pu_id failed \n",
__func__, __LINE__);
goto fail_read_repo;
}
result = ps3_system_bus_device_register(dev);
if (result) {
pr_debug("%s:%d ps3_system_bus_device_register failed\n",
__func__, __LINE__);
goto fail_register;
}
pr_debug(" <- %s:%d\n", __func__, __LINE__);
return 0;
fail_register:
fail_rights:
fail_read_repo:
kfree(dev);
pr_debug(" <- %s:%d: failed\n", __func__, __LINE__);
return result;
}
/**
* ps3_setup_gelic_device - Setup and register a gelic device instance.
*
* Allocates memory for a struct ps3_system_bus_device instance, initialises the
* structure members, and registers the device instance with the system bus.
*/
static int __init ps3_setup_gelic_device(
const struct ps3_repository_device *repo)
{
int result;
struct layout {
struct ps3_system_bus_device dev;
struct ps3_dma_region d_region;
} *p;
pr_debug(" -> %s:%d\n", __func__, __LINE__);
BUG_ON(repo->bus_type != PS3_BUS_TYPE_SB);
BUG_ON(repo->dev_type != PS3_DEV_TYPE_SB_GELIC);
p = kzalloc(sizeof(struct layout), GFP_KERNEL);
if (!p) {
result = -ENOMEM;
goto fail_malloc;
}
p->dev.match_id = PS3_MATCH_ID_GELIC;
p->dev.dev_type = PS3_DEVICE_TYPE_SB;
p->dev.bus_id = repo->bus_id;
p->dev.dev_id = repo->dev_id;
p->dev.d_region = &p->d_region;
result = ps3_repository_find_interrupt(repo,
PS3_INTERRUPT_TYPE_EVENT_PORT, &p->dev.interrupt_id);
if (result) {
pr_debug("%s:%d ps3_repository_find_interrupt failed\n",
__func__, __LINE__);
goto fail_find_interrupt;
}
BUG_ON(p->dev.interrupt_id != 0);
result = ps3_dma_region_init(&p->dev, p->dev.d_region, PS3_DMA_64K,
PS3_DMA_OTHER, NULL, 0);
if (result) {
pr_debug("%s:%d ps3_dma_region_init failed\n",
__func__, __LINE__);
goto fail_dma_init;
}
result = ps3_system_bus_device_register(&p->dev);
if (result) {
pr_debug("%s:%d ps3_system_bus_device_register failed\n",
__func__, __LINE__);
goto fail_device_register;
}
pr_debug(" <- %s:%d\n", __func__, __LINE__);
return result;
fail_device_register:
fail_dma_init:
fail_find_interrupt:
kfree(p);
fail_malloc:
pr_debug(" <- %s:%d: fail.\n", __func__, __LINE__);
return result;
}
static int __ref ps3_setup_uhc_device(
const struct ps3_repository_device *repo, enum ps3_match_id match_id,
enum ps3_interrupt_type interrupt_type, enum ps3_reg_type reg_type)
{
int result;
struct layout {
struct ps3_system_bus_device dev;
struct ps3_dma_region d_region;
struct ps3_mmio_region m_region;
} *p;
u64 bus_addr;
u64 len;
pr_debug(" -> %s:%d\n", __func__, __LINE__);
BUG_ON(repo->bus_type != PS3_BUS_TYPE_SB);
BUG_ON(repo->dev_type != PS3_DEV_TYPE_SB_USB);
p = kzalloc(sizeof(struct layout), GFP_KERNEL);
if (!p) {
result = -ENOMEM;
goto fail_malloc;
}
p->dev.match_id = match_id;
p->dev.dev_type = PS3_DEVICE_TYPE_SB;
p->dev.bus_id = repo->bus_id;
p->dev.dev_id = repo->dev_id;
p->dev.d_region = &p->d_region;
p->dev.m_region = &p->m_region;
result = ps3_repository_find_interrupt(repo,
interrupt_type, &p->dev.interrupt_id);
if (result) {
pr_debug("%s:%d ps3_repository_find_interrupt failed\n",
__func__, __LINE__);
goto fail_find_interrupt;
}
result = ps3_repository_find_reg(repo, reg_type,
&bus_addr, &len);
if (result) {
pr_debug("%s:%d ps3_repository_find_reg failed\n",
__func__, __LINE__);
goto fail_find_reg;
}
result = ps3_dma_region_init(&p->dev, p->dev.d_region, PS3_DMA_64K,
PS3_DMA_INTERNAL, NULL, 0);
if (result) {
pr_debug("%s:%d ps3_dma_region_init failed\n",
__func__, __LINE__);
goto fail_dma_init;
}
result = ps3_mmio_region_init(&p->dev, p->dev.m_region, bus_addr, len,
PS3_MMIO_4K);
if (result) {
pr_debug("%s:%d ps3_mmio_region_init failed\n",
__func__, __LINE__);
goto fail_mmio_init;
}
result = ps3_system_bus_device_register(&p->dev);
if (result) {
pr_debug("%s:%d ps3_system_bus_device_register failed\n",
__func__, __LINE__);
goto fail_device_register;
}
pr_debug(" <- %s:%d\n", __func__, __LINE__);
return result;
fail_device_register:
fail_mmio_init:
fail_dma_init:
fail_find_reg:
fail_find_interrupt:
kfree(p);
fail_malloc:
pr_debug(" <- %s:%d: fail.\n", __func__, __LINE__);
return result;
}
static int __init ps3_setup_ehci_device(
const struct ps3_repository_device *repo)
{
return ps3_setup_uhc_device(repo, PS3_MATCH_ID_EHCI,
PS3_INTERRUPT_TYPE_SB_EHCI, PS3_REG_TYPE_SB_EHCI);
}
static int __init ps3_setup_ohci_device(
const struct ps3_repository_device *repo)
{
return ps3_setup_uhc_device(repo, PS3_MATCH_ID_OHCI,
PS3_INTERRUPT_TYPE_SB_OHCI, PS3_REG_TYPE_SB_OHCI);
}
static int __init ps3_setup_vuart_device(enum ps3_match_id match_id,
unsigned int port_number)
{
int result;
struct layout {
struct ps3_system_bus_device dev;
} *p;
pr_debug(" -> %s:%d: match_id %u, port %u\n", __func__, __LINE__,
match_id, port_number);
p = kzalloc(sizeof(struct layout), GFP_KERNEL);
if (!p)
return -ENOMEM;
p->dev.match_id = match_id;
p->dev.dev_type = PS3_DEVICE_TYPE_VUART;
p->dev.port_number = port_number;
result = ps3_system_bus_device_register(&p->dev);
if (result) {
pr_debug("%s:%d ps3_system_bus_device_register failed\n",
__func__, __LINE__);
goto fail_device_register;
}
pr_debug(" <- %s:%d\n", __func__, __LINE__);
return 0;
fail_device_register:
kfree(p);
pr_debug(" <- %s:%d fail\n", __func__, __LINE__);
return result;
}
static int ps3_setup_storage_dev(const struct ps3_repository_device *repo,
enum ps3_match_id match_id)
{
int result;
struct ps3_storage_device *p;
u64 port, blk_size, num_blocks;
unsigned int num_regions, i;
pr_debug(" -> %s:%u: match_id %u\n", __func__, __LINE__, match_id);
result = ps3_repository_read_stor_dev_info(repo->bus_index,
repo->dev_index, &port,
&blk_size, &num_blocks,
&num_regions);
if (result) {
printk(KERN_ERR "%s:%u: _read_stor_dev_info failed %d\n",
__func__, __LINE__, result);
return -ENODEV;
}
pr_debug("%s:%u: (%u:%u:%u): port %llu blk_size %llu num_blocks %llu "
"num_regions %u\n", __func__, __LINE__, repo->bus_index,
repo->dev_index, repo->dev_type, port, blk_size, num_blocks,
num_regions);
p = kzalloc(struct_size(p, regions, num_regions), GFP_KERNEL);
if (!p) {
result = -ENOMEM;
goto fail_malloc;
}
p->sbd.match_id = match_id;
p->sbd.dev_type = PS3_DEVICE_TYPE_SB;
p->sbd.bus_id = repo->bus_id;
p->sbd.dev_id = repo->dev_id;
p->sbd.d_region = &p->dma_region;
p->blk_size = blk_size;
p->num_regions = num_regions;
result = ps3_repository_find_interrupt(repo,
PS3_INTERRUPT_TYPE_EVENT_PORT,
&p->sbd.interrupt_id);
if (result) {
printk(KERN_ERR "%s:%u: find_interrupt failed %d\n", __func__,
__LINE__, result);
result = -ENODEV;
goto fail_find_interrupt;
}
for (i = 0; i < num_regions; i++) {
unsigned int id;
u64 start, size;
result = ps3_repository_read_stor_dev_region(repo->bus_index,
repo->dev_index,
i, &id, &start,
&size);
if (result) {
printk(KERN_ERR
"%s:%u: read_stor_dev_region failed %d\n",
__func__, __LINE__, result);
result = -ENODEV;
goto fail_read_region;
}
pr_debug("%s:%u: region %u: id %u start %llu size %llu\n",
__func__, __LINE__, i, id, start, size);
p->regions[i].id = id;
p->regions[i].start = start;
p->regions[i].size = size;
}
result = ps3_system_bus_device_register(&p->sbd);
if (result) {
pr_debug("%s:%u ps3_system_bus_device_register failed\n",
__func__, __LINE__);
goto fail_device_register;
}
pr_debug(" <- %s:%u\n", __func__, __LINE__);
return 0;
fail_device_register:
fail_read_region:
fail_find_interrupt:
kfree(p);
fail_malloc:
pr_debug(" <- %s:%u: fail.\n", __func__, __LINE__);
return result;
}
static int __init ps3_register_vuart_devices(void)
{
int result;
unsigned int port_number;
pr_debug(" -> %s:%d\n", __func__, __LINE__);
result = ps3_repository_read_vuart_av_port(&port_number);
if (result)
port_number = 0; /* av default */
result = ps3_setup_vuart_device(PS3_MATCH_ID_AV_SETTINGS, port_number);
WARN_ON(result);
result = ps3_repository_read_vuart_sysmgr_port(&port_number);
if (result)
port_number = 2; /* sysmgr default */
result = ps3_setup_vuart_device(PS3_MATCH_ID_SYSTEM_MANAGER,
port_number);
WARN_ON(result);
pr_debug(" <- %s:%d\n", __func__, __LINE__);
return result;
}
static int __init ps3_register_sound_devices(void)
{
int result;
struct layout {
struct ps3_system_bus_device dev;
struct ps3_dma_region d_region;
struct ps3_mmio_region m_region;
} *p;
pr_debug(" -> %s:%d\n", __func__, __LINE__);
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
p->dev.match_id = PS3_MATCH_ID_SOUND;
p->dev.dev_type = PS3_DEVICE_TYPE_IOC0;
p->dev.d_region = &p->d_region;
p->dev.m_region = &p->m_region;
result = ps3_system_bus_device_register(&p->dev);
if (result) {
pr_debug("%s:%d ps3_system_bus_device_register failed\n",
__func__, __LINE__);
goto fail_device_register;
}
pr_debug(" <- %s:%d\n", __func__, __LINE__);
return 0;
fail_device_register:
kfree(p);
pr_debug(" <- %s:%d failed\n", __func__, __LINE__);
return result;
}
static int __init ps3_register_graphics_devices(void)
{
int result;
struct layout {
struct ps3_system_bus_device dev;
} *p;
pr_debug(" -> %s:%d\n", __func__, __LINE__);
p = kzalloc(sizeof(struct layout), GFP_KERNEL);
if (!p)
return -ENOMEM;
p->dev.match_id = PS3_MATCH_ID_GPU;
p->dev.match_sub_id = PS3_MATCH_SUB_ID_GPU_FB;
p->dev.dev_type = PS3_DEVICE_TYPE_IOC0;
result = ps3_system_bus_device_register(&p->dev);
if (result) {
pr_debug("%s:%d ps3_system_bus_device_register failed\n",
__func__, __LINE__);
goto fail_device_register;
}
pr_debug(" <- %s:%d\n", __func__, __LINE__);
return 0;
fail_device_register:
kfree(p);
pr_debug(" <- %s:%d failed\n", __func__, __LINE__);
return result;
}
static int __init ps3_register_ramdisk_device(void)
{
int result;
struct layout {
struct ps3_system_bus_device dev;
} *p;
pr_debug(" -> %s:%d\n", __func__, __LINE__);
p = kzalloc(sizeof(struct layout), GFP_KERNEL);
if (!p)
return -ENOMEM;
p->dev.match_id = PS3_MATCH_ID_GPU;
p->dev.match_sub_id = PS3_MATCH_SUB_ID_GPU_RAMDISK;
p->dev.dev_type = PS3_DEVICE_TYPE_IOC0;
result = ps3_system_bus_device_register(&p->dev);
if (result) {
pr_debug("%s:%d ps3_system_bus_device_register failed\n",
__func__, __LINE__);
goto fail_device_register;
}
pr_debug(" <- %s:%d\n", __func__, __LINE__);
return 0;
fail_device_register:
kfree(p);
pr_debug(" <- %s:%d failed\n", __func__, __LINE__);
return result;
}
/**
* ps3_setup_dynamic_device - Setup a dynamic device from the repository
*/
static int ps3_setup_dynamic_device(const struct ps3_repository_device *repo)
{
int result;
switch (repo->dev_type) {
case PS3_DEV_TYPE_STOR_DISK:
result = ps3_setup_storage_dev(repo, PS3_MATCH_ID_STOR_DISK);
/* Some devices are not accessible from the Other OS lpar. */
if (result == -ENODEV) {
result = 0;
pr_debug("%s:%u: not accessible\n", __func__,
__LINE__);
}
if (result)
pr_debug("%s:%u ps3_setup_storage_dev failed\n",
__func__, __LINE__);
break;
case PS3_DEV_TYPE_STOR_ROM:
result = ps3_setup_storage_dev(repo, PS3_MATCH_ID_STOR_ROM);
if (result)
pr_debug("%s:%u ps3_setup_storage_dev failed\n",
__func__, __LINE__);
break;
case PS3_DEV_TYPE_STOR_FLASH:
result = ps3_setup_storage_dev(repo, PS3_MATCH_ID_STOR_FLASH);
if (result)
pr_debug("%s:%u ps3_setup_storage_dev failed\n",
__func__, __LINE__);
break;
default:
result = 0;
pr_debug("%s:%u: unsupported dev_type %u\n", __func__, __LINE__,
repo->dev_type);
}
return result;
}
/**
* ps3_setup_static_device - Setup a static device from the repository
*/
static int __init ps3_setup_static_device(const struct ps3_repository_device *repo)
{
int result;
switch (repo->dev_type) {
case PS3_DEV_TYPE_SB_GELIC:
result = ps3_setup_gelic_device(repo);
if (result) {
pr_debug("%s:%d ps3_setup_gelic_device failed\n",
__func__, __LINE__);
}
break;
case PS3_DEV_TYPE_SB_USB:
/* Each USB device has both an EHCI and an OHCI HC */
result = ps3_setup_ehci_device(repo);
if (result) {
pr_debug("%s:%d ps3_setup_ehci_device failed\n",
__func__, __LINE__);
}
result = ps3_setup_ohci_device(repo);
if (result) {
pr_debug("%s:%d ps3_setup_ohci_device failed\n",
__func__, __LINE__);
}
break;
default:
return ps3_setup_dynamic_device(repo);
}
return result;
}
static void ps3_find_and_add_device(u64 bus_id, u64 dev_id)
{
struct ps3_repository_device repo;
int res;
unsigned int retries;
unsigned long rem;
/*
* On some firmware versions (e.g. 1.90), the device may not show up
* in the repository immediately
*/
for (retries = 0; retries < 10; retries++) {
res = ps3_repository_find_device_by_id(&repo, bus_id, dev_id);
if (!res)
goto found;
rem = msleep_interruptible(100);
if (rem)
break;
}
pr_warn("%s:%u: device %llu:%llu not found\n",
__func__, __LINE__, bus_id, dev_id);
return;
found:
if (retries)
pr_debug("%s:%u: device %llu:%llu found after %u retries\n",
__func__, __LINE__, bus_id, dev_id, retries);
ps3_setup_dynamic_device(&repo);
return;
}
#define PS3_NOTIFICATION_DEV_ID ULONG_MAX
#define PS3_NOTIFICATION_INTERRUPT_ID 0
struct ps3_notification_device {
struct ps3_system_bus_device sbd;
spinlock_t lock;
u64 tag;
u64 lv1_status;
struct rcuwait wait;
bool done;
};
enum ps3_notify_type {
notify_device_ready = 0,
notify_region_probe = 1,
notify_region_update = 2,
};
struct ps3_notify_cmd {
u64 operation_code; /* must be zero */
u64 event_mask; /* OR of 1UL << enum ps3_notify_type */
};
struct ps3_notify_event {
u64 event_type; /* enum ps3_notify_type */
u64 bus_id;
u64 dev_id;
u64 dev_type;
u64 dev_port;
};
static irqreturn_t ps3_notification_interrupt(int irq, void *data)
{
struct ps3_notification_device *dev = data;
int res;
u64 tag, status;
spin_lock(&dev->lock);
res = lv1_storage_get_async_status(PS3_NOTIFICATION_DEV_ID, &tag,
&status);
if (tag != dev->tag)
pr_err("%s:%u: tag mismatch, got %llx, expected %llx\n",
__func__, __LINE__, tag, dev->tag);
if (res) {
pr_err("%s:%u: res %d status 0x%llx\n", __func__, __LINE__, res,
status);
} else {
pr_debug("%s:%u: completed, status 0x%llx\n", __func__,
__LINE__, status);
dev->lv1_status = status;
dev->done = true;
rcuwait_wake_up(&dev->wait);
}
spin_unlock(&dev->lock);
return IRQ_HANDLED;
}
static int ps3_notification_read_write(struct ps3_notification_device *dev,
u64 lpar, int write)
{
const char *op = write ? "write" : "read";
unsigned long flags;
int res;
spin_lock_irqsave(&dev->lock, flags);
res = write ? lv1_storage_write(dev->sbd.dev_id, 0, 0, 1, 0, lpar,
&dev->tag)
: lv1_storage_read(dev->sbd.dev_id, 0, 0, 1, 0, lpar,
&dev->tag);
dev->done = false;
spin_unlock_irqrestore(&dev->lock, flags);
if (res) {
pr_err("%s:%u: %s failed %d\n", __func__, __LINE__, op, res);
return -EPERM;
}
pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op);
rcuwait_wait_event(&dev->wait, dev->done || kthread_should_stop(), TASK_IDLE);
if (kthread_should_stop())
res = -EINTR;
if (dev->lv1_status) {
pr_err("%s:%u: %s not completed, status 0x%llx\n", __func__,
__LINE__, op, dev->lv1_status);
return -EIO;
}
pr_debug("%s:%u: notification %s completed\n", __func__, __LINE__, op);
return 0;
}
static struct task_struct *probe_task;
/**
* ps3_probe_thread - Background repository probing at system startup.
*
* This implementation only supports background probing on a single bus.
* It uses the hypervisor's storage device notification mechanism to wait until
* a storage device is ready. The device notification mechanism uses a
* pseudo device to asynchronously notify the guest when storage devices become
* ready. The notification device has a block size of 512 bytes.
*/
static int ps3_probe_thread(void *data)
{
struct ps3_notification_device dev;
int res;
unsigned int irq;
u64 lpar;
void *buf;
struct ps3_notify_cmd *notify_cmd;
struct ps3_notify_event *notify_event;
pr_debug(" -> %s:%u: kthread started\n", __func__, __LINE__);
buf = kzalloc(512, GFP_KERNEL);
if (!buf)
return -ENOMEM;
lpar = ps3_mm_phys_to_lpar(__pa(buf));
notify_cmd = buf;
notify_event = buf;
/* dummy system bus device */
dev.sbd.bus_id = (u64)data;
dev.sbd.dev_id = PS3_NOTIFICATION_DEV_ID;
dev.sbd.interrupt_id = PS3_NOTIFICATION_INTERRUPT_ID;
res = lv1_open_device(dev.sbd.bus_id, dev.sbd.dev_id, 0);
if (res) {
pr_err("%s:%u: lv1_open_device failed %s\n", __func__,
__LINE__, ps3_result(res));
goto fail_free;
}
res = ps3_sb_event_receive_port_setup(&dev.sbd, PS3_BINDING_CPU_ANY,
&irq);
if (res) {
pr_err("%s:%u: ps3_sb_event_receive_port_setup failed %d\n",
__func__, __LINE__, res);
goto fail_close_device;
}
spin_lock_init(&dev.lock);
rcuwait_init(&dev.wait);
res = request_irq(irq, ps3_notification_interrupt, 0,
"ps3_notification", &dev);
if (res) {
pr_err("%s:%u: request_irq failed %d\n", __func__, __LINE__,
res);
goto fail_sb_event_receive_port_destroy;
}
/* Setup and write the request for device notification. */
notify_cmd->operation_code = 0; /* must be zero */
notify_cmd->event_mask = 1UL << notify_region_probe;
res = ps3_notification_read_write(&dev, lpar, 1);
if (res)
goto fail_free_irq;
/* Loop here processing the requested notification events. */
do {
try_to_freeze();
memset(notify_event, 0, sizeof(*notify_event));
res = ps3_notification_read_write(&dev, lpar, 0);
if (res)
break;
pr_debug("%s:%u: notify event type 0x%llx bus id %llu dev id %llu"
" type %llu port %llu\n", __func__, __LINE__,
notify_event->event_type, notify_event->bus_id,
notify_event->dev_id, notify_event->dev_type,
notify_event->dev_port);
if (notify_event->event_type != notify_region_probe ||
notify_event->bus_id != dev.sbd.bus_id) {
pr_warn("%s:%u: bad notify_event: event %llu, dev_id %llu, dev_type %llu\n",
__func__, __LINE__, notify_event->event_type,
notify_event->dev_id, notify_event->dev_type);
continue;
}
ps3_find_and_add_device(dev.sbd.bus_id, notify_event->dev_id);
} while (!kthread_should_stop());
fail_free_irq:
free_irq(irq, &dev);
fail_sb_event_receive_port_destroy:
ps3_sb_event_receive_port_destroy(&dev.sbd, irq);
fail_close_device:
lv1_close_device(dev.sbd.bus_id, dev.sbd.dev_id);
fail_free:
kfree(buf);
probe_task = NULL;
pr_debug(" <- %s:%u: kthread finished\n", __func__, __LINE__);
return 0;
}
/**
* ps3_stop_probe_thread - Stops the background probe thread.
*
*/
static int ps3_stop_probe_thread(struct notifier_block *nb, unsigned long code,
void *data)
{
if (probe_task)
kthread_stop(probe_task);
return 0;
}
static struct notifier_block nb = {
.notifier_call = ps3_stop_probe_thread
};
/**
* ps3_start_probe_thread - Starts the background probe thread.
*
*/
static int __init ps3_start_probe_thread(enum ps3_bus_type bus_type)
{
int result;
struct task_struct *task;
struct ps3_repository_device repo;
pr_debug(" -> %s:%d\n", __func__, __LINE__);
memset(&repo, 0, sizeof(repo));
repo.bus_type = bus_type;
result = ps3_repository_find_bus(repo.bus_type, 0, &repo.bus_index);
if (result) {
printk(KERN_ERR "%s: Cannot find bus (%d)\n", __func__, result);
return -ENODEV;
}
result = ps3_repository_read_bus_id(repo.bus_index, &repo.bus_id);
if (result) {
printk(KERN_ERR "%s: read_bus_id failed %d\n", __func__,
result);
return -ENODEV;
}
task = kthread_run(ps3_probe_thread, (void *)repo.bus_id,
"ps3-probe-%u", bus_type);
if (IS_ERR(task)) {
result = PTR_ERR(task);
printk(KERN_ERR "%s: kthread_run failed %d\n", __func__,
result);
return result;
}
probe_task = task;
register_reboot_notifier(&nb);
pr_debug(" <- %s:%d\n", __func__, __LINE__);
return 0;
}
/**
* ps3_register_devices - Probe the system and register devices found.
*
* A device_initcall() routine.
*/
static int __init ps3_register_devices(void)
{
int result;
if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
return -ENODEV;
pr_debug(" -> %s:%d\n", __func__, __LINE__);
/* ps3_repository_dump_bus_info(); */
result = ps3_start_probe_thread(PS3_BUS_TYPE_STORAGE);
ps3_register_vuart_devices();
ps3_register_graphics_devices();
ps3_repository_find_devices(PS3_BUS_TYPE_SB, ps3_setup_static_device);
ps3_register_sound_devices();
ps3_register_lpm_devices();
ps3_register_ramdisk_device();
pr_debug(" <- %s:%d\n", __func__, __LINE__);
return 0;
}
device_initcall(ps3_register_devices);
| linux-master | arch/powerpc/platforms/ps3/device-init.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* PS3 SMP routines.
*
* Copyright (C) 2006 Sony Computer Entertainment Inc.
* Copyright 2006 Sony Corp.
*/
#include <linux/kernel.h>
#include <linux/smp.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include "platform.h"
#if defined(DEBUG)
#define DBG udbg_printf
#else
#define DBG pr_debug
#endif
/**
* ps3_ipi_virqs - a per cpu array of virqs for ipi use
*/
#define MSG_COUNT 4
static DEFINE_PER_CPU(unsigned int [MSG_COUNT], ps3_ipi_virqs);
static void ps3_smp_message_pass(int cpu, int msg)
{
int result;
unsigned int virq;
if (msg >= MSG_COUNT) {
DBG("%s:%d: bad msg: %d\n", __func__, __LINE__, msg);
return;
}
virq = per_cpu(ps3_ipi_virqs, cpu)[msg];
result = ps3_send_event_locally(virq);
if (result)
DBG("%s:%d: ps3_send_event_locally(%d, %d) failed"
" (%d)\n", __func__, __LINE__, cpu, msg, result);
}
static void __init ps3_smp_probe(void)
{
int cpu;
for (cpu = 0; cpu < 2; cpu++) {
int result;
unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
int i;
DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
/*
* Check assumptions on ps3_ipi_virqs[] indexing. If this
* check fails, then a different mapping of PPC_MSG_
* to index needs to be setup.
*/
BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0);
BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1);
BUILD_BUG_ON(PPC_MSG_TICK_BROADCAST != 2);
BUILD_BUG_ON(PPC_MSG_NMI_IPI != 3);
for (i = 0; i < MSG_COUNT; i++) {
result = ps3_event_receive_port_setup(cpu, &virqs[i]);
if (result)
continue;
DBG("%s:%d: (%d, %d) => virq %u\n",
__func__, __LINE__, cpu, i, virqs[i]);
result = smp_request_message_ipi(virqs[i], i);
if (result)
virqs[i] = 0;
else
ps3_register_ipi_irq(cpu, virqs[i]);
}
ps3_register_ipi_debug_brk(cpu, virqs[PPC_MSG_NMI_IPI]);
DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
}
}
void ps3_smp_cleanup_cpu(int cpu)
{
unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu);
int i;
DBG(" -> %s:%d: (%d)\n", __func__, __LINE__, cpu);
for (i = 0; i < MSG_COUNT; i++) {
/* Can't call free_irq from interrupt context. */
ps3_event_receive_port_destroy(virqs[i]);
virqs[i] = 0;
}
DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
}
static struct smp_ops_t ps3_smp_ops = {
.probe = ps3_smp_probe,
.message_pass = ps3_smp_message_pass,
.kick_cpu = smp_generic_kick_cpu,
};
void __init smp_init_ps3(void)
{
DBG(" -> %s\n", __func__);
smp_ops = &ps3_smp_ops;
DBG(" <- %s\n", __func__);
}
| linux-master | arch/powerpc/platforms/ps3/smp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2004 Benjamin Herrenschmidt ([email protected]),
* IBM Corp.
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/of_irq.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/iommu.h>
#include <asm/ppc-pci.h>
#include <asm/isa-bridge.h>
#include "maple.h"
#ifdef DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif
static struct pci_controller *u3_agp, *u3_ht, *u4_pcie;
static int __init fixup_one_level_bus_range(struct device_node *node, int higher)
{
for (; node; node = node->sibling) {
const int *bus_range;
const unsigned int *class_code;
int len;
/* For PCI<->PCI bridges or CardBus bridges, we go down */
class_code = of_get_property(node, "class-code", NULL);
if (!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
(*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
continue;
bus_range = of_get_property(node, "bus-range", &len);
if (bus_range != NULL && len > 2 * sizeof(int)) {
if (bus_range[1] > higher)
higher = bus_range[1];
}
higher = fixup_one_level_bus_range(node->child, higher);
}
return higher;
}
/* This routine fixes the "bus-range" property of all bridges in the
* system since they tend to have their "last" member wrong on macs
*
* Note that the bus numbers manipulated here are OF bus numbers, they
* are not Linux bus numbers.
*/
static void __init fixup_bus_range(struct device_node *bridge)
{
int *bus_range;
struct property *prop;
int len;
/* Lookup the "bus-range" property for the hose */
prop = of_find_property(bridge, "bus-range", &len);
if (prop == NULL || prop->value == NULL || len < 2 * sizeof(int)) {
printk(KERN_WARNING "Can't get bus-range for %pOF\n",
bridge);
return;
}
bus_range = prop->value;
bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]);
}
static unsigned long u3_agp_cfa0(u8 devfn, u8 off)
{
return (1 << (unsigned long)PCI_SLOT(devfn)) |
((unsigned long)PCI_FUNC(devfn) << 8) |
((unsigned long)off & 0xFCUL);
}
static unsigned long u3_agp_cfa1(u8 bus, u8 devfn, u8 off)
{
return ((unsigned long)bus << 16) |
((unsigned long)devfn << 8) |
((unsigned long)off & 0xFCUL) |
1UL;
}
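/*
 * Worked example (illustrative, not part of the original file): a
 * type-1 access to bus 0xf1, devfn 0x08 (slot 1, function 0), offset
 * 0x04 is encoded by u3_agp_cfa1() as (0xf1 << 16) | (0x08 << 8) |
 * 0x04 | 1 = 0x00f10805. The value is written to hose->cfg_addr (and
 * read back, see below), and the data is then accessed through
 * hose->cfg_data at the low offset bits.
 */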
static volatile void __iomem *u3_agp_cfg_access(struct pci_controller* hose,
u8 bus, u8 dev_fn, u8 offset)
{
unsigned int caddr;
if (bus == hose->first_busno) {
if (dev_fn < (11 << 3))
return NULL;
caddr = u3_agp_cfa0(dev_fn, offset);
} else
caddr = u3_agp_cfa1(bus, dev_fn, offset);
/* Uninorth will return garbage if we don't read back the value ! */
do {
out_le32(hose->cfg_addr, caddr);
} while (in_le32(hose->cfg_addr) != caddr);
offset &= 0x07;
return hose->cfg_data + offset;
}
static int u3_agp_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
struct pci_controller *hose;
volatile void __iomem *addr;
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
addr = u3_agp_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
switch (len) {
case 1:
*val = in_8(addr);
break;
case 2:
*val = in_le16(addr);
break;
default:
*val = in_le32(addr);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int u3_agp_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
struct pci_controller *hose;
volatile void __iomem *addr;
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
addr = u3_agp_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
switch (len) {
case 1:
out_8(addr, val);
break;
case 2:
out_le16(addr, val);
break;
default:
out_le32(addr, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops u3_agp_pci_ops =
{
.read = u3_agp_read_config,
.write = u3_agp_write_config,
};
static unsigned long u3_ht_cfa0(u8 devfn, u8 off)
{
return (devfn << 8) | off;
}
static unsigned long u3_ht_cfa1(u8 bus, u8 devfn, u8 off)
{
return u3_ht_cfa0(devfn, off) + (bus << 16) + 0x01000000UL;
}
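/*
 * Worked example (illustrative, not part of the original file): for
 * bus 1, devfn 0x08 (slot 1, function 0), offset 0x04, u3_ht_cfa1()
 * yields 0x01000000 + (1 << 16) + (0x08 << 8) + 0x04 = 0x01010804,
 * which is used directly as an offset into the 32MB config window
 * ioremap'ed at hose->cfg_data by setup_u3_ht().
 */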
static volatile void __iomem *u3_ht_cfg_access(struct pci_controller* hose,
u8 bus, u8 devfn, u8 offset)
{
if (bus == hose->first_busno) {
if (PCI_SLOT(devfn) == 0)
return NULL;
return hose->cfg_data + u3_ht_cfa0(devfn, offset);
} else
return hose->cfg_data + u3_ht_cfa1(bus, devfn, offset);
}
static int u3_ht_root_read_config(struct pci_controller *hose, u8 offset,
int len, u32 *val)
{
volatile void __iomem *addr;
addr = hose->cfg_addr;
addr += ((offset & ~3) << 2) + (4 - len - (offset & 3));
switch (len) {
case 1:
*val = in_8(addr);
break;
case 2:
*val = in_be16(addr);
break;
default:
*val = in_be32(addr);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int u3_ht_root_write_config(struct pci_controller *hose, u8 offset,
int len, u32 val)
{
volatile void __iomem *addr;
addr = hose->cfg_addr + ((offset & ~3) << 2) + (4 - len - (offset & 3));
if (offset >= PCI_BASE_ADDRESS_0 && offset < PCI_CAPABILITY_LIST)
return PCIBIOS_SUCCESSFUL;
switch (len) {
case 1:
out_8(addr, val);
break;
case 2:
out_be16(addr, val);
break;
default:
out_be32(addr, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
struct pci_controller *hose;
volatile void __iomem *addr;
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
if (bus->number == hose->first_busno && devfn == PCI_DEVFN(0, 0))
return u3_ht_root_read_config(hose, offset, len, val);
if (offset > 0xff)
return PCIBIOS_BAD_REGISTER_NUMBER;
addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
switch (len) {
case 1:
*val = in_8(addr);
break;
case 2:
*val = in_le16(addr);
break;
default:
*val = in_le32(addr);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
struct pci_controller *hose;
volatile void __iomem *addr;
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
if (bus->number == hose->first_busno && devfn == PCI_DEVFN(0, 0))
return u3_ht_root_write_config(hose, offset, len, val);
if (offset > 0xff)
return PCIBIOS_BAD_REGISTER_NUMBER;
addr = u3_ht_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
switch (len) {
case 1:
out_8(addr, val);
break;
case 2:
out_le16(addr, val);
break;
default:
out_le32(addr, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops u3_ht_pci_ops =
{
.read = u3_ht_read_config,
.write = u3_ht_write_config,
};
static unsigned int u4_pcie_cfa0(unsigned int devfn, unsigned int off)
{
return (1 << PCI_SLOT(devfn)) |
(PCI_FUNC(devfn) << 8) |
((off >> 8) << 28) |
(off & 0xfcu);
}
static unsigned int u4_pcie_cfa1(unsigned int bus, unsigned int devfn,
unsigned int off)
{
return (bus << 16) |
(devfn << 8) |
((off >> 8) << 28) |
(off & 0xfcu) | 1u;
}
static volatile void __iomem *u4_pcie_cfg_access(struct pci_controller* hose,
u8 bus, u8 dev_fn, int offset)
{
unsigned int caddr;
if (bus == hose->first_busno)
caddr = u4_pcie_cfa0(dev_fn, offset);
else
caddr = u4_pcie_cfa1(bus, dev_fn, offset);
/* Uninorth will return garbage if we don't read back the value ! */
do {
out_le32(hose->cfg_addr, caddr);
} while (in_le32(hose->cfg_addr) != caddr);
offset &= 0x03;
return hose->cfg_data + offset;
}
static int u4_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
struct pci_controller *hose;
volatile void __iomem *addr;
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
if (offset >= 0x1000)
return PCIBIOS_BAD_REGISTER_NUMBER;
addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
switch (len) {
case 1:
*val = in_8(addr);
break;
case 2:
*val = in_le16(addr);
break;
default:
*val = in_le32(addr);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int u4_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
struct pci_controller *hose;
volatile void __iomem *addr;
hose = pci_bus_to_host(bus);
if (hose == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
if (offset >= 0x1000)
return PCIBIOS_BAD_REGISTER_NUMBER;
addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset);
if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
switch (len) {
case 1:
out_8(addr, val);
break;
case 2:
out_le16(addr, val);
break;
default:
out_le32(addr, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops u4_pcie_pci_ops =
{
.read = u4_pcie_read_config,
.write = u4_pcie_write_config,
};
static void __init setup_u3_agp(struct pci_controller* hose)
{
/* On G5, we move AGP up to high bus number so we don't need
* to reassign bus numbers for HT. If we ever have P2P bridges
* on AGP, we'll have to move pci_assign_all_buses to the
* pci_controller structure so we enable it for AGP and not for
	 * HT children.
* We hard code the address because of the different size of
* the reg address cell, we shall fix that by killing struct
* reg_property and using some accessor functions instead
*/
hose->first_busno = 0xf0;
hose->last_busno = 0xff;
hose->ops = &u3_agp_pci_ops;
hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
u3_agp = hose;
}
static void __init setup_u4_pcie(struct pci_controller* hose)
{
/* We currently only implement the "non-atomic" config space, to
* be optimised later.
*/
hose->ops = &u4_pcie_pci_ops;
hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000);
hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000);
u4_pcie = hose;
}
static void __init setup_u3_ht(struct pci_controller* hose)
{
hose->ops = &u3_ht_pci_ops;
/* We hard code the address because of the different size of
* the reg address cell, we shall fix that by killing struct
* reg_property and using some accessor functions instead
*/
hose->cfg_data = ioremap(0xf2000000, 0x02000000);
hose->cfg_addr = ioremap(0xf8070000, 0x1000);
hose->first_busno = 0;
hose->last_busno = 0xef;
u3_ht = hose;
}
static int __init maple_add_bridge(struct device_node *dev)
{
int len;
struct pci_controller *hose;
char* disp_name;
const int *bus_range;
int primary = 1;
DBG("Adding PCI host bridge %pOF\n", dev);
bus_range = of_get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
printk(KERN_WARNING "Can't get bus-range for %pOF, assume bus 0\n",
dev);
}
hose = pcibios_alloc_controller(dev);
if (hose == NULL)
return -ENOMEM;
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
hose->controller_ops = maple_pci_controller_ops;
disp_name = NULL;
if (of_device_is_compatible(dev, "u3-agp")) {
setup_u3_agp(hose);
disp_name = "U3-AGP";
primary = 0;
} else if (of_device_is_compatible(dev, "u3-ht")) {
setup_u3_ht(hose);
disp_name = "U3-HT";
primary = 1;
} else if (of_device_is_compatible(dev, "u4-pcie")) {
setup_u4_pcie(hose);
disp_name = "U4-PCIE";
primary = 0;
}
printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number: %d->%d\n",
disp_name, hose->first_busno, hose->last_busno);
/* Interpret the "ranges" property */
/* This also maps the I/O region and sets isa_io/mem_base */
pci_process_bridge_OF_ranges(hose, dev, primary);
/* Fixup "bus-range" OF property */
fixup_bus_range(dev);
/* Check for legacy IOs */
isa_bridge_find_early(hose);
/* create pci_dn's for DT nodes under this PHB */
pci_devs_phb_init_dynamic(hose);
return 0;
}
void maple_pci_irq_fixup(struct pci_dev *dev)
{
DBG(" -> maple_pci_irq_fixup\n");
/* Fixup IRQ for PCIe host */
if (u4_pcie != NULL && dev->bus->number == 0 &&
pci_bus_to_host(dev->bus) == u4_pcie) {
printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n");
dev->irq = irq_create_mapping(NULL, 1);
if (dev->irq)
irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW);
}
/* Hide AMD8111 IDE interrupt when in legacy mode so
* the driver calls pci_get_legacy_ide_irq()
*/
if (dev->vendor == PCI_VENDOR_ID_AMD &&
dev->device == PCI_DEVICE_ID_AMD_8111_IDE &&
(dev->class & 5) != 5) {
dev->irq = 0;
}
DBG(" <- maple_pci_irq_fixup\n");
}
static int maple_pci_root_bridge_prepare(struct pci_host_bridge *bridge)
{
struct pci_controller *hose = pci_bus_to_host(bridge->bus);
struct device_node *np, *child;
if (hose != u3_agp)
return 0;
/* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We
* assume there is no P2P bridge on the AGP bus, which should be a
	 * safe assumption, hopefully.
*/
np = hose->dn;
PCI_DN(np)->busno = 0xf0;
for_each_child_of_node(np, child)
PCI_DN(child)->busno = 0xf0;
return 0;
}
void __init maple_pci_init(void)
{
struct device_node *np, *root;
struct device_node *ht = NULL;
	/* Probe root PCI hosts, that is, on U3 the AGP host and the
	 * HyperTransport host. The latter is kept around and added
	 * last, as its resource management relies on the AGP resources
	 * having been set up first.
	 */
root = of_find_node_by_path("/");
if (root == NULL) {
printk(KERN_CRIT "maple_find_bridges: can't find root of device tree\n");
return;
}
for_each_child_of_node(root, np) {
if (!of_node_is_type(np, "pci") && !of_node_is_type(np, "ht"))
continue;
if ((of_device_is_compatible(np, "u4-pcie") ||
of_device_is_compatible(np, "u3-agp")) &&
maple_add_bridge(np) == 0)
of_node_get(np);
if (of_device_is_compatible(np, "u3-ht")) {
of_node_get(np);
ht = np;
}
}
of_node_put(root);
/* Now setup the HyperTransport host if we found any
*/
if (ht && maple_add_bridge(ht) != 0)
of_node_put(ht);
ppc_md.pcibios_root_bridge_prepare = maple_pci_root_bridge_prepare;
/* Tell pci.c to not change any resource allocations. */
pci_add_flags(PCI_PROBE_ONLY);
}
int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel)
{
struct device_node *np;
unsigned int defirq = channel ? 15 : 14;
unsigned int irq;
if (pdev->vendor != PCI_VENDOR_ID_AMD ||
pdev->device != PCI_DEVICE_ID_AMD_8111_IDE)
return defirq;
np = pci_device_to_OF_node(pdev);
if (np == NULL) {
printk("Failed to locate OF node for IDE %s\n",
pci_name(pdev));
return defirq;
}
irq = irq_of_parse_and_map(np, channel & 0x1);
if (!irq) {
printk("Failed to map onboard IDE interrupt for channel %d\n",
channel);
return defirq;
}
return irq;
}
static void quirk_ipr_msi(struct pci_dev *dev)
{
/* Something prevents MSIs from the IPR from working on Bimini,
* and the driver has no smarts to recover. So disable MSI
* on it for now. */
if (machine_is(maple)) {
dev->no_msi = 1;
dev_info(&dev->dev, "Quirk disabled MSI\n");
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
quirk_ipr_msi);
struct pci_controller_ops maple_pci_controller_ops = {
};
| linux-master | arch/powerpc/platforms/maple/pci.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Maple (970 eval board) setup code
*
* (c) Copyright 2004 Benjamin Herrenschmidt ([email protected]),
* IBM Corp.
*/
#undef DEBUG
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/major.h>
#include <linux/initrd.h>
#include <linux/vt_kern.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/adb.h>
#include <linux/cuda.h>
#include <linux/pmu.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/serial.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/memblock.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/dma.h>
#include <asm/cputable.h>
#include <asm/time.h>
#include <asm/mpic.h>
#include <asm/rtas.h>
#include <asm/udbg.h>
#include <asm/nvram.h>
#include "maple.h"
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
static unsigned long maple_find_nvram_base(void)
{
struct device_node *rtcs;
unsigned long result = 0;
/* find NVRAM device */
rtcs = of_find_compatible_node(NULL, "nvram", "AMD8111");
if (rtcs) {
struct resource r;
if (of_address_to_resource(rtcs, 0, &r)) {
printk(KERN_EMERG "Maple: Unable to translate NVRAM"
" address\n");
goto bail;
}
if (!(r.flags & IORESOURCE_IO)) {
printk(KERN_EMERG "Maple: NVRAM address isn't PIO!\n");
goto bail;
}
result = r.start;
} else
printk(KERN_EMERG "Maple: Unable to find NVRAM\n");
bail:
of_node_put(rtcs);
return result;
}
static void __noreturn maple_restart(char *cmd)
{
unsigned int maple_nvram_base;
const unsigned int *maple_nvram_offset, *maple_nvram_command;
struct device_node *sp;
maple_nvram_base = maple_find_nvram_base();
if (maple_nvram_base == 0)
goto fail;
/* find service processor device */
sp = of_find_node_by_name(NULL, "service-processor");
if (!sp) {
printk(KERN_EMERG "Maple: Unable to find Service Processor\n");
goto fail;
}
maple_nvram_offset = of_get_property(sp, "restart-addr", NULL);
maple_nvram_command = of_get_property(sp, "restart-value", NULL);
of_node_put(sp);
/* send command */
outb_p(*maple_nvram_command, maple_nvram_base + *maple_nvram_offset);
for (;;) ;
fail:
printk(KERN_EMERG "Maple: Manual Restart Required\n");
for (;;) ;
}
static void __noreturn maple_power_off(void)
{
unsigned int maple_nvram_base;
const unsigned int *maple_nvram_offset, *maple_nvram_command;
struct device_node *sp;
maple_nvram_base = maple_find_nvram_base();
if (maple_nvram_base == 0)
goto fail;
/* find service processor device */
sp = of_find_node_by_name(NULL, "service-processor");
if (!sp) {
printk(KERN_EMERG "Maple: Unable to find Service Processor\n");
goto fail;
}
maple_nvram_offset = of_get_property(sp, "power-off-addr", NULL);
maple_nvram_command = of_get_property(sp, "power-off-value", NULL);
of_node_put(sp);
/* send command */
outb_p(*maple_nvram_command, maple_nvram_base + *maple_nvram_offset);
for (;;) ;
fail:
printk(KERN_EMERG "Maple: Manual Power-Down Required\n");
for (;;) ;
}
static void __noreturn maple_halt(void)
{
maple_power_off();
}
#ifdef CONFIG_SMP
static struct smp_ops_t maple_smp_ops = {
.probe = smp_mpic_probe,
.message_pass = smp_mpic_message_pass,
.kick_cpu = smp_generic_kick_cpu,
.setup_cpu = smp_mpic_setup_cpu,
.give_timebase = smp_generic_give_timebase,
.take_timebase = smp_generic_take_timebase,
};
#endif /* CONFIG_SMP */
static void __init maple_use_rtas_reboot_and_halt_if_present(void)
{
if (rtas_function_implemented(RTAS_FN_SYSTEM_REBOOT) &&
rtas_function_implemented(RTAS_FN_POWER_OFF)) {
ppc_md.restart = rtas_restart;
pm_power_off = rtas_power_off;
ppc_md.halt = rtas_halt;
}
}
static void __init maple_setup_arch(void)
{
/* init to some ~sane value until calibrate_delay() runs */
loops_per_jiffy = 50000000;
/* Setup SMP callback */
#ifdef CONFIG_SMP
smp_ops = &maple_smp_ops;
#endif
maple_use_rtas_reboot_and_halt_if_present();
printk(KERN_DEBUG "Using native/NAP idle loop\n");
mmio_nvram_init();
}
/*
* This is almost identical to pSeries and CHRP. We need to make that
* code generic at one point, with appropriate bits in the device-tree to
* identify the presence of an HT APIC
*/
static void __init maple_init_IRQ(void)
{
struct device_node *root, *np, *mpic_node = NULL;
const unsigned int *opprop;
unsigned long openpic_addr = 0;
int naddr, n, i, opplen, has_isus = 0;
struct mpic *mpic;
unsigned int flags = 0;
/* Locate MPIC in the device-tree. Note that there is a bug
* in Maple device-tree where the type of the controller is
* open-pic and not interrupt-controller
*/
for_each_node_by_type(np, "interrupt-controller")
if (of_device_is_compatible(np, "open-pic")) {
mpic_node = np;
break;
}
if (mpic_node == NULL)
for_each_node_by_type(np, "open-pic") {
mpic_node = np;
break;
}
if (mpic_node == NULL) {
printk(KERN_ERR
"Failed to locate the MPIC interrupt controller\n");
return;
}
/* Find address list in /platform-open-pic */
root = of_find_node_by_path("/");
naddr = of_n_addr_cells(root);
opprop = of_get_property(root, "platform-open-pic", &opplen);
if (opprop) {
openpic_addr = of_read_number(opprop, naddr);
has_isus = (opplen > naddr);
printk(KERN_DEBUG "OpenPIC addr: %lx, has ISUs: %d\n",
openpic_addr, has_isus);
}
BUG_ON(openpic_addr == 0);
/* Check for a big endian MPIC */
if (of_property_read_bool(np, "big-endian"))
flags |= MPIC_BIG_ENDIAN;
/* XXX Maple specific bits */
flags |= MPIC_U3_HT_IRQS;
	/* All U3/U4 are big-endian; older SLOF firmware doesn't encode this */
flags |= MPIC_BIG_ENDIAN;
	/* Set up the openpic driver. More device-tree junk; we hard code no
	 * ISUs for now and will have to revisit that with the folks doing
	 * the firmware for those boards.
	 */
mpic = mpic_alloc(mpic_node, openpic_addr, flags,
/*has_isus ? 16 :*/ 0, 0, " MPIC ");
BUG_ON(mpic == NULL);
/* Add ISUs */
opplen /= sizeof(u32);
for (n = 0, i = naddr; i < opplen; i += naddr, n++) {
unsigned long isuaddr = of_read_number(opprop + i, naddr);
mpic_assign_isu(mpic, n, isuaddr);
}
/* All ISUs are setup, complete initialization */
mpic_init(mpic);
ppc_md.get_irq = mpic_get_irq;
of_node_put(mpic_node);
of_node_put(root);
}
static void __init maple_progress(char *s, unsigned short hex)
{
printk("*** %04x : %s\n", hex, s ? s : "");
}
/*
* Called very early, MMU is off, device-tree isn't unflattened
*/
static int __init maple_probe(void)
{
if (!of_machine_is_compatible("Momentum,Maple") &&
!of_machine_is_compatible("Momentum,Apache"))
return 0;
pm_power_off = maple_power_off;
iommu_init_early_dart(&maple_pci_controller_ops);
return 1;
}
#ifdef CONFIG_EDAC
/*
* Register a platform device for CPC925 memory controller on
* all boards with U3H (CPC925) bridge.
*/
static int __init maple_cpc925_edac_setup(void)
{
struct platform_device *pdev;
struct device_node *np = NULL;
struct resource r;
int ret;
volatile void __iomem *mem;
u32 rev;
np = of_find_node_by_type(NULL, "memory-controller");
if (!np) {
printk(KERN_ERR "%s: Unable to find memory-controller node\n",
__func__);
return -ENODEV;
}
ret = of_address_to_resource(np, 0, &r);
of_node_put(np);
if (ret < 0) {
printk(KERN_ERR "%s: Unable to get memory-controller reg\n",
__func__);
return -ENODEV;
}
mem = ioremap(r.start, resource_size(&r));
if (!mem) {
printk(KERN_ERR "%s: Unable to map memory-controller memory\n",
__func__);
return -ENOMEM;
}
rev = __raw_readl(mem);
iounmap(mem);
if (rev < 0x34 || rev > 0x3f) { /* U3H */
printk(KERN_ERR "%s: Non-CPC925(U3H) bridge revision: %02x\n",
__func__, rev);
return 0;
}
pdev = platform_device_register_simple("cpc925_edac", 0, &r, 1);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
printk(KERN_INFO "%s: CPC925 platform device created\n", __func__);
return 0;
}
machine_device_initcall(maple, maple_cpc925_edac_setup);
#endif
define_machine(maple) {
.name = "Maple",
.probe = maple_probe,
.setup_arch = maple_setup_arch,
.discover_phbs = maple_pci_init,
.init_IRQ = maple_init_IRQ,
.pci_irq_fixup = maple_pci_irq_fixup,
.pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq,
.restart = maple_restart,
.halt = maple_halt,
.get_boot_time = maple_get_boot_time,
.set_rtc_time = maple_set_rtc_time,
.get_rtc_time = maple_get_rtc_time,
.progress = maple_progress,
.power_save = power4_idle,
};
| linux-master | arch/powerpc/platforms/maple/setup.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (c) Copyright 2004 Benjamin Herrenschmidt ([email protected]),
* IBM Corp.
*/
#undef DEBUG
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/interrupt.h>
#include <linux/mc146818rtc.h>
#include <linux/bcd.h>
#include <linux/of_address.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include "maple.h"
#ifdef DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif
static int maple_rtc_addr;
static int maple_clock_read(int addr)
{
outb_p(addr, maple_rtc_addr);
return inb_p(maple_rtc_addr+1);
}
static void maple_clock_write(unsigned long val, int addr)
{
outb_p(addr, maple_rtc_addr);
outb_p(val, maple_rtc_addr+1);
}
void maple_get_rtc_time(struct rtc_time *tm)
{
do {
tm->tm_sec = maple_clock_read(RTC_SECONDS);
tm->tm_min = maple_clock_read(RTC_MINUTES);
tm->tm_hour = maple_clock_read(RTC_HOURS);
tm->tm_mday = maple_clock_read(RTC_DAY_OF_MONTH);
tm->tm_mon = maple_clock_read(RTC_MONTH);
tm->tm_year = maple_clock_read(RTC_YEAR);
} while (tm->tm_sec != maple_clock_read(RTC_SECONDS));
if (!(maple_clock_read(RTC_CONTROL) & RTC_DM_BINARY)
|| RTC_ALWAYS_BCD) {
tm->tm_sec = bcd2bin(tm->tm_sec);
tm->tm_min = bcd2bin(tm->tm_min);
tm->tm_hour = bcd2bin(tm->tm_hour);
tm->tm_mday = bcd2bin(tm->tm_mday);
tm->tm_mon = bcd2bin(tm->tm_mon);
tm->tm_year = bcd2bin(tm->tm_year);
}
if ((tm->tm_year + 1900) < 1970)
tm->tm_year += 100;
tm->tm_wday = -1;
}
int maple_set_rtc_time(struct rtc_time *tm)
{
unsigned char save_control, save_freq_select;
int sec, min, hour, mon, mday, year;
spin_lock(&rtc_lock);
save_control = maple_clock_read(RTC_CONTROL); /* tell the clock it's being set */
maple_clock_write((save_control|RTC_SET), RTC_CONTROL);
save_freq_select = maple_clock_read(RTC_FREQ_SELECT); /* stop and reset prescaler */
maple_clock_write((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
sec = tm->tm_sec;
min = tm->tm_min;
hour = tm->tm_hour;
mon = tm->tm_mon;
mday = tm->tm_mday;
year = tm->tm_year;
if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
sec = bin2bcd(sec);
min = bin2bcd(min);
hour = bin2bcd(hour);
mon = bin2bcd(mon);
mday = bin2bcd(mday);
year = bin2bcd(year);
}
maple_clock_write(sec, RTC_SECONDS);
maple_clock_write(min, RTC_MINUTES);
maple_clock_write(hour, RTC_HOURS);
maple_clock_write(mon, RTC_MONTH);
maple_clock_write(mday, RTC_DAY_OF_MONTH);
maple_clock_write(year, RTC_YEAR);
/* The following flags have to be released exactly in this order,
* otherwise the DS12887 (popular MC146818A clone with integrated
* battery and quartz) will not reset the oscillator and will not
* update precisely 500 ms later. You won't find this mentioned in
* the Dallas Semiconductor data sheets, but who believes data
* sheets anyway ... -- Markus Kuhn
*/
maple_clock_write(save_control, RTC_CONTROL);
maple_clock_write(save_freq_select, RTC_FREQ_SELECT);
spin_unlock(&rtc_lock);
return 0;
}
static struct resource rtc_iores = {
.name = "rtc",
.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};
time64_t __init maple_get_boot_time(void)
{
struct rtc_time tm;
struct device_node *rtcs;
rtcs = of_find_compatible_node(NULL, "rtc", "pnpPNP,b00");
if (rtcs) {
struct resource r;
if (of_address_to_resource(rtcs, 0, &r)) {
printk(KERN_EMERG "Maple: Unable to translate RTC"
" address\n");
goto bail;
}
if (!(r.flags & IORESOURCE_IO)) {
printk(KERN_EMERG "Maple: RTC address isn't PIO!\n");
goto bail;
}
maple_rtc_addr = r.start;
printk(KERN_INFO "Maple: Found RTC at IO 0x%x\n",
maple_rtc_addr);
}
bail:
of_node_put(rtcs);
if (maple_rtc_addr == 0) {
maple_rtc_addr = RTC_PORT(0); /* legacy address */
printk(KERN_INFO "Maple: No device node for RTC, assuming "
"legacy address (0x%x)\n", maple_rtc_addr);
}
rtc_iores.start = maple_rtc_addr;
rtc_iores.end = maple_rtc_addr + 7;
request_resource(&ioport_resource, &rtc_iores);
maple_get_rtc_time(&tm);
return rtc_tm_to_time64(&tm);
}
| linux-master | arch/powerpc/platforms/maple/time.c |
/*
* Microwatt FPGA-based SoC platform setup code.
*
* Copyright 2020 Paul Mackerras ([email protected]), IBM Corp.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/xics.h>
#include <asm/udbg.h>
#include "microwatt.h"
static void __init microwatt_init_IRQ(void)
{
xics_init();
}
static int __init microwatt_populate(void)
{
return of_platform_default_populate(NULL, NULL, NULL);
}
machine_arch_initcall(microwatt, microwatt_populate);
static void __init microwatt_setup_arch(void)
{
microwatt_rng_init();
}
define_machine(microwatt) {
.name = "microwatt",
.compatible = "microwatt-soc",
.init_IRQ = microwatt_init_IRQ,
.setup_arch = microwatt_setup_arch,
.progress = udbg_progress,
};
| linux-master | arch/powerpc/platforms/microwatt/setup.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Derived from arch/powerpc/platforms/powernv/rng.c, which is:
* Copyright 2013, Michael Ellerman, IBM Corporation.
*/
#define pr_fmt(fmt) "microwatt-rng: " fmt
#include <linux/kernel.h>
#include <linux/smp.h>
#include <asm/archrandom.h>
#include <asm/cputable.h>
#include <asm/machdep.h>
#include "microwatt.h"
#define DARN_ERR 0xFFFFFFFFFFFFFFFFul
static int microwatt_get_random_darn(unsigned long *v)
{
unsigned long val;
/* Using DARN with L=1 - 64-bit conditioned random number */
asm volatile(PPC_DARN(%0, 1) : "=r"(val));
if (val == DARN_ERR)
return 0;
*v = val;
return 1;
}
void __init microwatt_rng_init(void)
{
unsigned long val;
int i;
for (i = 0; i < 10; i++) {
if (microwatt_get_random_darn(&val)) {
ppc_md.get_random_seed = microwatt_get_random_darn;
return;
}
}
}
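/*
 * Illustrative sketch (not part of the original file): once the probe
 * loop above has installed microwatt_get_random_darn(), a consumer can
 * pull a seed through the ppc_md hook; the hook stays NULL if DARN
 * never returned a valid value, so callers must check for that. The
 * helper name below is made up for the example.
 */
static inline int microwatt_rng_example(unsigned long *seed)
{
	if (!ppc_md.get_random_seed)
		return 0;
	return ppc_md.get_random_seed(seed);
}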
| linux-master | arch/powerpc/platforms/microwatt/rng.c |
/*
* IBM PowerPC IBM eBus Infrastructure Support.
*
* Copyright (c) 2005 IBM Corporation
* Joachim Fenkes <[email protected]>
* Heiko J Schick <[email protected]>
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/console.h>
#include <linux/kobject.h>
#include <linux/dma-map-ops.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <asm/ibmebus.h>
#include <asm/machdep.h>
static struct device ibmebus_bus_device = { /* fake "parent" device */
.init_name = "ibmebus",
};
struct bus_type ibmebus_bus_type;
/* These devices will automatically be added to the bus during init */
static const struct of_device_id ibmebus_matches[] __initconst = {
{ .compatible = "IBM,lhca" },
{ .compatible = "IBM,lhea" },
{},
};
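/*
 * Descriptive note (added): the DMA ops below implement a plain 1:1
 * mapping -- DMA addresses handed back to drivers are simply kernel
 * virtual addresses (kmalloc for coherent allocations, page/sg virtual
 * addresses for streaming mappings), and the unmap paths are no-ops.
 * No IOMMU or bounce buffering is involved on this bus.
 */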
static void *ibmebus_alloc_coherent(struct device *dev,
size_t size,
dma_addr_t *dma_handle,
gfp_t flag,
unsigned long attrs)
{
void *mem;
mem = kmalloc(size, flag);
*dma_handle = (dma_addr_t)mem;
return mem;
}
static void ibmebus_free_coherent(struct device *dev,
size_t size, void *vaddr,
dma_addr_t dma_handle,
unsigned long attrs)
{
kfree(vaddr);
}
static dma_addr_t ibmebus_map_page(struct device *dev,
struct page *page,
unsigned long offset,
size_t size,
enum dma_data_direction direction,
unsigned long attrs)
{
return (dma_addr_t)(page_address(page) + offset);
}
static void ibmebus_unmap_page(struct device *dev,
dma_addr_t dma_addr,
size_t size,
enum dma_data_direction direction,
unsigned long attrs)
{
return;
}
static int ibmebus_map_sg(struct device *dev,
struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
struct scatterlist *sg;
int i;
for_each_sg(sgl, sg, nents, i) {
sg->dma_address = (dma_addr_t) sg_virt(sg);
sg->dma_length = sg->length;
}
return nents;
}
static void ibmebus_unmap_sg(struct device *dev,
struct scatterlist *sg,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
return;
}
static int ibmebus_dma_supported(struct device *dev, u64 mask)
{
return mask == DMA_BIT_MASK(64);
}
static u64 ibmebus_dma_get_required_mask(struct device *dev)
{
return DMA_BIT_MASK(64);
}
static const struct dma_map_ops ibmebus_dma_ops = {
.alloc = ibmebus_alloc_coherent,
.free = ibmebus_free_coherent,
.map_sg = ibmebus_map_sg,
.unmap_sg = ibmebus_unmap_sg,
.dma_supported = ibmebus_dma_supported,
.get_required_mask = ibmebus_dma_get_required_mask,
.map_page = ibmebus_map_page,
.unmap_page = ibmebus_unmap_page,
};
static int ibmebus_match_path(struct device *dev, const void *data)
{
struct device_node *dn = to_platform_device(dev)->dev.of_node;
struct device_node *tn = of_find_node_by_path(data);
of_node_put(tn);
return (tn == dn);
}
static int ibmebus_match_node(struct device *dev, const void *data)
{
return to_platform_device(dev)->dev.of_node == data;
}
static int ibmebus_create_device(struct device_node *dn)
{
struct platform_device *dev;
int ret;
dev = of_device_alloc(dn, NULL, &ibmebus_bus_device);
if (!dev)
return -ENOMEM;
dev->dev.bus = &ibmebus_bus_type;
dev->dev.dma_ops = &ibmebus_dma_ops;
ret = of_device_add(dev);
if (ret)
platform_device_put(dev);
return ret;
}
static int ibmebus_create_devices(const struct of_device_id *matches)
{
struct device_node *root, *child;
struct device *dev;
int ret = 0;
root = of_find_node_by_path("/");
for_each_child_of_node(root, child) {
if (!of_match_node(matches, child))
continue;
dev = bus_find_device(&ibmebus_bus_type, NULL, child,
ibmebus_match_node);
if (dev) {
put_device(dev);
continue;
}
ret = ibmebus_create_device(child);
if (ret) {
printk(KERN_ERR "%s: failed to create device (%i)",
__func__, ret);
of_node_put(child);
break;
}
}
of_node_put(root);
return ret;
}
int ibmebus_register_driver(struct platform_driver *drv)
{
/* If the driver uses devices that ibmebus doesn't know, add them */
ibmebus_create_devices(drv->driver.of_match_table);
drv->driver.bus = &ibmebus_bus_type;
return driver_register(&drv->driver);
}
EXPORT_SYMBOL(ibmebus_register_driver);
void ibmebus_unregister_driver(struct platform_driver *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(ibmebus_unregister_driver);
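/*
 * Illustrative sketch (not part of the original file): the minimal
 * shape of a client driver for this bus. The driver name, compatible
 * string and probe body are made up for the example; real clients bind
 * to the IBM,lhca / IBM,lhea nodes listed in ibmebus_matches above and
 * would register from their own module init code.
 */
static const struct of_device_id example_ebus_ids[] = {
	{ .compatible = "IBM,lhea" },
	{},
};

static int example_ebus_probe(struct platform_device *ofdev)
{
	dev_info(&ofdev->dev, "bound to %pOF\n", ofdev->dev.of_node);
	return 0;
}

static struct platform_driver example_ebus_driver = {
	.driver = {
		.name = "example-ebus",
		.of_match_table = example_ebus_ids,
	},
	.probe = example_ebus_probe,
};

/*
 * A client would call ibmebus_register_driver(&example_ebus_driver)
 * from its module init and ibmebus_unregister_driver() on exit.
 */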
int ibmebus_request_irq(u32 ist, irq_handler_t handler,
unsigned long irq_flags, const char *devname,
void *dev_id)
{
unsigned int irq = irq_create_mapping(NULL, ist);
if (!irq)
return -EINVAL;
return request_irq(irq, handler, irq_flags, devname, dev_id);
}
EXPORT_SYMBOL(ibmebus_request_irq);
void ibmebus_free_irq(u32 ist, void *dev_id)
{
unsigned int irq = irq_find_mapping(NULL, ist);
free_irq(irq, dev_id);
irq_dispose_mapping(irq);
}
EXPORT_SYMBOL(ibmebus_free_irq);
static char *ibmebus_chomp(const char *in, size_t count)
{
char *out = kmalloc(count + 1, GFP_KERNEL);
if (!out)
return NULL;
memcpy(out, in, count);
out[count] = '\0';
if (out[count - 1] == '\n')
out[count - 1] = '\0';
return out;
}
static ssize_t probe_store(const struct bus_type *bus, const char *buf, size_t count)
{
struct device_node *dn = NULL;
struct device *dev;
char *path;
ssize_t rc = 0;
path = ibmebus_chomp(buf, count);
if (!path)
return -ENOMEM;
dev = bus_find_device(&ibmebus_bus_type, NULL, path,
ibmebus_match_path);
if (dev) {
put_device(dev);
printk(KERN_WARNING "%s: %s has already been probed\n",
__func__, path);
rc = -EEXIST;
goto out;
}
if ((dn = of_find_node_by_path(path))) {
rc = ibmebus_create_device(dn);
of_node_put(dn);
} else {
printk(KERN_WARNING "%s: no such device node: %s\n",
__func__, path);
rc = -ENODEV;
}
out:
kfree(path);
if (rc)
return rc;
return count;
}
static BUS_ATTR_WO(probe);
static ssize_t remove_store(const struct bus_type *bus, const char *buf, size_t count)
{
struct device *dev;
char *path;
path = ibmebus_chomp(buf, count);
if (!path)
return -ENOMEM;
if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path,
ibmebus_match_path))) {
of_device_unregister(to_platform_device(dev));
put_device(dev);
kfree(path);
return count;
} else {
printk(KERN_WARNING "%s: %s not on the bus\n",
__func__, path);
kfree(path);
return -ENODEV;
}
}
static BUS_ATTR_WO(remove);
static struct attribute *ibmbus_bus_attrs[] = {
&bus_attr_probe.attr,
&bus_attr_remove.attr,
NULL,
};
ATTRIBUTE_GROUPS(ibmbus_bus);
static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv)
{
const struct of_device_id *matches = drv->of_match_table;
if (!matches)
return 0;
return of_match_device(matches, dev) != NULL;
}
static int ibmebus_bus_device_probe(struct device *dev)
{
int error = -ENODEV;
struct platform_driver *drv;
struct platform_device *of_dev;
drv = to_platform_driver(dev->driver);
of_dev = to_platform_device(dev);
if (!drv->probe)
return error;
get_device(dev);
if (of_driver_match_device(dev, dev->driver))
error = drv->probe(of_dev);
if (error)
put_device(dev);
return error;
}
static void ibmebus_bus_device_remove(struct device *dev)
{
struct platform_device *of_dev = to_platform_device(dev);
struct platform_driver *drv = to_platform_driver(dev->driver);
if (dev->driver && drv->remove)
drv->remove(of_dev);
}
static void ibmebus_bus_device_shutdown(struct device *dev)
{
struct platform_device *of_dev = to_platform_device(dev);
struct platform_driver *drv = to_platform_driver(dev->driver);
if (dev->driver && drv->shutdown)
drv->shutdown(of_dev);
}
/*
* ibmebus_bus_device_attrs
*/
static ssize_t devspec_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *ofdev;
ofdev = to_platform_device(dev);
return sprintf(buf, "%pOF\n", ofdev->dev.of_node);
}
static DEVICE_ATTR_RO(devspec);
static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *ofdev;
ofdev = to_platform_device(dev);
return sprintf(buf, "%pOFn\n", ofdev->dev.of_node);
}
static DEVICE_ATTR_RO(name);
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return of_device_modalias(dev, buf, PAGE_SIZE);
}
static DEVICE_ATTR_RO(modalias);
static struct attribute *ibmebus_bus_device_attrs[] = {
&dev_attr_devspec.attr,
&dev_attr_name.attr,
&dev_attr_modalias.attr,
NULL,
};
ATTRIBUTE_GROUPS(ibmebus_bus_device);
static int ibmebus_bus_modalias(const struct device *dev, struct kobj_uevent_env *env)
{
return of_device_uevent_modalias(dev, env);
}
struct bus_type ibmebus_bus_type = {
.name = "ibmebus",
.uevent = ibmebus_bus_modalias,
.bus_groups = ibmbus_bus_groups,
.match = ibmebus_bus_bus_match,
.probe = ibmebus_bus_device_probe,
.remove = ibmebus_bus_device_remove,
.shutdown = ibmebus_bus_device_shutdown,
.dev_groups = ibmebus_bus_device_groups,
};
EXPORT_SYMBOL(ibmebus_bus_type);
static int __init ibmebus_bus_init(void)
{
int err;
printk(KERN_INFO "IBM eBus Device Driver\n");
err = bus_register(&ibmebus_bus_type);
if (err) {
printk(KERN_ERR "%s: failed to register IBM eBus.\n",
__func__);
return err;
}
err = device_register(&ibmebus_bus_device);
if (err) {
printk(KERN_WARNING "%s: device_register returned %i\n",
__func__, err);
put_device(&ibmebus_bus_device);
bus_unregister(&ibmebus_bus_type);
return err;
}
err = ibmebus_create_devices(ibmebus_matches);
if (err) {
device_unregister(&ibmebus_bus_device);
bus_unregister(&ibmebus_bus_type);
return err;
}
return 0;
}
machine_postcore_initcall(pseries, ibmebus_bus_init);
| linux-master | arch/powerpc/platforms/pseries/ibmebus.c |
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "papr-sysparm: " fmt
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <asm/rtas.h>
#include <asm/papr-sysparm.h>
#include <asm/rtas-work-area.h>
struct papr_sysparm_buf *papr_sysparm_buf_alloc(void)
{
struct papr_sysparm_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
return buf;
}
void papr_sysparm_buf_free(struct papr_sysparm_buf *buf)
{
kfree(buf);
}
/**
* papr_sysparm_get() - Retrieve the value of a PAPR system parameter.
* @param: PAPR system parameter token as described in
* 7.3.16 "System Parameters Option".
* @buf: A &struct papr_sysparm_buf as returned from papr_sysparm_buf_alloc().
*
* Place the result of querying the specified parameter, if available,
* in @buf. The result includes a be16 length header followed by the
* value, which may be a string or binary data. See &struct papr_sysparm_buf.
*
* Since there is at least one parameter (60, OS Service Entitlement
* Status) where the results depend on the incoming contents of the
* work area, the caller-supplied buffer is copied unmodified into the
* work area before calling ibm,get-system-parameter.
*
* A defined parameter may not be implemented on a given system, and
* some implemented parameters may not be available to all partitions
* on a system. A parameter's disposition may change at any time due
* to system configuration changes or partition migration.
*
* Context: This function may sleep.
*
* Return: 0 on success, -errno otherwise. @buf is unmodified on error.
*/
int papr_sysparm_get(papr_sysparm_t param, struct papr_sysparm_buf *buf)
{
const s32 token = rtas_function_token(RTAS_FN_IBM_GET_SYSTEM_PARAMETER);
struct rtas_work_area *work_area;
s32 fwrc;
int ret;
might_sleep();
if (WARN_ON(!buf))
return -EFAULT;
if (token == RTAS_UNKNOWN_SERVICE)
return -ENOENT;
work_area = rtas_work_area_alloc(sizeof(*buf));
memcpy(rtas_work_area_raw_buf(work_area), buf, sizeof(*buf));
do {
fwrc = rtas_call(token, 3, 1, NULL, param.token,
rtas_work_area_phys(work_area),
rtas_work_area_size(work_area));
} while (rtas_busy_delay(fwrc));
switch (fwrc) {
case 0:
ret = 0;
memcpy(buf, rtas_work_area_raw_buf(work_area), sizeof(*buf));
break;
case -3: /* parameter not implemented */
ret = -EOPNOTSUPP;
break;
case -9002: /* this partition not authorized to retrieve this parameter */
ret = -EPERM;
break;
case -9999: /* "parameter error" e.g. the buffer is too small */
ret = -EINVAL;
break;
default:
pr_err("unexpected ibm,get-system-parameter result %d\n", fwrc);
fallthrough;
case -1: /* Hardware/platform error */
ret = -EIO;
break;
}
rtas_work_area_free(work_area);
return ret;
}
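/*
 * Illustrative sketch (not part of the original file): typical caller
 * pattern for the accessor above. The helper name is made up; the
 * parameter handle is assumed to come from the PAPR_SYSPARM_*
 * constants declared alongside papr_sysparm_t in <asm/papr-sysparm.h>.
 */
static int __maybe_unused papr_sysparm_get_example(papr_sysparm_t param)
{
	struct papr_sysparm_buf *buf;
	int ret;

	buf = papr_sysparm_buf_alloc();
	if (!buf)
		return -ENOMEM;

	ret = papr_sysparm_get(param, buf);
	if (!ret)
		pr_debug("retrieved system parameter %u\n", param.token);

	papr_sysparm_buf_free(buf);
	return ret;
}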
int papr_sysparm_set(papr_sysparm_t param, const struct papr_sysparm_buf *buf)
{
const s32 token = rtas_function_token(RTAS_FN_IBM_SET_SYSTEM_PARAMETER);
struct rtas_work_area *work_area;
s32 fwrc;
int ret;
might_sleep();
if (WARN_ON(!buf))
return -EFAULT;
if (token == RTAS_UNKNOWN_SERVICE)
return -ENOENT;
work_area = rtas_work_area_alloc(sizeof(*buf));
memcpy(rtas_work_area_raw_buf(work_area), buf, sizeof(*buf));
do {
fwrc = rtas_call(token, 2, 1, NULL, param.token,
rtas_work_area_phys(work_area));
} while (rtas_busy_delay(fwrc));
switch (fwrc) {
case 0:
ret = 0;
break;
case -3: /* parameter not supported */
ret = -EOPNOTSUPP;
break;
case -9002: /* this partition not authorized to modify this parameter */
ret = -EPERM;
break;
case -9999: /* "parameter error" e.g. invalid input data */
ret = -EINVAL;
break;
default:
pr_err("unexpected ibm,set-system-parameter result %d\n", fwrc);
fallthrough;
case -1: /* Hardware/platform error */
ret = -EIO;
break;
}
rtas_work_area_free(work_area);
return ret;
}
| linux-master | arch/powerpc/platforms/pseries/papr-sysparm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2006 Jake Moilanen <[email protected]>, IBM Corp.
* Copyright 2006-2007 Michael Ellerman, IBM Corp.
*/
#include <linux/crash_dump.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <asm/rtas.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/machdep.h>
#include <asm/xive.h>
#include "pseries.h"
static int query_token, change_token;
#define RTAS_QUERY_FN 0
#define RTAS_CHANGE_FN 1
#define RTAS_RESET_FN 2
#define RTAS_CHANGE_MSI_FN 3
#define RTAS_CHANGE_MSIX_FN 4
#define RTAS_CHANGE_32MSI_FN 5
/* RTAS Helpers */
static int rtas_change_msi(struct pci_dn *pdn, u32 func, u32 num_irqs)
{
u32 addr, seq_num, rtas_ret[3];
unsigned long buid;
int rc;
addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
buid = pdn->phb->buid;
seq_num = 1;
do {
if (func == RTAS_CHANGE_MSI_FN || func == RTAS_CHANGE_MSIX_FN ||
func == RTAS_CHANGE_32MSI_FN)
rc = rtas_call(change_token, 6, 4, rtas_ret, addr,
BUID_HI(buid), BUID_LO(buid),
func, num_irqs, seq_num);
else
rc = rtas_call(change_token, 6, 3, rtas_ret, addr,
BUID_HI(buid), BUID_LO(buid),
func, num_irqs, seq_num);
seq_num = rtas_ret[1];
} while (rtas_busy_delay(rc));
/*
* If the RTAS call succeeded, return the number of irqs allocated.
* If not, make sure we return a negative error code.
*/
if (rc == 0)
rc = rtas_ret[0];
else if (rc > 0)
rc = -rc;
pr_debug("rtas_msi: ibm,change_msi(func=%d,num=%d), got %d rc = %d\n",
func, num_irqs, rtas_ret[0], rc);
return rc;
}
static void rtas_disable_msi(struct pci_dev *pdev)
{
struct pci_dn *pdn;
pdn = pci_get_pdn(pdev);
if (!pdn)
return;
/*
* disabling MSI with the explicit interface also disables MSI-X
*/
if (rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, 0) != 0) {
/*
* may have failed because explicit interface is not
* present
*/
if (rtas_change_msi(pdn, RTAS_CHANGE_FN, 0) != 0) {
pr_debug("rtas_msi: Setting MSIs to 0 failed!\n");
}
}
}
static int rtas_query_irq_number(struct pci_dn *pdn, int offset)
{
u32 addr, rtas_ret[2];
unsigned long buid;
int rc;
addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
buid = pdn->phb->buid;
do {
rc = rtas_call(query_token, 4, 3, rtas_ret, addr,
BUID_HI(buid), BUID_LO(buid), offset);
} while (rtas_busy_delay(rc));
if (rc) {
pr_debug("rtas_msi: error (%d) querying source number\n", rc);
return rc;
}
return rtas_ret[0];
}
static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
{
struct device_node *dn;
const __be32 *p;
u32 req_msi;
dn = pci_device_to_OF_node(pdev);
p = of_get_property(dn, prop_name, NULL);
if (!p) {
pr_debug("rtas_msi: No %s on %pOF\n", prop_name, dn);
return -ENOENT;
}
req_msi = be32_to_cpup(p);
if (req_msi < nvec) {
pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec);
if (req_msi == 0) /* Be paranoid */
return -ENOSPC;
return req_msi;
}
return 0;
}
static int check_req_msi(struct pci_dev *pdev, int nvec)
{
return check_req(pdev, nvec, "ibm,req#msi");
}
static int check_req_msix(struct pci_dev *pdev, int nvec)
{
return check_req(pdev, nvec, "ibm,req#msi-x");
}
/* Quota calculation */
static struct device_node *__find_pe_total_msi(struct device_node *node, int *total)
{
struct device_node *dn;
const __be32 *p;
dn = of_node_get(node);
while (dn) {
p = of_get_property(dn, "ibm,pe-total-#msi", NULL);
if (p) {
pr_debug("rtas_msi: found prop on dn %pOF\n",
dn);
*total = be32_to_cpup(p);
return dn;
}
dn = of_get_next_parent(dn);
}
return NULL;
}
static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
{
return __find_pe_total_msi(pci_device_to_OF_node(dev), total);
}
static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
{
struct device_node *dn;
struct eeh_dev *edev;
/* Found our PE and assume 8 at that point. */
dn = pci_device_to_OF_node(dev);
if (!dn)
return NULL;
/* Get the top level device in the PE */
edev = pdn_to_eeh_dev(PCI_DN(dn));
if (edev->pe)
edev = list_first_entry(&edev->pe->edevs, struct eeh_dev,
entry);
dn = pci_device_to_OF_node(edev->pdev);
if (!dn)
return NULL;
/* We actually want the parent */
dn = of_get_parent(dn);
if (!dn)
return NULL;
	/* Hard-code 8 for old firmware */
*total = 8;
pr_debug("rtas_msi: using PE dn %pOF\n", dn);
return dn;
}
struct msi_counts {
	struct device_node *requestor;	/* node of the device asking for MSIs */
	int num_devices;		/* non-bridge devices under the PE */
	int request;			/* number of MSIs the requestor wants */
	int quota;			/* per-device share of the PE total */
	int spare;			/* MSIs left unclaimed by under-quota devices */
	int over_quota;			/* devices requesting more than the quota */
};
static void *count_non_bridge_devices(struct device_node *dn, void *data)
{
struct msi_counts *counts = data;
const __be32 *p;
u32 class;
pr_debug("rtas_msi: counting %pOF\n", dn);
p = of_get_property(dn, "class-code", NULL);
class = p ? be32_to_cpup(p) : 0;
if ((class >> 8) != PCI_CLASS_BRIDGE_PCI)
counts->num_devices++;
return NULL;
}
static void *count_spare_msis(struct device_node *dn, void *data)
{
struct msi_counts *counts = data;
const __be32 *p;
int req;
if (dn == counts->requestor)
req = counts->request;
else {
/* We don't know if a driver will try to use MSI or MSI-X,
* so we just have to punt and use the larger of the two. */
req = 0;
p = of_get_property(dn, "ibm,req#msi", NULL);
if (p)
req = be32_to_cpup(p);
p = of_get_property(dn, "ibm,req#msi-x", NULL);
if (p)
req = max(req, (int)be32_to_cpup(p));
}
if (req < counts->quota)
counts->spare += counts->quota - req;
else if (req > counts->quota)
counts->over_quota++;
return NULL;
}
static int msi_quota_for_device(struct pci_dev *dev, int request)
{
struct device_node *pe_dn;
struct msi_counts counts;
int total;
pr_debug("rtas_msi: calc quota for %s, request %d\n", pci_name(dev),
request);
pe_dn = find_pe_total_msi(dev, &total);
if (!pe_dn)
pe_dn = find_pe_dn(dev, &total);
if (!pe_dn) {
pr_err("rtas_msi: couldn't find PE for %s\n", pci_name(dev));
goto out;
}
pr_debug("rtas_msi: found PE %pOF\n", pe_dn);
memset(&counts, 0, sizeof(struct msi_counts));
/* Work out how many devices we have below this PE */
pci_traverse_device_nodes(pe_dn, count_non_bridge_devices, &counts);
if (counts.num_devices == 0) {
pr_err("rtas_msi: found 0 devices under PE for %s\n",
pci_name(dev));
goto out;
}
counts.quota = total / counts.num_devices;
if (request <= counts.quota)
goto out;
/* else, we have some more calculating to do */
counts.requestor = pci_device_to_OF_node(dev);
counts.request = request;
pci_traverse_device_nodes(pe_dn, count_spare_msis, &counts);
	/* If the total doesn't divide evenly among the devices, we can
	 * use the remainder as spare MSIs for anyone that wants them. */
counts.spare += total % counts.num_devices;
/* Divide any spare by the number of over-quota requestors */
if (counts.over_quota)
counts.quota += counts.spare / counts.over_quota;
/* And finally clamp the request to the possibly adjusted quota */
request = min(counts.quota, request);
pr_debug("rtas_msi: request clamped to quota %d\n", request);
out:
of_node_put(pe_dn);
return request;
}
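/*
 * Worked example (illustrative, not part of the original file): with
 * ibm,pe-total-#msi = 256 and four non-bridge devices under the PE,
 * the base quota is 256 / 4 = 64. If one device requests 128 while the
 * other three advertise ibm,req#msi = 16, count_spare_msis() collects
 * (64 - 16) * 3 = 144 spare MSIs (plus a zero remainder), the single
 * over-quota requestor may claim 64 + 144 = 208, and its request is
 * clamped to min(208, 128) = 128.
 */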
static void rtas_hack_32bit_msi_gen2(struct pci_dev *pdev)
{
u32 addr_hi, addr_lo;
/*
* We should only get in here for IODA1 configs. This is based on the
	 * fact that we are using RTAS for MSIs, we don't have the 32 bit MSI RTAS
* support, and we are in a PCIe Gen2 slot.
*/
dev_info(&pdev->dev,
"rtas_msi: No 32 bit MSI firmware support, forcing 32 bit MSI\n");
pci_read_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, &addr_hi);
addr_lo = 0xffff0000 | ((addr_hi >> (48 - 32)) << 4);
pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_LO, addr_lo);
pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, 0);
}
static int rtas_prepare_msi_irqs(struct pci_dev *pdev, int nvec_in, int type,
msi_alloc_info_t *arg)
{
struct pci_dn *pdn;
int quota, rc;
int nvec = nvec_in;
int use_32bit_msi_hack = 0;
if (type == PCI_CAP_ID_MSIX)
rc = check_req_msix(pdev, nvec);
else
rc = check_req_msi(pdev, nvec);
if (rc)
return rc;
quota = msi_quota_for_device(pdev, nvec);
if (quota && quota < nvec)
return quota;
/*
	 * Firmware currently refuses any non-power-of-two allocation,
	 * so we round up if the quota will allow it.
*/
if (type == PCI_CAP_ID_MSIX) {
int m = roundup_pow_of_two(nvec);
quota = msi_quota_for_device(pdev, m);
if (quota >= m)
nvec = m;
}
pdn = pci_get_pdn(pdev);
/*
	 * Try the new, more explicit firmware interface; if that fails, fall
	 * back to the old interface. The old interface is known to never
* return MSI-Xs.
*/
again:
if (type == PCI_CAP_ID_MSI) {
if (pdev->no_64bit_msi) {
rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
if (rc < 0) {
/*
* We only want to run the 32 bit MSI hack below if
* the max bus speed is Gen2 speed
*/
if (pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT)
return rc;
use_32bit_msi_hack = 1;
}
} else
rc = -1;
if (rc < 0)
rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec);
if (rc < 0) {
pr_debug("rtas_msi: trying the old firmware call.\n");
rc = rtas_change_msi(pdn, RTAS_CHANGE_FN, nvec);
}
if (use_32bit_msi_hack && rc > 0)
rtas_hack_32bit_msi_gen2(pdev);
} else
rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec);
if (rc != nvec) {
if (nvec != nvec_in) {
nvec = nvec_in;
goto again;
}
pr_debug("rtas_msi: rtas_change_msi() failed\n");
return rc;
}
return 0;
}
static int pseries_msi_ops_prepare(struct irq_domain *domain, struct device *dev,
int nvec, msi_alloc_info_t *arg)
{
struct pci_dev *pdev = to_pci_dev(dev);
int type = pdev->msix_enabled ? PCI_CAP_ID_MSIX : PCI_CAP_ID_MSI;
return rtas_prepare_msi_irqs(pdev, nvec, type, arg);
}
/*
* ->msi_free() is called before irq_domain_free_irqs_top() when the
* handler data is still available. Use that to clear the XIVE
* controller data.
*/
static void pseries_msi_ops_msi_free(struct irq_domain *domain,
struct msi_domain_info *info,
unsigned int irq)
{
if (xive_enabled())
xive_irq_free_data(irq);
}
/*
* RTAS can not disable one MSI at a time. It's all or nothing. Do it
* at the end after all IRQs have been freed.
*/
static void pseries_msi_post_free(struct irq_domain *domain, struct device *dev)
{
if (WARN_ON_ONCE(!dev_is_pci(dev)))
return;
rtas_disable_msi(to_pci_dev(dev));
}
static struct msi_domain_ops pseries_pci_msi_domain_ops = {
.msi_prepare = pseries_msi_ops_prepare,
.msi_free = pseries_msi_ops_msi_free,
.msi_post_free = pseries_msi_post_free,
};
static void pseries_msi_shutdown(struct irq_data *d)
{
d = d->parent_data;
if (d->chip->irq_shutdown)
d->chip->irq_shutdown(d);
}
static void pseries_msi_mask(struct irq_data *d)
{
pci_msi_mask_irq(d);
irq_chip_mask_parent(d);
}
static void pseries_msi_unmask(struct irq_data *d)
{
pci_msi_unmask_irq(d);
irq_chip_unmask_parent(d);
}
static void pseries_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
struct msi_desc *entry = irq_data_get_msi_desc(data);
/*
* Do not update the MSIx vector table. It's not strictly necessary
* because the table is initialized by the underlying hypervisor, PowerVM
* or QEMU/KVM. However, if the MSIx vector entry is cleared, any further
* activation will fail. This can happen in some drivers (e.g. IPR) which
* deactivate an IRQ used for testing MSI support.
*/
entry->msg = *msg;
}
static struct irq_chip pseries_pci_msi_irq_chip = {
.name = "pSeries-PCI-MSI",
.irq_shutdown = pseries_msi_shutdown,
.irq_mask = pseries_msi_mask,
.irq_unmask = pseries_msi_unmask,
.irq_eoi = irq_chip_eoi_parent,
.irq_write_msi_msg = pseries_msi_write_msg,
};
/*
* Set MSI_FLAG_MSIX_CONTIGUOUS as there is no way to express to
* firmware to request a discontiguous or non-zero based range of
* MSI-X entries. Core code will reject such setup attempts.
*/
static struct msi_domain_info pseries_msi_domain_info = {
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX |
MSI_FLAG_MSIX_CONTIGUOUS),
.ops = &pseries_pci_msi_domain_ops,
.chip = &pseries_pci_msi_irq_chip,
};
static void pseries_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
__pci_read_msi_msg(irq_data_get_msi_desc(data), msg);
}
static struct irq_chip pseries_msi_irq_chip = {
.name = "pSeries-MSI",
.irq_shutdown = pseries_msi_shutdown,
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_eoi = irq_chip_eoi_parent,
.irq_set_affinity = irq_chip_set_affinity_parent,
.irq_compose_msi_msg = pseries_msi_compose_msg,
};
static int pseries_irq_parent_domain_alloc(struct irq_domain *domain, unsigned int virq,
irq_hw_number_t hwirq)
{
struct irq_fwspec parent_fwspec;
int ret;
parent_fwspec.fwnode = domain->parent->fwnode;
parent_fwspec.param_count = 2;
parent_fwspec.param[0] = hwirq;
parent_fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
if (ret)
return ret;
return 0;
}
static int pseries_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
struct pci_controller *phb = domain->host_data;
msi_alloc_info_t *info = arg;
struct msi_desc *desc = info->desc;
struct pci_dev *pdev = msi_desc_to_pci_dev(desc);
int hwirq;
int i, ret;
hwirq = rtas_query_irq_number(pci_get_pdn(pdev), desc->msi_index);
if (hwirq < 0) {
dev_err(&pdev->dev, "Failed to query HW IRQ: %d\n", hwirq);
return hwirq;
}
dev_dbg(&pdev->dev, "%s bridge %pOF %d/%x #%d\n", __func__,
phb->dn, virq, hwirq, nr_irqs);
for (i = 0; i < nr_irqs; i++) {
ret = pseries_irq_parent_domain_alloc(domain, virq + i, hwirq + i);
if (ret)
goto out;
irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
&pseries_msi_irq_chip, domain->host_data);
}
return 0;
out:
/* TODO: handle RTAS cleanup in ->msi_finish() ? */
irq_domain_free_irqs_parent(domain, virq, i - 1);
return ret;
}
static void pseries_irq_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
struct pci_controller *phb = irq_data_get_irq_chip_data(d);
pr_debug("%s bridge %pOF %d #%d\n", __func__, phb->dn, virq, nr_irqs);
/* XIVE domain data is cleared through ->msi_free() */
}
static const struct irq_domain_ops pseries_irq_domain_ops = {
.alloc = pseries_irq_domain_alloc,
.free = pseries_irq_domain_free,
};
static int __pseries_msi_allocate_domains(struct pci_controller *phb,
unsigned int count)
{
struct irq_domain *parent = irq_get_default_host();
phb->fwnode = irq_domain_alloc_named_id_fwnode("pSeries-MSI",
phb->global_number);
if (!phb->fwnode)
return -ENOMEM;
phb->dev_domain = irq_domain_create_hierarchy(parent, 0, count,
phb->fwnode,
&pseries_irq_domain_ops, phb);
if (!phb->dev_domain) {
pr_err("PCI: failed to create IRQ domain bridge %pOF (domain %d)\n",
phb->dn, phb->global_number);
irq_domain_free_fwnode(phb->fwnode);
return -ENOMEM;
}
phb->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(phb->dn),
&pseries_msi_domain_info,
phb->dev_domain);
if (!phb->msi_domain) {
pr_err("PCI: failed to create MSI IRQ domain bridge %pOF (domain %d)\n",
phb->dn, phb->global_number);
irq_domain_free_fwnode(phb->fwnode);
irq_domain_remove(phb->dev_domain);
return -ENOMEM;
}
return 0;
}
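/*
* The resulting hierarchy, as a simplified sketch (the parent is whatever
* irq_get_default_host() returns, e.g. the XIVE controller domain):
*
*	parent interrupt controller domain
*	    ^-- phb->dev_domain ("pSeries-MSI", maps RTAS hwirqs)
*	            ^-- phb->msi_domain (PCI/MSI layer used by the PCI core)
*/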
int pseries_msi_allocate_domains(struct pci_controller *phb)
{
int count;
if (!__find_pe_total_msi(phb->dn, &count)) {
pr_err("PCI: failed to find MSIs for bridge %pOF (domain %d)\n",
phb->dn, phb->global_number);
return -ENOSPC;
}
return __pseries_msi_allocate_domains(phb, count);
}
void pseries_msi_free_domains(struct pci_controller *phb)
{
if (phb->msi_domain)
irq_domain_remove(phb->msi_domain);
if (phb->dev_domain)
irq_domain_remove(phb->dev_domain);
if (phb->fwnode)
irq_domain_free_fwnode(phb->fwnode);
}
static void rtas_msi_pci_irq_fixup(struct pci_dev *pdev)
{
/* No LSI -> leave MSIs (if any) configured */
if (!pdev->irq) {
dev_dbg(&pdev->dev, "rtas_msi: no LSI, nothing to do.\n");
return;
}
/* No MSI -> MSIs can't have been assigned by fw, leave LSI */
if (check_req_msi(pdev, 1) && check_req_msix(pdev, 1)) {
dev_dbg(&pdev->dev, "rtas_msi: no req#msi/x, nothing to do.\n");
return;
}
dev_dbg(&pdev->dev, "rtas_msi: disabling existing MSI.\n");
rtas_disable_msi(pdev);
}
static int rtas_msi_init(void)
{
query_token = rtas_function_token(RTAS_FN_IBM_QUERY_INTERRUPT_SOURCE_NUMBER);
change_token = rtas_function_token(RTAS_FN_IBM_CHANGE_MSI);
if ((query_token == RTAS_UNKNOWN_SERVICE) ||
(change_token == RTAS_UNKNOWN_SERVICE)) {
pr_debug("rtas_msi: no RTAS tokens, no MSI support.\n");
return -1;
}
pr_debug("rtas_msi: Registering RTAS MSI callbacks.\n");
WARN_ON(ppc_md.pci_irq_fixup);
ppc_md.pci_irq_fixup = rtas_msi_pci_irq_fixup;
return 0;
}
machine_arch_initcall(pseries, rtas_msi_init);
| linux-master | arch/powerpc/platforms/pseries/msi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2020-21 IBM Corp.
*/
#define pr_fmt(fmt) "vas: " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <asm/machdep.h>
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#include <asm/firmware.h>
#include <asm/vphn.h>
#include <asm/vas.h>
#include "vas.h"
#define VAS_INVALID_WIN_ADDRESS 0xFFFFFFFFFFFFFFFFul
#define VAS_DEFAULT_DOMAIN_ID 0xFFFFFFFFFFFFFFFFul
/* The hypervisor allows one credit per window right now */
#define DEF_WIN_CREDS 1
static struct vas_all_caps caps_all;
static bool copypaste_feat;
static struct hv_vas_cop_feat_caps hv_cop_caps;
static struct vas_caps vascaps[VAS_MAX_FEAT_TYPE];
static DEFINE_MUTEX(vas_pseries_mutex);
static bool migration_in_progress;
static long hcall_return_busy_check(long rc)
{
/* Check if we are stalled for some time */
if (H_IS_LONG_BUSY(rc)) {
msleep(get_longbusy_msecs(rc));
rc = H_BUSY;
} else if (rc == H_BUSY) {
cond_resched();
}
return rc;
}
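/*
* Typical usage, mirroring the hcall wrappers below:
*
*	do {
*		rc = plpar_hcall_norets(...);
*		rc = hcall_return_busy_check(rc);
*	} while (rc == H_BUSY);
*/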
/*
* Allocate VAS window hcall
*/
static int h_allocate_vas_window(struct pseries_vas_window *win, u64 *domain,
u8 wintype, u16 credits)
{
long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
long rc;
do {
rc = plpar_hcall9(H_ALLOCATE_VAS_WINDOW, retbuf, wintype,
credits, domain[0], domain[1], domain[2],
domain[3], domain[4], domain[5]);
rc = hcall_return_busy_check(rc);
} while (rc == H_BUSY);
if (rc == H_SUCCESS) {
if (win->win_addr == VAS_INVALID_WIN_ADDRESS) {
pr_err("H_ALLOCATE_VAS_WINDOW: COPY/PASTE is not supported\n");
return -ENOTSUPP;
}
win->vas_win.winid = retbuf[0];
win->win_addr = retbuf[1];
win->complete_irq = retbuf[2];
win->fault_irq = retbuf[3];
return 0;
}
pr_err("H_ALLOCATE_VAS_WINDOW error: %ld, wintype: %u, credits: %u\n",
rc, wintype, credits);
return -EIO;
}
/*
* Deallocate VAS window hcall.
*/
static int h_deallocate_vas_window(u64 winid)
{
long rc;
do {
rc = plpar_hcall_norets(H_DEALLOCATE_VAS_WINDOW, winid);
rc = hcall_return_busy_check(rc);
} while (rc == H_BUSY);
if (rc == H_SUCCESS)
return 0;
pr_err("H_DEALLOCATE_VAS_WINDOW error: %ld, winid: %llu\n",
rc, winid);
return -EIO;
}
/*
* Modify VAS window.
* After the window is opened with allocate window hcall, configure it
* with flags and LPAR PID before using.
*/
static int h_modify_vas_window(struct pseries_vas_window *win)
{
long rc;
/*
* AMR value is not supported in Linux VAS implementation.
* The hypervisor ignores it if 0 is passed.
*/
do {
rc = plpar_hcall_norets(H_MODIFY_VAS_WINDOW,
win->vas_win.winid, win->pid, 0,
VAS_MOD_WIN_FLAGS, 0);
rc = hcall_return_busy_check(rc);
} while (rc == H_BUSY);
if (rc == H_SUCCESS)
return 0;
pr_err("H_MODIFY_VAS_WINDOW error: %ld, winid %u pid %u\n",
rc, win->vas_win.winid, win->pid);
return -EIO;
}
/*
* This hcall is used to determine the capabilities from the hypervisor.
* @hcall: H_QUERY_VAS_CAPABILITIES or H_QUERY_NX_CAPABILITIES
* @query_type: If 0 is passed, the hypervisor returns the overall
* capabilities which provides all feature(s) that are
* available. Then query the hypervisor to get the
* corresponding capabilities for the specific feature.
* Example: H_QUERY_VAS_CAPABILITIES provides VAS GZIP QoS
* and VAS GZIP Default capabilities.
* H_QUERY_NX_CAPABILITIES provides NX GZIP
* capabilities.
* @result: Return buffer to save capabilities.
*/
int h_query_vas_capabilities(const u64 hcall, u8 query_type, u64 result)
{
long rc;
rc = plpar_hcall_norets(hcall, query_type, result);
if (rc == H_SUCCESS)
return 0;
/* H_FUNCTION means HV does not support VAS so don't print an error */
if (rc != H_FUNCTION) {
pr_err("%s error %ld, query_type %u, result buffer 0x%llx\n",
(hcall == H_QUERY_VAS_CAPABILITIES) ?
"H_QUERY_VAS_CAPABILITIES" :
"H_QUERY_NX_CAPABILITIES",
rc, query_type, result);
}
return -EIO;
}
EXPORT_SYMBOL_GPL(h_query_vas_capabilities);
/*
* hcall to get fault CRB from the hypervisor.
*/
static int h_get_nx_fault(u32 winid, u64 buffer)
{
long rc;
rc = plpar_hcall_norets(H_GET_NX_FAULT, winid, buffer);
if (rc == H_SUCCESS)
return 0;
pr_err("H_GET_NX_FAULT error: %ld, winid %u, buffer 0x%llx\n",
rc, winid, buffer);
return -EIO;
}
/*
* Handle the fault interrupt.
* When the fault interrupt is received for each window, query the
* hypervisor to get the fault CRB on the specific fault. Then
* process the CRB by updating CSB or send signal if the user space
* CSB is invalid.
* Note: The hypervisor forwards an interrupt for each fault request.
* So one fault CRB to process for each H_GET_NX_FAULT hcall.
*/
static irqreturn_t pseries_vas_fault_thread_fn(int irq, void *data)
{
struct pseries_vas_window *txwin = data;
struct coprocessor_request_block crb;
struct vas_user_win_ref *tsk_ref;
int rc;
while (atomic_read(&txwin->pending_faults)) {
rc = h_get_nx_fault(txwin->vas_win.winid, (u64)virt_to_phys(&crb));
if (!rc) {
tsk_ref = &txwin->vas_win.task_ref;
vas_dump_crb(&crb);
vas_update_csb(&crb, tsk_ref);
}
atomic_dec(&txwin->pending_faults);
}
return IRQ_HANDLED;
}
/*
* irq_default_primary_handler() can be used only with IRQF_ONESHOT
* which disables IRQ before executing the thread handler and enables
* it after. But disabling the interrupt sets the VAS IRQ OFF
* state in the hypervisor. If the NX generates a fault interrupt
* during this window, the hypervisor will not deliver this
* interrupt to the LPAR. So use a VAS-specific IRQ handler instead
* of calling the default primary handler.
*/
static irqreturn_t pseries_vas_irq_handler(int irq, void *data)
{
struct pseries_vas_window *txwin = data;
/*
* The thread handler will process this interrupt if it is
* already running.
*/
atomic_inc(&txwin->pending_faults);
return IRQ_WAKE_THREAD;
}
/*
* Allocate window and setup IRQ mapping.
*/
static int allocate_setup_window(struct pseries_vas_window *txwin,
u64 *domain, u8 wintype)
{
int rc;
rc = h_allocate_vas_window(txwin, domain, wintype, DEF_WIN_CREDS);
if (rc)
return rc;
/*
* On PowerVM, the hypervisor sets up and forwards the fault
* interrupt per window. So the IRQ setup and fault handling
* will be done for each open window separately.
*/
txwin->fault_virq = irq_create_mapping(NULL, txwin->fault_irq);
if (!txwin->fault_virq) {
pr_err("Failed irq mapping %d\n", txwin->fault_irq);
rc = -EINVAL;
goto out_win;
}
txwin->name = kasprintf(GFP_KERNEL, "vas-win-%d",
txwin->vas_win.winid);
if (!txwin->name) {
rc = -ENOMEM;
goto out_irq;
}
rc = request_threaded_irq(txwin->fault_virq,
pseries_vas_irq_handler,
pseries_vas_fault_thread_fn, 0,
txwin->name, txwin);
if (rc) {
pr_err("VAS-Window[%d]: Request IRQ(%u) failed with %d\n",
txwin->vas_win.winid, txwin->fault_virq, rc);
goto out_free;
}
txwin->vas_win.wcreds_max = DEF_WIN_CREDS;
return 0;
out_free:
kfree(txwin->name);
out_irq:
irq_dispose_mapping(txwin->fault_virq);
out_win:
h_deallocate_vas_window(txwin->vas_win.winid);
return rc;
}
static inline void free_irq_setup(struct pseries_vas_window *txwin)
{
free_irq(txwin->fault_virq, txwin);
kfree(txwin->name);
irq_dispose_mapping(txwin->fault_virq);
}
static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
enum vas_cop_type cop_type)
{
long domain[PLPAR_HCALL9_BUFSIZE] = {VAS_DEFAULT_DOMAIN_ID};
struct vas_cop_feat_caps *cop_feat_caps;
struct vas_caps *caps;
struct pseries_vas_window *txwin;
int rc;
txwin = kzalloc(sizeof(*txwin), GFP_KERNEL);
if (!txwin)
return ERR_PTR(-ENOMEM);
/*
* A VAS window can have many credits which means that many
* requests can be issued simultaneously. But the hypervisor
* restricts one credit per window.
* The hypervisor introduces 2 different types of credits:
* Default credit type (Uses normal priority FIFO):
* A limited number of credits are assigned to partitions
* based on processor entitlement. But these credits may be
* over-committed on a system depending on whether the CPUs
* are in shared or dedicated modes - that is, more requests
* may be issued across the system than NX can service at
* once, which can result in paste command failure (RMA_busy).
* Then the process has to resend requests or fall back to
* SW compression.
* Quality of Service (QoS) credit type (Uses high priority FIFO):
* To avoid NX HW contention, the system admins can assign
* QoS credits for each LPAR so that this partition is
* guaranteed access to NX resources. These credits are
* assigned to partitions via the HMC.
* Refer to PAPR for more information.
*
* Allocate window with QoS credits if user requested. Otherwise
* default credits are used.
*/
if (flags & VAS_TX_WIN_FLAG_QOS_CREDIT)
caps = &vascaps[VAS_GZIP_QOS_FEAT_TYPE];
else
caps = &vascaps[VAS_GZIP_DEF_FEAT_TYPE];
cop_feat_caps = &caps->caps;
if (atomic_inc_return(&cop_feat_caps->nr_used_credits) >
atomic_read(&cop_feat_caps->nr_total_credits)) {
pr_err("Credits are not available to allocate window\n");
rc = -EINVAL;
goto out;
}
if (vas_id == -1) {
/*
* The user space is requesting to allocate a window on
* a VAS instance where the process is executing.
* On PowerVM, domain values are passed to the hypervisor
* to select the VAS instance. Useful if the process has
* affinity to a NUMA node.
* The hypervisor selects the VAS instance if
* VAS_DEFAULT_DOMAIN_ID (-1) is passed for domain values.
* The h_allocate_vas_window hcall is defined to take
* domain values as specified by h_home_node_associativity,
* so no unpacking needs to be done.
*/
rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, domain,
VPHN_FLAG_VCPU, hard_smp_processor_id());
if (rc != H_SUCCESS) {
pr_err("H_HOME_NODE_ASSOCIATIVITY error: %d\n", rc);
goto out;
}
}
txwin->pid = mfspr(SPRN_PID);
/*
* The allocate / deallocate window hcalls and IRQ setup / free
* have to be protected with the mutex.
* Open VAS window: allocate window hcall and set up the IRQ.
* Close VAS window: deallocate window hcall and free the IRQ.
* The hypervisor waits until all NX requests are
* completed before closing the window, so it expects the OS
* to handle NX faults, which means the IRQ can be freed only
* after the deallocate window hcall has returned.
* If the window were closed with the deallocate hcall before
* the IRQ is freed, the hypervisor could hand the same fault
* IRQ to a new allocate hcall. IRQ setup for the new window
* would then fail because the OS has not yet freed that
* fault IRQ.
*/
mutex_lock(&vas_pseries_mutex);
if (migration_in_progress)
rc = -EBUSY;
else
rc = allocate_setup_window(txwin, (u64 *)&domain[0],
cop_feat_caps->win_type);
mutex_unlock(&vas_pseries_mutex);
if (rc)
goto out;
/*
* Modify window and it is ready to use.
*/
rc = h_modify_vas_window(txwin);
if (!rc)
rc = get_vas_user_win_ref(&txwin->vas_win.task_ref);
if (rc)
goto out_free;
txwin->win_type = cop_feat_caps->win_type;
mutex_lock(&vas_pseries_mutex);
/*
* It is possible to lose the acquired credit with DLPAR core
* removal after the window is opened. So if there are any
* closed windows (i.e. windows with lost credits), do not give a new
* window to user space. New windows will be opened only
* after the existing windows are reopened when credits are
* available.
*/
if (!caps->nr_close_wins) {
list_add(&txwin->win_list, &caps->list);
caps->nr_open_windows++;
mutex_unlock(&vas_pseries_mutex);
vas_user_win_add_mm_context(&txwin->vas_win.task_ref);
return &txwin->vas_win;
}
mutex_unlock(&vas_pseries_mutex);
put_vas_user_win_ref(&txwin->vas_win.task_ref);
rc = -EBUSY;
pr_err("No credit is available to allocate window\n");
out_free:
/*
* Window is not operational. Free the IRQ before closing the
* window so that we do not have to hold the mutex.
*/
free_irq_setup(txwin);
h_deallocate_vas_window(txwin->vas_win.winid);
out:
atomic_dec(&cop_feat_caps->nr_used_credits);
kfree(txwin);
return ERR_PTR(rc);
}
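/*
* Rough sketch of the user-space flow that reaches vas_allocate_window()
* (illustrative only; the device node and ioctl names below are the ones
* documented for the NX GZIP user-space API, not definitions from this
* file):
*
*	fd = open("/dev/crypto/nx-gzip", O_RDWR);
*	ioctl(fd, VAS_TX_WIN_OPEN, &attr);   // ends up in vas_allocate_window()
*	paste = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
*	// build a CRB and use copy/paste against the mmap'ed paste address
*/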
static u64 vas_paste_address(struct vas_window *vwin)
{
struct pseries_vas_window *win;
win = container_of(vwin, struct pseries_vas_window, vas_win);
return win->win_addr;
}
static int deallocate_free_window(struct pseries_vas_window *win)
{
int rc = 0;
/*
* The hypervisor waits for all requests, including faults,
* to be processed before closing the window - meaning all
* credits have to be returned. In the case of a fault
* request, a credit is returned after the OS issues the
* H_GET_NX_FAULT hcall.
* So free the IRQ after executing the H_DEALLOCATE_VAS_WINDOW
* hcall.
*/
rc = h_deallocate_vas_window(win->vas_win.winid);
if (!rc)
free_irq_setup(win);
return rc;
}
static int vas_deallocate_window(struct vas_window *vwin)
{
struct pseries_vas_window *win;
struct vas_cop_feat_caps *caps;
int rc = 0;
if (!vwin)
return -EINVAL;
win = container_of(vwin, struct pseries_vas_window, vas_win);
/* Should not happen */
if (win->win_type >= VAS_MAX_FEAT_TYPE) {
pr_err("Window (%u): Invalid window type %u\n",
vwin->winid, win->win_type);
return -EINVAL;
}
caps = &vascaps[win->win_type].caps;
mutex_lock(&vas_pseries_mutex);
/*
* The VAS window is already closed in the hypervisor when the
* credit was lost or during migration. So just remove the entry
* from the list, remove task references and free vas_window
* struct.
*/
if (!(win->vas_win.status & VAS_WIN_NO_CRED_CLOSE) &&
!(win->vas_win.status & VAS_WIN_MIGRATE_CLOSE)) {
rc = deallocate_free_window(win);
if (rc) {
mutex_unlock(&vas_pseries_mutex);
return rc;
}
} else
vascaps[win->win_type].nr_close_wins--;
list_del(&win->win_list);
atomic_dec(&caps->nr_used_credits);
vascaps[win->win_type].nr_open_windows--;
mutex_unlock(&vas_pseries_mutex);
mm_context_remove_vas_window(vwin->task_ref.mm);
put_vas_user_win_ref(&vwin->task_ref);
kfree(win);
return 0;
}
static const struct vas_user_win_ops vops_pseries = {
.open_win = vas_allocate_window, /* Open and configure window */
.paste_addr = vas_paste_address, /* To do copy/paste */
.close_win = vas_deallocate_window, /* Close window */
};
/*
* Only the nx-gzip coprocessor type is supported now, but this API
* code can be extended to other coprocessor types later.
*/
int vas_register_api_pseries(struct module *mod, enum vas_cop_type cop_type,
const char *name)
{
if (!copypaste_feat)
return -ENOTSUPP;
return vas_register_coproc_api(mod, cop_type, name, &vops_pseries);
}
EXPORT_SYMBOL_GPL(vas_register_api_pseries);
void vas_unregister_api_pseries(void)
{
vas_unregister_coproc_api();
}
EXPORT_SYMBOL_GPL(vas_unregister_api_pseries);
/*
* Get the specific capabilities based on the feature type.
* Right now supports GZIP default and GZIP QoS capabilities.
*/
static int __init get_vas_capabilities(u8 feat, enum vas_cop_feat_type type,
struct hv_vas_cop_feat_caps *hv_caps)
{
struct vas_cop_feat_caps *caps;
struct vas_caps *vcaps;
int rc = 0;
vcaps = &vascaps[type];
memset(vcaps, 0, sizeof(*vcaps));
INIT_LIST_HEAD(&vcaps->list);
vcaps->feat = feat;
caps = &vcaps->caps;
rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES, feat,
(u64)virt_to_phys(hv_caps));
if (rc)
return rc;
caps->user_mode = hv_caps->user_mode;
if (!(caps->user_mode & VAS_COPY_PASTE_USER_MODE)) {
pr_err("User space COPY/PASTE is not supported\n");
return -ENOTSUPP;
}
caps->descriptor = be64_to_cpu(hv_caps->descriptor);
caps->win_type = hv_caps->win_type;
if (caps->win_type >= VAS_MAX_FEAT_TYPE) {
pr_err("Unsupported window type %u\n", caps->win_type);
return -EINVAL;
}
caps->max_lpar_creds = be16_to_cpu(hv_caps->max_lpar_creds);
caps->max_win_creds = be16_to_cpu(hv_caps->max_win_creds);
atomic_set(&caps->nr_total_credits,
be16_to_cpu(hv_caps->target_lpar_creds));
if (feat == VAS_GZIP_DEF_FEAT) {
caps->def_lpar_creds = be16_to_cpu(hv_caps->def_lpar_creds);
if (caps->max_win_creds < DEF_WIN_CREDS) {
pr_err("Window creds(%u) > max allowed window creds(%u)\n",
DEF_WIN_CREDS, caps->max_win_creds);
return -EINVAL;
}
}
rc = sysfs_add_vas_caps(caps);
if (rc)
return rc;
copypaste_feat = true;
return 0;
}
/*
* VAS windows can be closed due to lost credits when a core is
* removed. So reopen them if credits become available due to a DLPAR
* core add, and set the window status to active. When NX sees a page
* fault on the unmapped paste address, the kernel handles the fault
* by remapping to the new paste address if the window is
* active.
*/
static int reconfig_open_windows(struct vas_caps *vcaps, int creds,
bool migrate)
{
long domain[PLPAR_HCALL9_BUFSIZE] = {VAS_DEFAULT_DOMAIN_ID};
struct vas_cop_feat_caps *caps = &vcaps->caps;
struct pseries_vas_window *win = NULL, *tmp;
int rc, mv_ents = 0;
int flag;
/*
* Nothing to do if there are no closed windows.
*/
if (!vcaps->nr_close_wins)
return 0;
/*
* For core removal, the hypervisor reduces the credits
* assigned to the LPAR and the kernel closes VAS windows
* in the hypervisor according to the reduced credits. The kernel
* uses LIFO (the last windows that were opened will be closed
* first) and expects to reopen them in the same order when credits
* become available.
* For example, 40 windows are closed when the LPAR loses 2 cores
* (dedicated). If 1 core is added, this LPAR can have 20 more
* credits, which means the kernel can reopen 20 windows. So skip
* the first 20 entries in the closed-windows list and reopen the
* next 20 windows.
* For partition migration, reopen all closed windows
* during resume.
*/
if ((vcaps->nr_close_wins > creds) && !migrate)
mv_ents = vcaps->nr_close_wins - creds;
list_for_each_entry_safe(win, tmp, &vcaps->list, win_list) {
if (!mv_ents)
break;
mv_ents--;
}
/*
* Only reopen windows that were previously closed due to
* migration or DLPAR (lost credit).
*/
if (migrate)
flag = VAS_WIN_MIGRATE_CLOSE;
else
flag = VAS_WIN_NO_CRED_CLOSE;
list_for_each_entry_safe_from(win, tmp, &vcaps->list, win_list) {
/*
* This window was closed by both DLPAR and migration events,
* so reopen the window only on the last event.
* User space is not suspended by the current migration
* notifier, so it can issue DLPAR CPU hotplug while
* migration is in progress. In this case this window will
* be reopened with the last event.
*/
if ((win->vas_win.status & VAS_WIN_NO_CRED_CLOSE) &&
(win->vas_win.status & VAS_WIN_MIGRATE_CLOSE)) {
win->vas_win.status &= ~flag;
continue;
}
/*
* Nothing to do on this window if it is not closed
* with this flag
*/
if (!(win->vas_win.status & flag))
continue;
rc = allocate_setup_window(win, (u64 *)&domain[0],
caps->win_type);
if (rc)
return rc;
rc = h_modify_vas_window(win);
if (rc)
goto out;
mutex_lock(&win->vas_win.task_ref.mmap_mutex);
/*
* Set window status to active
*/
win->vas_win.status &= ~flag;
mutex_unlock(&win->vas_win.task_ref.mmap_mutex);
win->win_type = caps->win_type;
if (!--vcaps->nr_close_wins)
break;
}
return 0;
out:
/*
* Window modify HCALL failed. So close the window to the
* hypervisor and return.
*/
free_irq_setup(win);
h_deallocate_vas_window(win->vas_win.winid);
return rc;
}
/*
* The hypervisor reduces the available credits if the LPAR loses a core. It
* means the excess windows should not be active and user space
* should not be using these windows to send compression requests to NX.
* So the kernel closes the excess windows and unmaps the paste address
* so that user space receives a paste instruction failure. It is then up to
* user space to fall back to SW compression and manage with the
* existing windows.
*/
static int reconfig_close_windows(struct vas_caps *vcap, int excess_creds,
bool migrate)
{
struct pseries_vas_window *win, *tmp;
struct vas_user_win_ref *task_ref;
struct vm_area_struct *vma;
int rc = 0, flag;
if (migrate)
flag = VAS_WIN_MIGRATE_CLOSE;
else
flag = VAS_WIN_NO_CRED_CLOSE;
list_for_each_entry_safe(win, tmp, &vcap->list, win_list) {
/*
* This window is already closed due to a lost credit
* or an earlier migration. Go to the next window.
* For migration, nothing to do since this window was
* closed for DLPAR and will be reopened even on
* the destination system with another DLPAR operation.
*/
if ((win->vas_win.status & VAS_WIN_MIGRATE_CLOSE) ||
(win->vas_win.status & VAS_WIN_NO_CRED_CLOSE)) {
win->vas_win.status |= flag;
continue;
}
task_ref = &win->vas_win.task_ref;
/*
* VAS mmap (coproc_mmap()) and its fault handler
* (vas_mmap_fault()) are called after holding mmap lock.
* So hold mmap mutex after mmap_lock to avoid deadlock.
*/
mmap_write_lock(task_ref->mm);
mutex_lock(&task_ref->mmap_mutex);
vma = task_ref->vma;
/*
* The number of available credits is reduced, so select
* and close windows.
*/
win->vas_win.status |= flag;
/*
* vma is set in the original mapping. But this mapping
* is done with mmap() after the window is opened with ioctl,
* so we may not see the original mapping if the core removal
* is done before the original mmap() and after the ioctl.
*/
if (vma)
zap_vma_pages(vma);
mutex_unlock(&task_ref->mmap_mutex);
mmap_write_unlock(task_ref->mm);
/*
* Close VAS window in the hypervisor, but do not
* free vas_window struct since it may be reused
* when the credit is available later (DLPAR with
* adding cores). This struct will be used
* later when the process issues close(FD).
*/
rc = deallocate_free_window(win);
/*
* This failure is from the hypervisor.
* There is no way to stop migration for these failures.
* So ignore the error and continue closing other windows.
*/
if (rc && !migrate)
return rc;
vcap->nr_close_wins++;
/*
* For migration, do not depend on lpar_creds in case it
* mismatches the hypervisor value (should not happen).
* So close all active windows in the list; they will be
* reopened based on the new lpar_creds on the
* destination system during resume.
*/
if (!migrate && !--excess_creds)
break;
}
return 0;
}
/*
* Get new VAS capabilities when the core add/removal configuration
* changes. Reconfigure window configurations based on the credit
* availability from these new capabilities.
*/
int vas_reconfig_capabilties(u8 type, int new_nr_creds)
{
struct vas_cop_feat_caps *caps;
int old_nr_creds;
struct vas_caps *vcaps;
int rc = 0, nr_active_wins;
if (type >= VAS_MAX_FEAT_TYPE) {
pr_err("Invalid credit type %d\n", type);
return -EINVAL;
}
vcaps = &vascaps[type];
caps = &vcaps->caps;
mutex_lock(&vas_pseries_mutex);
old_nr_creds = atomic_read(&caps->nr_total_credits);
atomic_set(&caps->nr_total_credits, new_nr_creds);
/*
* The total number of available credits may be decreased or
* increased with a DLPAR operation. This means some windows have to be
* closed / reopened. Hold the vas_pseries_mutex so that
* user space cannot open new windows.
*/
if (old_nr_creds < new_nr_creds) {
/*
* If the existing target credits is less than the new
* target, reopen windows if they are closed due to
* the previous DLPAR (core removal).
*/
rc = reconfig_open_windows(vcaps, new_nr_creds - old_nr_creds,
false);
} else {
/*
* The number of active windows is more than the new LPAR's available
* credits. So close the excess windows.
* On pseries, each window will have 1 credit.
*/
nr_active_wins = vcaps->nr_open_windows - vcaps->nr_close_wins;
if (nr_active_wins > new_nr_creds)
rc = reconfig_close_windows(vcaps,
nr_active_wins - new_nr_creds,
false);
}
mutex_unlock(&vas_pseries_mutex);
return rc;
}
int pseries_vas_dlpar_cpu(void)
{
int new_nr_creds, rc;
/*
* NX-GZIP is not enabled. Nothing to do for the DLPAR event.
*/
if (!copypaste_feat)
return 0;
rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES,
vascaps[VAS_GZIP_DEF_FEAT_TYPE].feat,
(u64)virt_to_phys(&hv_cop_caps));
if (!rc) {
new_nr_creds = be16_to_cpu(hv_cop_caps.target_lpar_creds);
rc = vas_reconfig_capabilties(VAS_GZIP_DEF_FEAT_TYPE, new_nr_creds);
}
if (rc)
pr_err("Failed reconfig VAS capabilities with DLPAR\n");
return rc;
}
/*
* The total number of default credits available (target_credits)
* in an LPAR depends on the number of cores configured. It varies based on
* whether processors are in shared mode or dedicated mode.
* Get notified when the CPU configuration is changed with a DLPAR
* operation so that we get the new target_credits (VAS default capabilities)
* and then update the existing windows usage if needed.
*/
static int pseries_vas_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct of_reconfig_data *rd = data;
struct device_node *dn = rd->dn;
const __be32 *intserv = NULL;
int len;
/*
* For shared CPU partition, the hypervisor assigns total credits
* based on entitled core capacity. So updating VAS windows will
* be called from lparcfg_write().
*/
if (is_shared_processor())
return NOTIFY_OK;
if ((action == OF_RECONFIG_ATTACH_NODE) ||
(action == OF_RECONFIG_DETACH_NODE))
intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
&len);
/*
* Processor config is not changed
*/
if (!intserv)
return NOTIFY_OK;
return pseries_vas_dlpar_cpu();
}
static struct notifier_block pseries_vas_nb = {
.notifier_call = pseries_vas_notifier,
};
/*
* For LPM, all windows have to be closed on the source partition
* before migration and reopened on the destination partition
* after migration. So close windows during suspend and
* reopen them during resume.
*/
int vas_migration_handler(int action)
{
struct vas_cop_feat_caps *caps;
int old_nr_creds, new_nr_creds = 0;
struct vas_caps *vcaps;
int i, rc = 0;
/*
* NX-GZIP is not enabled. Nothing to do for migration.
*/
if (!copypaste_feat)
return rc;
mutex_lock(&vas_pseries_mutex);
if (action == VAS_SUSPEND)
migration_in_progress = true;
else
migration_in_progress = false;
for (i = 0; i < VAS_MAX_FEAT_TYPE; i++) {
vcaps = &vascaps[i];
caps = &vcaps->caps;
old_nr_creds = atomic_read(&caps->nr_total_credits);
rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES,
vcaps->feat,
(u64)virt_to_phys(&hv_cop_caps));
if (!rc) {
new_nr_creds = be16_to_cpu(hv_cop_caps.target_lpar_creds);
/*
* Should not happen. But in case it does, print messages, close
* all windows in the list during suspend and reopen
* windows based on new lpar_creds on the destination
* system.
*/
if (old_nr_creds != new_nr_creds) {
pr_err("Target credits mismatch with the hypervisor\n");
pr_err("state(%d): lpar creds: %d HV lpar creds: %d\n",
action, old_nr_creds, new_nr_creds);
pr_err("Used creds: %d, Active creds: %d\n",
atomic_read(&caps->nr_used_credits),
vcaps->nr_open_windows - vcaps->nr_close_wins);
}
} else {
pr_err("state(%d): Get VAS capabilities failed with %d\n",
action, rc);
/*
* We cannot stop migration with the current LPM
* implementation. So continue closing all windows in
* the list (during suspend) and return without
* opening windows (during resume) if VAS capabilities
* HCALL failed.
*/
if (action == VAS_RESUME)
goto out;
}
switch (action) {
case VAS_SUSPEND:
rc = reconfig_close_windows(vcaps, vcaps->nr_open_windows,
true);
break;
case VAS_RESUME:
atomic_set(&caps->nr_total_credits, new_nr_creds);
rc = reconfig_open_windows(vcaps, new_nr_creds, true);
break;
default:
/* should not happen */
pr_err("Invalid migration action %d\n", action);
rc = -EINVAL;
goto out;
}
/*
* Ignore errors during suspend and return for resume.
*/
if (rc && (action == VAS_RESUME))
goto out;
}
out:
mutex_unlock(&vas_pseries_mutex);
return rc;
}
static int __init pseries_vas_init(void)
{
struct hv_vas_all_caps *hv_caps;
int rc = 0;
/*
* Linux supports user space COPY/PASTE only with Radix
*/
if (!radix_enabled()) {
copypaste_feat = false;
pr_err("API is supported only with radix page tables\n");
return -ENOTSUPP;
}
hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL);
if (!hv_caps)
return -ENOMEM;
/*
* Get VAS overall capabilities by passing 0 to feature type.
*/
rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES, 0,
(u64)virt_to_phys(hv_caps));
if (rc)
goto out;
caps_all.descriptor = be64_to_cpu(hv_caps->descriptor);
caps_all.feat_type = be64_to_cpu(hv_caps->feat_type);
sysfs_pseries_vas_init(&caps_all);
/*
* QOS capabilities available
*/
if (caps_all.feat_type & VAS_GZIP_QOS_FEAT_BIT) {
rc = get_vas_capabilities(VAS_GZIP_QOS_FEAT,
VAS_GZIP_QOS_FEAT_TYPE, &hv_cop_caps);
if (rc)
goto out;
}
/*
* Default capabilities available
*/
if (caps_all.feat_type & VAS_GZIP_DEF_FEAT_BIT)
rc = get_vas_capabilities(VAS_GZIP_DEF_FEAT,
VAS_GZIP_DEF_FEAT_TYPE, &hv_cop_caps);
if (!rc && copypaste_feat) {
if (firmware_has_feature(FW_FEATURE_LPAR))
of_reconfig_notifier_register(&pseries_vas_nb);
pr_info("GZIP feature is available\n");
} else {
/*
* Should not happen, unless the get default
* capabilities HCALL failed. So disable the copy/paste
* feature.
*/
copypaste_feat = false;
}
out:
kfree(hv_caps);
return rc;
}
machine_device_initcall(pseries, pseries_vas_init);
| linux-master | arch/powerpc/platforms/pseries/vas.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Firmware-Assisted Dump support on POWERVM platform.
*
* Copyright 2011, Mahesh Salgaonkar, IBM Corporation.
* Copyright 2019, Hari Bathini, IBM Corporation.
*/
#define pr_fmt(fmt) "rtas fadump: " fmt
#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <asm/page.h>
#include <asm/rtas.h>
#include <asm/fadump.h>
#include <asm/fadump-internal.h>
#include "rtas-fadump.h"
static struct rtas_fadump_mem_struct fdm;
static const struct rtas_fadump_mem_struct *fdm_active;
static void rtas_fadump_update_config(struct fw_dump *fadump_conf,
const struct rtas_fadump_mem_struct *fdm)
{
fadump_conf->boot_mem_dest_addr =
be64_to_cpu(fdm->rmr_region.destination_address);
fadump_conf->fadumphdr_addr = (fadump_conf->boot_mem_dest_addr +
fadump_conf->boot_memory_size);
}
/*
* This function is called in the capture kernel to get configuration details
* set up in the first kernel and passed to the f/w.
*/
static void __init rtas_fadump_get_config(struct fw_dump *fadump_conf,
const struct rtas_fadump_mem_struct *fdm)
{
fadump_conf->boot_mem_addr[0] =
be64_to_cpu(fdm->rmr_region.source_address);
fadump_conf->boot_mem_sz[0] = be64_to_cpu(fdm->rmr_region.source_len);
fadump_conf->boot_memory_size = fadump_conf->boot_mem_sz[0];
fadump_conf->boot_mem_top = fadump_conf->boot_memory_size;
fadump_conf->boot_mem_regs_cnt = 1;
/*
* Start address of reserve dump area (permanent reservation) for
* re-registering FADump after dump capture.
*/
fadump_conf->reserve_dump_area_start =
be64_to_cpu(fdm->cpu_state_data.destination_address);
rtas_fadump_update_config(fadump_conf, fdm);
}
static u64 rtas_fadump_init_mem_struct(struct fw_dump *fadump_conf)
{
u64 addr = fadump_conf->reserve_dump_area_start;
memset(&fdm, 0, sizeof(struct rtas_fadump_mem_struct));
addr = addr & PAGE_MASK;
fdm.header.dump_format_version = cpu_to_be32(0x00000001);
fdm.header.dump_num_sections = cpu_to_be16(3);
fdm.header.dump_status_flag = 0;
fdm.header.offset_first_dump_section =
cpu_to_be32((u32)offsetof(struct rtas_fadump_mem_struct,
cpu_state_data));
/*
* Fields for disk dump option.
* We are not using disk dump option, hence set these fields to 0.
*/
fdm.header.dd_block_size = 0;
fdm.header.dd_block_offset = 0;
fdm.header.dd_num_blocks = 0;
fdm.header.dd_offset_disk_path = 0;
/* Set to 0 to disable an automatic dump-reboot. */
fdm.header.max_time_auto = 0;
/* Kernel dump sections */
/* cpu state data section. */
fdm.cpu_state_data.request_flag =
cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG);
fdm.cpu_state_data.source_data_type =
cpu_to_be16(RTAS_FADUMP_CPU_STATE_DATA);
fdm.cpu_state_data.source_address = 0;
fdm.cpu_state_data.source_len =
cpu_to_be64(fadump_conf->cpu_state_data_size);
fdm.cpu_state_data.destination_address = cpu_to_be64(addr);
addr += fadump_conf->cpu_state_data_size;
/* hpte region section */
fdm.hpte_region.request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG);
fdm.hpte_region.source_data_type =
cpu_to_be16(RTAS_FADUMP_HPTE_REGION);
fdm.hpte_region.source_address = 0;
fdm.hpte_region.source_len =
cpu_to_be64(fadump_conf->hpte_region_size);
fdm.hpte_region.destination_address = cpu_to_be64(addr);
addr += fadump_conf->hpte_region_size;
/*
* Align boot memory area destination address to page boundary to
* be able to mmap read this area in the vmcore.
*/
addr = PAGE_ALIGN(addr);
/* RMA region section */
fdm.rmr_region.request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG);
fdm.rmr_region.source_data_type =
cpu_to_be16(RTAS_FADUMP_REAL_MODE_REGION);
fdm.rmr_region.source_address = cpu_to_be64(0);
fdm.rmr_region.source_len = cpu_to_be64(fadump_conf->boot_memory_size);
fdm.rmr_region.destination_address = cpu_to_be64(addr);
addr += fadump_conf->boot_memory_size;
rtas_fadump_update_config(fadump_conf, &fdm);
return addr;
}
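/*
* Resulting layout of the reserved dump area set up above (a simplified
* sketch, starting at fadump_conf->reserve_dump_area_start):
*
*	[ CPU state data ][ HPTE region ][ page-aligned boot memory copy ]
*
* The returned address is the first byte after the boot memory copy.
*/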
static u64 rtas_fadump_get_bootmem_min(void)
{
return RTAS_FADUMP_MIN_BOOT_MEM;
}
static int rtas_fadump_register(struct fw_dump *fadump_conf)
{
unsigned int wait_time;
int rc, err = -EIO;
/* TODO: Add upper time limit for the delay */
do {
rc = rtas_call(fadump_conf->ibm_configure_kernel_dump, 3, 1,
NULL, FADUMP_REGISTER, &fdm,
sizeof(struct rtas_fadump_mem_struct));
wait_time = rtas_busy_delay_time(rc);
if (wait_time)
mdelay(wait_time);
} while (wait_time);
switch (rc) {
case 0:
pr_info("Registration is successful!\n");
fadump_conf->dump_registered = 1;
err = 0;
break;
case -1:
pr_err("Failed to register. Hardware Error(%d).\n", rc);
break;
case -3:
if (!is_fadump_boot_mem_contiguous())
pr_err("Can't have holes in boot memory area.\n");
else if (!is_fadump_reserved_mem_contiguous())
pr_err("Can't have holes in reserved memory area.\n");
pr_err("Failed to register. Parameter Error(%d).\n", rc);
err = -EINVAL;
break;
case -9:
pr_err("Already registered!\n");
fadump_conf->dump_registered = 1;
err = -EEXIST;
break;
default:
pr_err("Failed to register. Unknown Error(%d).\n", rc);
break;
}
return err;
}
static int rtas_fadump_unregister(struct fw_dump *fadump_conf)
{
unsigned int wait_time;
int rc;
/* TODO: Add upper time limit for the delay */
do {
rc = rtas_call(fadump_conf->ibm_configure_kernel_dump, 3, 1,
NULL, FADUMP_UNREGISTER, &fdm,
sizeof(struct rtas_fadump_mem_struct));
wait_time = rtas_busy_delay_time(rc);
if (wait_time)
mdelay(wait_time);
} while (wait_time);
if (rc) {
pr_err("Failed to un-register - unexpected error(%d).\n", rc);
return -EIO;
}
fadump_conf->dump_registered = 0;
return 0;
}
static int rtas_fadump_invalidate(struct fw_dump *fadump_conf)
{
unsigned int wait_time;
int rc;
/* TODO: Add upper time limit for the delay */
do {
rc = rtas_call(fadump_conf->ibm_configure_kernel_dump, 3, 1,
NULL, FADUMP_INVALIDATE, fdm_active,
sizeof(struct rtas_fadump_mem_struct));
wait_time = rtas_busy_delay_time(rc);
if (wait_time)
mdelay(wait_time);
} while (wait_time);
if (rc) {
pr_err("Failed to invalidate - unexpected error (%d).\n", rc);
return -EIO;
}
fadump_conf->dump_active = 0;
fdm_active = NULL;
return 0;
}
#define RTAS_FADUMP_GPR_MASK 0xffffff0000000000
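/*
* Worked example (illustrative): for the ASCII register id "GPR05", the top
* three bytes match fadump_str_to_u64("GPR"); after masking and shifting,
* the remaining digit bytes parse as "05", so rtas_fadump_gpr_index()
* returns 5. Non-GPR ids and indices above 31 yield -1.
*/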
static inline int rtas_fadump_gpr_index(u64 id)
{
char str[3];
int i = -1;
if ((id & RTAS_FADUMP_GPR_MASK) == fadump_str_to_u64("GPR")) {
/* get the digits at the end */
id &= ~RTAS_FADUMP_GPR_MASK;
id >>= 24;
str[2] = '\0';
str[1] = id & 0xff;
str[0] = (id >> 8) & 0xff;
if (kstrtoint(str, 10, &i))
i = -EINVAL;
if (i > 31)
i = -1;
}
return i;
}
static void __init rtas_fadump_set_regval(struct pt_regs *regs, u64 reg_id, u64 reg_val)
{
int i;
i = rtas_fadump_gpr_index(reg_id);
if (i >= 0)
regs->gpr[i] = (unsigned long)reg_val;
else if (reg_id == fadump_str_to_u64("NIA"))
regs->nip = (unsigned long)reg_val;
else if (reg_id == fadump_str_to_u64("MSR"))
regs->msr = (unsigned long)reg_val;
else if (reg_id == fadump_str_to_u64("CTR"))
regs->ctr = (unsigned long)reg_val;
else if (reg_id == fadump_str_to_u64("LR"))
regs->link = (unsigned long)reg_val;
else if (reg_id == fadump_str_to_u64("XER"))
regs->xer = (unsigned long)reg_val;
else if (reg_id == fadump_str_to_u64("CR"))
regs->ccr = (unsigned long)reg_val;
else if (reg_id == fadump_str_to_u64("DAR"))
regs->dar = (unsigned long)reg_val;
else if (reg_id == fadump_str_to_u64("DSISR"))
regs->dsisr = (unsigned long)reg_val;
}
static struct rtas_fadump_reg_entry* __init
rtas_fadump_read_regs(struct rtas_fadump_reg_entry *reg_entry,
struct pt_regs *regs)
{
memset(regs, 0, sizeof(struct pt_regs));
while (be64_to_cpu(reg_entry->reg_id) != fadump_str_to_u64("CPUEND")) {
rtas_fadump_set_regval(regs, be64_to_cpu(reg_entry->reg_id),
be64_to_cpu(reg_entry->reg_value));
reg_entry++;
}
reg_entry++;
return reg_entry;
}
/*
* Read CPU state dump data and convert it into ELF notes.
* The CPU dump starts with magic number "REGSAVE". NumCpusOffset should be
* used to access the data to allow for additional fields to be added without
* affecting compatibility. Each list of registers for a CPU starts with
* "CPUSTRT" and ends with "CPUEND". Each register entry is of 16 bytes,
* 8 Byte ASCII identifier and 8 Byte register value. The register entry
* with identifier "CPUSTRT" and "CPUEND" contains 4 byte cpu id as part
* of register value. For more details refer to PAPR document.
*
* Only for the crashing cpu do we ignore the CPU dump data and get the exact
* state from the fadump crash info structure populated by the first kernel at
* the time of the crash.
*/
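/*
* Illustrative layout of the register save area parsed below (field sizes
* as described above; the values are examples only):
*
*	"REGSAVE" ... NumCpusOffset --> NumCpus = 2
*	"CPUSTRT" <cpu 0> "GPR00" <val> ... "NIA" <val> ... "CPUEND" <cpu 0>
*	"CPUSTRT" <cpu 1> ...                                "CPUEND" <cpu 1>
*/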
static int __init rtas_fadump_build_cpu_notes(struct fw_dump *fadump_conf)
{
struct rtas_fadump_reg_save_area_header *reg_header;
struct fadump_crash_info_header *fdh = NULL;
struct rtas_fadump_reg_entry *reg_entry;
u32 num_cpus, *note_buf;
int i, rc = 0, cpu = 0;
struct pt_regs regs;
unsigned long addr;
void *vaddr;
addr = be64_to_cpu(fdm_active->cpu_state_data.destination_address);
vaddr = __va(addr);
reg_header = vaddr;
if (be64_to_cpu(reg_header->magic_number) !=
fadump_str_to_u64("REGSAVE")) {
pr_err("Unable to read register save area.\n");
return -ENOENT;
}
pr_debug("--------CPU State Data------------\n");
pr_debug("Magic Number: %llx\n", be64_to_cpu(reg_header->magic_number));
pr_debug("NumCpuOffset: %x\n", be32_to_cpu(reg_header->num_cpu_offset));
vaddr += be32_to_cpu(reg_header->num_cpu_offset);
num_cpus = be32_to_cpu(*((__be32 *)(vaddr)));
pr_debug("NumCpus : %u\n", num_cpus);
vaddr += sizeof(u32);
reg_entry = (struct rtas_fadump_reg_entry *)vaddr;
rc = fadump_setup_cpu_notes_buf(num_cpus);
if (rc != 0)
return rc;
note_buf = (u32 *)fadump_conf->cpu_notes_buf_vaddr;
if (fadump_conf->fadumphdr_addr)
fdh = __va(fadump_conf->fadumphdr_addr);
for (i = 0; i < num_cpus; i++) {
if (be64_to_cpu(reg_entry->reg_id) !=
fadump_str_to_u64("CPUSTRT")) {
pr_err("Unable to read CPU state data\n");
rc = -ENOENT;
goto error_out;
}
/* Lower 4 bytes of reg_value contains logical cpu id */
cpu = (be64_to_cpu(reg_entry->reg_value) &
RTAS_FADUMP_CPU_ID_MASK);
if (fdh && !cpumask_test_cpu(cpu, &fdh->cpu_mask)) {
RTAS_FADUMP_SKIP_TO_NEXT_CPU(reg_entry);
continue;
}
pr_debug("Reading register data for cpu %d...\n", cpu);
if (fdh && fdh->crashing_cpu == cpu) {
regs = fdh->regs;
note_buf = fadump_regs_to_elf_notes(note_buf, &regs);
RTAS_FADUMP_SKIP_TO_NEXT_CPU(reg_entry);
} else {
reg_entry++;
reg_entry = rtas_fadump_read_regs(reg_entry, &regs);
note_buf = fadump_regs_to_elf_notes(note_buf, &regs);
}
}
final_note(note_buf);
if (fdh) {
pr_debug("Updating elfcore header (%llx) with cpu notes\n",
fdh->elfcorehdr_addr);
fadump_update_elfcore_header(__va(fdh->elfcorehdr_addr));
}
return 0;
error_out:
fadump_free_cpu_notes_buf();
return rc;
}
/*
* Validate and process the dump data stored by firmware before exporting
* it through '/proc/vmcore'.
*/
static int __init rtas_fadump_process(struct fw_dump *fadump_conf)
{
struct fadump_crash_info_header *fdh;
int rc = 0;
if (!fdm_active || !fadump_conf->fadumphdr_addr)
return -EINVAL;
/* Check if the dump data is valid. */
if ((be16_to_cpu(fdm_active->header.dump_status_flag) ==
RTAS_FADUMP_ERROR_FLAG) ||
(fdm_active->cpu_state_data.error_flags != 0) ||
(fdm_active->rmr_region.error_flags != 0)) {
pr_err("Dump taken by platform is not valid\n");
return -EINVAL;
}
if ((fdm_active->rmr_region.bytes_dumped !=
fdm_active->rmr_region.source_len) ||
!fdm_active->cpu_state_data.bytes_dumped) {
pr_err("Dump taken by platform is incomplete\n");
return -EINVAL;
}
/* Validate the fadump crash info header */
fdh = __va(fadump_conf->fadumphdr_addr);
if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) {
pr_err("Crash info header is not valid.\n");
return -EINVAL;
}
rc = rtas_fadump_build_cpu_notes(fadump_conf);
if (rc)
return rc;
/*
* We are done validating dump info and the elfcore header is now ready
* to be exported. Set elfcorehdr_addr so that the vmcore module will
* export the elfcore header through '/proc/vmcore'.
*/
elfcorehdr_addr = fdh->elfcorehdr_addr;
return 0;
}
static void rtas_fadump_region_show(struct fw_dump *fadump_conf,
struct seq_file *m)
{
const struct rtas_fadump_section *cpu_data_section;
const struct rtas_fadump_mem_struct *fdm_ptr;
if (fdm_active)
fdm_ptr = fdm_active;
else
fdm_ptr = &fdm;
cpu_data_section = &(fdm_ptr->cpu_state_data);
seq_printf(m, "CPU :[%#016llx-%#016llx] %#llx bytes, Dumped: %#llx\n",
be64_to_cpu(cpu_data_section->destination_address),
be64_to_cpu(cpu_data_section->destination_address) +
be64_to_cpu(cpu_data_section->source_len) - 1,
be64_to_cpu(cpu_data_section->source_len),
be64_to_cpu(cpu_data_section->bytes_dumped));
seq_printf(m, "HPTE:[%#016llx-%#016llx] %#llx bytes, Dumped: %#llx\n",
be64_to_cpu(fdm_ptr->hpte_region.destination_address),
be64_to_cpu(fdm_ptr->hpte_region.destination_address) +
be64_to_cpu(fdm_ptr->hpte_region.source_len) - 1,
be64_to_cpu(fdm_ptr->hpte_region.source_len),
be64_to_cpu(fdm_ptr->hpte_region.bytes_dumped));
seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ",
be64_to_cpu(fdm_ptr->rmr_region.source_address),
be64_to_cpu(fdm_ptr->rmr_region.destination_address));
seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n",
be64_to_cpu(fdm_ptr->rmr_region.source_len),
be64_to_cpu(fdm_ptr->rmr_region.bytes_dumped));
/* Dump is active. Show preserved area start address. */
if (fdm_active) {
seq_printf(m, "\nMemory above %#016llx is reserved for saving crash dump\n",
fadump_conf->boot_mem_top);
}
}
static void rtas_fadump_trigger(struct fadump_crash_info_header *fdh,
const char *msg)
{
/* Call ibm,os-term rtas call to trigger firmware assisted dump */
rtas_os_term((char *)msg);
}
static struct fadump_ops rtas_fadump_ops = {
.fadump_init_mem_struct = rtas_fadump_init_mem_struct,
.fadump_get_bootmem_min = rtas_fadump_get_bootmem_min,
.fadump_register = rtas_fadump_register,
.fadump_unregister = rtas_fadump_unregister,
.fadump_invalidate = rtas_fadump_invalidate,
.fadump_process = rtas_fadump_process,
.fadump_region_show = rtas_fadump_region_show,
.fadump_trigger = rtas_fadump_trigger,
};
void __init rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
{
int i, size, num_sections;
const __be32 *sections;
const __be32 *token;
/*
* Check if Firmware-Assisted Dump is supported. If yes, check
* if a dump has been initiated on the last reboot.
*/
token = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump", NULL);
if (!token)
return;
fadump_conf->ibm_configure_kernel_dump = be32_to_cpu(*token);
fadump_conf->ops = &rtas_fadump_ops;
fadump_conf->fadump_supported = 1;
/* Firmware supports 64-bit value for size, align it to pagesize. */
fadump_conf->max_copy_size = ALIGN_DOWN(U64_MAX, PAGE_SIZE);
/*
* The 'ibm,kernel-dump' rtas node is present only if there is
* dump data waiting for us.
*/
fdm_active = of_get_flat_dt_prop(node, "ibm,kernel-dump", NULL);
if (fdm_active) {
pr_info("Firmware-assisted dump is active.\n");
fadump_conf->dump_active = 1;
rtas_fadump_get_config(fadump_conf, (void *)__pa(fdm_active));
}
/* Get the sizes required to store dump data for the firmware-provided
* dump sections.
* For each dump section type supported, there is a 32-bit cell that defines
* the ID of a supported section, followed by two 32-bit cells that
* give the size of the section in bytes.
*/
sections = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump-sizes",
&size);
if (!sections)
return;
num_sections = size / (3 * sizeof(u32));
for (i = 0; i < num_sections; i++, sections += 3) {
u32 type = (u32)of_read_number(sections, 1);
switch (type) {
case RTAS_FADUMP_CPU_STATE_DATA:
fadump_conf->cpu_state_data_size =
of_read_ulong(&sections[1], 2);
break;
case RTAS_FADUMP_HPTE_REGION:
fadump_conf->hpte_region_size =
of_read_ulong(&sections[1], 2);
break;
}
}
}
| linux-master | arch/powerpc/platforms/pseries/rtas-fadump.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pSeries_lpar.c
* Copyright (C) 2001 Todd Inglett, IBM Corporation
*
* pSeries LPAR support.
*/
/* Enables debugging of low-level hash table routines - careful! */
#undef DEBUG
#define pr_fmt(fmt) "lpar: " fmt
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/jump_label.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/spinlock.h>
#include <linux/cpuhotplug.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/pgtable.h>
#include <linux/debugfs.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/papr-sysparm.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>
#include <asm/dtl.h>
#include <asm/vphn.h>
#include "pseries.h"
/* Flag bits for H_BULK_REMOVE */
#define HBR_REQUEST 0x4000000000000000UL
#define HBR_RESPONSE 0x8000000000000000UL
#define HBR_END 0xc000000000000000UL
#define HBR_AVPN 0x0200000000000000UL
#define HBR_ANDCOND 0x0100000000000000UL
/* in hvCall.S */
EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);
#ifdef CONFIG_PPC_64S_HASH_MMU
/*
* H_BLOCK_REMOVE supported block size for this page size in a segment whose
* base page size is that page size.
*
* The first index is the segment base page size, the second one is the actual
* page size.
*/
static int hblkrm_size[MMU_PAGE_COUNT][MMU_PAGE_COUNT] __ro_after_init;
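/*
* For example (illustrative), hblkrm_size[MMU_PAGE_64K][MMU_PAGE_64K] == 8
* would mean up to 8 64K HPTEs can be invalidated per H_BLOCK_REMOVE call
* in a segment whose base page size is 64K, while 0 would mean the
* combination is not usable via H_BLOCK_REMOVE.
*/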
#endif
/*
* Due to the complexity involved, and because the current hypervisor only
* returns this value or 0, we are limiting the supported H_BLOCK_REMOVE
* buffer size to an 8-entry block.
*/
#define HBLKRM_SUPPORTED_BLOCK_SIZE 8
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
static u8 dtl_mask = DTL_LOG_PREEMPT;
#else
static u8 dtl_mask;
#endif
void alloc_dtl_buffers(unsigned long *time_limit)
{
int cpu;
struct paca_struct *pp;
struct dtl_entry *dtl;
for_each_possible_cpu(cpu) {
pp = paca_ptrs[cpu];
if (pp->dispatch_log)
continue;
dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
if (!dtl) {
pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
cpu);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
pr_warn("Stolen time statistics will be unreliable\n");
#endif
break;
}
pp->dtl_ridx = 0;
pp->dispatch_log = dtl;
pp->dispatch_log_end = dtl + N_DISPATCH_LOG;
pp->dtl_curr = dtl;
if (time_limit && time_after(jiffies, *time_limit)) {
cond_resched();
*time_limit = jiffies + HZ;
}
}
}
void register_dtl_buffer(int cpu)
{
long ret;
struct paca_struct *pp;
struct dtl_entry *dtl;
int hwcpu = get_hard_smp_processor_id(cpu);
pp = paca_ptrs[cpu];
dtl = pp->dispatch_log;
if (dtl && dtl_mask) {
pp->dtl_ridx = 0;
pp->dtl_curr = dtl;
lppaca_of(cpu).dtl_idx = 0;
/* hypervisor reads buffer length from this field */
dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
ret = register_dtl(hwcpu, __pa(dtl));
if (ret)
pr_err("WARNING: DTL registration of cpu %d (hw %d) failed with %ld\n",
cpu, hwcpu, ret);
lppaca_of(cpu).dtl_enable_mask = dtl_mask;
}
}
#ifdef CONFIG_PPC_SPLPAR
struct dtl_worker {
struct delayed_work work;
int cpu;
};
struct vcpu_dispatch_data {
int last_disp_cpu;
int total_disp;
int same_cpu_disp;
int same_chip_disp;
int diff_chip_disp;
int far_chip_disp;
int numa_home_disp;
int numa_remote_disp;
int numa_far_disp;
};
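/*
* The counters above bucket each dispatch by how far the vcpu moved since
* its previous dispatch. As the switch statements in update_vcpu_disp_stat()
* below show, a relative distance of 0 is counted as a same-chip dispatch,
* 1 as a different-chip dispatch and 2 as a far-chip dispatch; the numa_*
* counters apply the same idea relative to the vcpu's home node.
*/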
/*
* This represents the number of cpus in the hypervisor. Since there is no
* architected way to discover the number of processors in the host, we
* provision for dealing with NR_CPUS. This is currently 2048 by default, and
* is sufficient for our purposes. This will need to be tweaked if
* CONFIG_NR_CPUS is changed.
*/
#define NR_CPUS_H NR_CPUS
DEFINE_RWLOCK(dtl_access_lock);
static DEFINE_PER_CPU(struct vcpu_dispatch_data, vcpu_disp_data);
static DEFINE_PER_CPU(u64, dtl_entry_ridx);
static DEFINE_PER_CPU(struct dtl_worker, dtl_workers);
static enum cpuhp_state dtl_worker_state;
static DEFINE_MUTEX(dtl_enable_mutex);
static int vcpudispatch_stats_on __read_mostly;
static int vcpudispatch_stats_freq = 50;
static __be32 *vcpu_associativity, *pcpu_associativity;
static void free_dtl_buffers(unsigned long *time_limit)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
int cpu;
struct paca_struct *pp;
for_each_possible_cpu(cpu) {
pp = paca_ptrs[cpu];
if (!pp->dispatch_log)
continue;
kmem_cache_free(dtl_cache, pp->dispatch_log);
pp->dtl_ridx = 0;
pp->dispatch_log = 0;
pp->dispatch_log_end = 0;
pp->dtl_curr = 0;
if (time_limit && time_after(jiffies, *time_limit)) {
cond_resched();
*time_limit = jiffies + HZ;
}
}
#endif
}
static int init_cpu_associativity(void)
{
vcpu_associativity = kcalloc(num_possible_cpus() / threads_per_core,
VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL);
pcpu_associativity = kcalloc(NR_CPUS_H / threads_per_core,
VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL);
if (!vcpu_associativity || !pcpu_associativity) {
pr_err("error allocating memory for associativity information\n");
return -ENOMEM;
}
return 0;
}
static void destroy_cpu_associativity(void)
{
kfree(vcpu_associativity);
kfree(pcpu_associativity);
vcpu_associativity = pcpu_associativity = 0;
}
static __be32 *__get_cpu_associativity(int cpu, __be32 *cpu_assoc, int flag)
{
__be32 *assoc;
int rc = 0;
assoc = &cpu_assoc[(int)(cpu / threads_per_core) * VPHN_ASSOC_BUFSIZE];
if (!assoc[0]) {
rc = hcall_vphn(cpu, flag, &assoc[0]);
if (rc)
return NULL;
}
return assoc;
}
static __be32 *get_pcpu_associativity(int cpu)
{
return __get_cpu_associativity(cpu, pcpu_associativity, VPHN_FLAG_PCPU);
}
static __be32 *get_vcpu_associativity(int cpu)
{
return __get_cpu_associativity(cpu, vcpu_associativity, VPHN_FLAG_VCPU);
}
static int cpu_relative_dispatch_distance(int last_disp_cpu, int cur_disp_cpu)
{
__be32 *last_disp_cpu_assoc, *cur_disp_cpu_assoc;
if (last_disp_cpu >= NR_CPUS_H || cur_disp_cpu >= NR_CPUS_H)
return -EINVAL;
last_disp_cpu_assoc = get_pcpu_associativity(last_disp_cpu);
cur_disp_cpu_assoc = get_pcpu_associativity(cur_disp_cpu);
if (!last_disp_cpu_assoc || !cur_disp_cpu_assoc)
return -EIO;
return cpu_relative_distance(last_disp_cpu_assoc, cur_disp_cpu_assoc);
}
static int cpu_home_node_dispatch_distance(int disp_cpu)
{
__be32 *disp_cpu_assoc, *vcpu_assoc;
int vcpu_id = smp_processor_id();
if (disp_cpu >= NR_CPUS_H) {
pr_debug_ratelimited("vcpu dispatch cpu %d > %d\n",
disp_cpu, NR_CPUS_H);
return -EINVAL;
}
disp_cpu_assoc = get_pcpu_associativity(disp_cpu);
vcpu_assoc = get_vcpu_associativity(vcpu_id);
if (!disp_cpu_assoc || !vcpu_assoc)
return -EIO;
return cpu_relative_distance(disp_cpu_assoc, vcpu_assoc);
}
static void update_vcpu_disp_stat(int disp_cpu)
{
struct vcpu_dispatch_data *disp;
int distance;
disp = this_cpu_ptr(&vcpu_disp_data);
if (disp->last_disp_cpu == -1) {
disp->last_disp_cpu = disp_cpu;
return;
}
disp->total_disp++;
if (disp->last_disp_cpu == disp_cpu ||
(cpu_first_thread_sibling(disp->last_disp_cpu) ==
cpu_first_thread_sibling(disp_cpu)))
disp->same_cpu_disp++;
else {
distance = cpu_relative_dispatch_distance(disp->last_disp_cpu,
disp_cpu);
if (distance < 0)
pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n",
smp_processor_id());
else {
switch (distance) {
case 0:
disp->same_chip_disp++;
break;
case 1:
disp->diff_chip_disp++;
break;
case 2:
disp->far_chip_disp++;
break;
default:
pr_debug_ratelimited("vcpudispatch_stats: cpu %d (%d -> %d): unexpected relative dispatch distance %d\n",
smp_processor_id(),
disp->last_disp_cpu,
disp_cpu,
distance);
}
}
}
distance = cpu_home_node_dispatch_distance(disp_cpu);
if (distance < 0)
pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n",
smp_processor_id());
else {
switch (distance) {
case 0:
disp->numa_home_disp++;
break;
case 1:
disp->numa_remote_disp++;
break;
case 2:
disp->numa_far_disp++;
break;
default:
pr_debug_ratelimited("vcpudispatch_stats: cpu %d on %d: unexpected numa dispatch distance %d\n",
smp_processor_id(),
disp_cpu,
distance);
}
}
disp->last_disp_cpu = disp_cpu;
}
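/*
 * Descriptive comment (added): delayed-work handler.  Consume the dispatch
 * trace log entries added since the last read index, feed each one to
 * update_vcpu_disp_stat(), resynchronise if the ring buffer overflowed, and
 * re-arm the work at HZ / vcpudispatch_stats_freq.
 */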
static void process_dtl_buffer(struct work_struct *work)
{
struct dtl_entry dtle;
u64 i = __this_cpu_read(dtl_entry_ridx);
struct dtl_entry *dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
struct lppaca *vpa = local_paca->lppaca_ptr;
struct dtl_worker *d = container_of(work, struct dtl_worker, work.work);
if (!local_paca->dispatch_log)
return;
	/* if we have been migrated away, we cancel ourselves */
if (d->cpu != smp_processor_id()) {
pr_debug("vcpudispatch_stats: cpu %d worker migrated -- canceling worker\n",
smp_processor_id());
return;
}
if (i == be64_to_cpu(vpa->dtl_idx))
goto out;
while (i < be64_to_cpu(vpa->dtl_idx)) {
dtle = *dtl;
barrier();
if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
/* buffer has overflowed */
pr_debug_ratelimited("vcpudispatch_stats: cpu %d lost %lld DTL samples\n",
d->cpu,
be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG - i);
i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
continue;
}
update_vcpu_disp_stat(be16_to_cpu(dtle.processor_id));
++i;
++dtl;
if (dtl == dtl_end)
dtl = local_paca->dispatch_log;
}
__this_cpu_write(dtl_entry_ridx, i);
out:
schedule_delayed_work_on(d->cpu, to_delayed_work(work),
HZ / vcpudispatch_stats_freq);
}
static int dtl_worker_online(unsigned int cpu)
{
struct dtl_worker *d = &per_cpu(dtl_workers, cpu);
memset(d, 0, sizeof(*d));
INIT_DELAYED_WORK(&d->work, process_dtl_buffer);
d->cpu = cpu;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
per_cpu(dtl_entry_ridx, cpu) = 0;
register_dtl_buffer(cpu);
#else
per_cpu(dtl_entry_ridx, cpu) = be64_to_cpu(lppaca_of(cpu).dtl_idx);
#endif
schedule_delayed_work_on(cpu, &d->work, HZ / vcpudispatch_stats_freq);
return 0;
}
static int dtl_worker_offline(unsigned int cpu)
{
struct dtl_worker *d = &per_cpu(dtl_workers, cpu);
cancel_delayed_work_sync(&d->work);
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
unregister_dtl(get_hard_smp_processor_id(cpu));
#endif
return 0;
}
static void set_global_dtl_mask(u8 mask)
{
int cpu;
dtl_mask = mask;
for_each_present_cpu(cpu)
lppaca_of(cpu).dtl_enable_mask = dtl_mask;
}
static void reset_global_dtl_mask(void)
{
int cpu;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
dtl_mask = DTL_LOG_PREEMPT;
#else
dtl_mask = 0;
#endif
for_each_present_cpu(cpu)
lppaca_of(cpu).dtl_enable_mask = dtl_mask;
}
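/*
 * Descriptive comment (added): grab exclusive access to the dispatch trace
 * logs, switch every present CPU to logging all dispatch events, allocate the
 * per-CPU buffers, and install CPU hotplug callbacks so a worker runs on
 * every online CPU.  Everything is rolled back if the hotplug state cannot be
 * set up.
 */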
static int dtl_worker_enable(unsigned long *time_limit)
{
int rc = 0, state;
if (!write_trylock(&dtl_access_lock)) {
rc = -EBUSY;
goto out;
}
set_global_dtl_mask(DTL_LOG_ALL);
/* Setup dtl buffers and register those */
alloc_dtl_buffers(time_limit);
state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/dtl:online",
dtl_worker_online, dtl_worker_offline);
if (state < 0) {
pr_err("vcpudispatch_stats: unable to setup workqueue for DTL processing\n");
free_dtl_buffers(time_limit);
reset_global_dtl_mask();
write_unlock(&dtl_access_lock);
rc = -EINVAL;
goto out;
}
dtl_worker_state = state;
out:
return rc;
}
static void dtl_worker_disable(unsigned long *time_limit)
{
cpuhp_remove_state(dtl_worker_state);
free_dtl_buffers(time_limit);
reset_global_dtl_mask();
write_unlock(&dtl_access_lock);
}
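/*
 * Descriptive comment (added): procfs write handler for
 * /proc/powerpc/vcpudispatch_stats.  "1" enables the statistics (setting up
 * the associativity caches and the per-CPU DTL workers), "0" tears them down
 * again.  Serialised by dtl_enable_mutex.
 */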
static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p,
size_t count, loff_t *ppos)
{
unsigned long time_limit = jiffies + HZ;
struct vcpu_dispatch_data *disp;
int rc, cmd, cpu;
char buf[16];
if (count > 15)
return -EINVAL;
if (copy_from_user(buf, p, count))
return -EFAULT;
buf[count] = 0;
rc = kstrtoint(buf, 0, &cmd);
if (rc || cmd < 0 || cmd > 1) {
pr_err("vcpudispatch_stats: please use 0 to disable or 1 to enable dispatch statistics\n");
return rc ? rc : -EINVAL;
}
mutex_lock(&dtl_enable_mutex);
if ((cmd == 0 && !vcpudispatch_stats_on) ||
(cmd == 1 && vcpudispatch_stats_on))
goto out;
if (cmd) {
rc = init_cpu_associativity();
if (rc)
goto out;
for_each_possible_cpu(cpu) {
disp = per_cpu_ptr(&vcpu_disp_data, cpu);
memset(disp, 0, sizeof(*disp));
disp->last_disp_cpu = -1;
}
rc = dtl_worker_enable(&time_limit);
if (rc) {
destroy_cpu_associativity();
goto out;
}
} else {
dtl_worker_disable(&time_limit);
destroy_cpu_associativity();
}
vcpudispatch_stats_on = cmd;
out:
mutex_unlock(&dtl_enable_mutex);
if (rc)
return rc;
return count;
}
static int vcpudispatch_stats_display(struct seq_file *p, void *v)
{
int cpu;
struct vcpu_dispatch_data *disp;
if (!vcpudispatch_stats_on) {
seq_puts(p, "off\n");
return 0;
}
for_each_online_cpu(cpu) {
disp = per_cpu_ptr(&vcpu_disp_data, cpu);
seq_printf(p, "cpu%d", cpu);
seq_put_decimal_ull(p, " ", disp->total_disp);
seq_put_decimal_ull(p, " ", disp->same_cpu_disp);
seq_put_decimal_ull(p, " ", disp->same_chip_disp);
seq_put_decimal_ull(p, " ", disp->diff_chip_disp);
seq_put_decimal_ull(p, " ", disp->far_chip_disp);
seq_put_decimal_ull(p, " ", disp->numa_home_disp);
seq_put_decimal_ull(p, " ", disp->numa_remote_disp);
seq_put_decimal_ull(p, " ", disp->numa_far_disp);
seq_puts(p, "\n");
}
return 0;
}
static int vcpudispatch_stats_open(struct inode *inode, struct file *file)
{
return single_open(file, vcpudispatch_stats_display, NULL);
}
static const struct proc_ops vcpudispatch_stats_proc_ops = {
.proc_open = vcpudispatch_stats_open,
.proc_read = seq_read,
.proc_write = vcpudispatch_stats_write,
.proc_lseek = seq_lseek,
.proc_release = single_release,
};
static ssize_t vcpudispatch_stats_freq_write(struct file *file,
const char __user *p, size_t count, loff_t *ppos)
{
int rc, freq;
char buf[16];
if (count > 15)
return -EINVAL;
if (copy_from_user(buf, p, count))
return -EFAULT;
buf[count] = 0;
rc = kstrtoint(buf, 0, &freq);
if (rc || freq < 1 || freq > HZ) {
pr_err("vcpudispatch_stats_freq: please specify a frequency between 1 and %d\n",
HZ);
return rc ? rc : -EINVAL;
}
vcpudispatch_stats_freq = freq;
return count;
}
static int vcpudispatch_stats_freq_display(struct seq_file *p, void *v)
{
seq_printf(p, "%d\n", vcpudispatch_stats_freq);
return 0;
}
static int vcpudispatch_stats_freq_open(struct inode *inode, struct file *file)
{
return single_open(file, vcpudispatch_stats_freq_display, NULL);
}
static const struct proc_ops vcpudispatch_stats_freq_proc_ops = {
.proc_open = vcpudispatch_stats_freq_open,
.proc_read = seq_read,
.proc_write = vcpudispatch_stats_freq_write,
.proc_lseek = seq_lseek,
.proc_release = single_release,
};
static int __init vcpudispatch_stats_procfs_init(void)
{
if (!lppaca_shared_proc())
return 0;
if (!proc_create("powerpc/vcpudispatch_stats", 0600, NULL,
&vcpudispatch_stats_proc_ops))
pr_err("vcpudispatch_stats: error creating procfs file\n");
else if (!proc_create("powerpc/vcpudispatch_stats_freq", 0600, NULL,
&vcpudispatch_stats_freq_proc_ops))
pr_err("vcpudispatch_stats_freq: error creating procfs file\n");
return 0;
}
machine_device_initcall(pseries, vcpudispatch_stats_procfs_init);
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
u64 pseries_paravirt_steal_clock(int cpu)
{
struct lppaca *lppaca = &lppaca_of(cpu);
return be64_to_cpu(READ_ONCE(lppaca->enqueue_dispatch_tb)) +
be64_to_cpu(READ_ONCE(lppaca->ready_enqueue_tb));
}
#endif
#endif /* CONFIG_PPC_SPLPAR */
void vpa_init(int cpu)
{
int hwcpu = get_hard_smp_processor_id(cpu);
unsigned long addr;
long ret;
/*
* The spec says it "may be problematic" if CPU x registers the VPA of
* CPU y. We should never do that, but wail if we ever do.
*/
WARN_ON(cpu != smp_processor_id());
if (cpu_has_feature(CPU_FTR_ALTIVEC))
lppaca_of(cpu).vmxregs_in_use = 1;
if (cpu_has_feature(CPU_FTR_ARCH_207S))
lppaca_of(cpu).ebb_regs_in_use = 1;
addr = __pa(&lppaca_of(cpu));
ret = register_vpa(hwcpu, addr);
if (ret) {
pr_err("WARNING: VPA registration for cpu %d (hw %d) of area "
"%lx failed with %ld\n", cpu, hwcpu, addr, ret);
return;
}
#ifdef CONFIG_PPC_64S_HASH_MMU
/*
	 * PAPR calls this feature SLB-Buffer, but firmware never
	 * reports it. All SPLPAR systems support the SLB shadow buffer.
*/
if (!radix_enabled() && firmware_has_feature(FW_FEATURE_SPLPAR)) {
addr = __pa(paca_ptrs[cpu]->slb_shadow_ptr);
ret = register_slb_shadow(hwcpu, addr);
if (ret)
pr_err("WARNING: SLB shadow buffer registration for "
"cpu %d (hw %d) of area %lx failed with %ld\n",
cpu, hwcpu, addr, ret);
}
#endif /* CONFIG_PPC_64S_HASH_MMU */
/*
* Register dispatch trace log, if one has been allocated.
*/
register_dtl_buffer(cpu);
}
#ifdef CONFIG_PPC_BOOK3S_64
static int __init pseries_lpar_register_process_table(unsigned long base,
unsigned long page_size, unsigned long table_size)
{
long rc;
unsigned long flags = 0;
if (table_size)
flags |= PROC_TABLE_NEW;
if (radix_enabled()) {
flags |= PROC_TABLE_RADIX;
if (mmu_has_feature(MMU_FTR_GTSE))
flags |= PROC_TABLE_GTSE;
} else
flags |= PROC_TABLE_HPT_SLB;
for (;;) {
rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base,
page_size, table_size);
if (!H_IS_LONG_BUSY(rc))
break;
mdelay(get_longbusy_msecs(rc));
}
if (rc != H_SUCCESS) {
pr_err("Failed to register process table (rc=%ld)\n", rc);
BUG();
}
return rc;
}
#ifdef CONFIG_PPC_64S_HASH_MMU
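/*
 * Descriptive comment (added): insert an HPTE via the H_ENTER hcall.  The
 * hypervisor picks a free slot within the group; the low three bits of that
 * slot plus the secondary-hash bit are returned to the generic hash MMU code.
 */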
static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long vpn, unsigned long pa,
unsigned long rflags, unsigned long vflags,
int psize, int apsize, int ssize)
{
unsigned long lpar_rc;
unsigned long flags;
unsigned long slot;
unsigned long hpte_v, hpte_r;
if (!(vflags & HPTE_V_BOLTED))
pr_devel("hpte_insert(group=%lx, vpn=%016lx, "
"pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n",
hpte_group, vpn, pa, rflags, vflags, psize);
hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
if (!(vflags & HPTE_V_BOLTED))
pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
/* Now fill in the actual HPTE */
/* Set CEC cookie to 0 */
/* Zero page = 0 */
/* I-cache Invalidate = 0 */
/* I-cache synchronize = 0 */
/* Exact = 0 */
flags = 0;
if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
flags |= H_COALESCE_CAND;
lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
if (unlikely(lpar_rc == H_PTEG_FULL)) {
pr_devel("Hash table group is full\n");
return -1;
}
/*
* Since we try and ioremap PHBs we don't own, the pte insert
* will fail. However we must catch the failure in hash_page
* or we will loop forever, so return -2 in this case.
*/
if (unlikely(lpar_rc != H_SUCCESS)) {
pr_err("Failed hash pte insert with error %ld\n", lpar_rc);
return -2;
}
if (!(vflags & HPTE_V_BOLTED))
pr_devel(" -> slot: %lu\n", slot & 7);
/* Because of iSeries, we have to pass down the secondary
* bucket bit here as well
*/
return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock);
static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
{
unsigned long slot_offset;
unsigned long lpar_rc;
int i;
unsigned long dummy1, dummy2;
/* pick a random slot to start at */
slot_offset = mftb() & 0x7;
for (i = 0; i < HPTES_PER_GROUP; i++) {
/* don't remove a bolted entry */
lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset,
HPTE_V_BOLTED, &dummy1, &dummy2);
if (lpar_rc == H_SUCCESS)
return i;
/*
* The test for adjunct partition is performed before the
* ANDCOND test. H_RESOURCE may be returned, so we need to
* check for that as well.
*/
BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);
slot_offset++;
slot_offset &= 0x7;
}
return -1;
}
/* Called during kexec sequence with MMU off */
static notrace void manual_hpte_clear_all(void)
{
unsigned long size_bytes = 1UL << ppc64_pft_size;
unsigned long hpte_count = size_bytes >> 4;
struct {
unsigned long pteh;
unsigned long ptel;
} ptes[4];
long lpar_rc;
unsigned long i, j;
	/*
	 * Read in batches of 4; invalidate only valid entries not in the
	 * VRMA. hpte_count will be a multiple of 4.
	 */
for (i = 0; i < hpte_count; i += 4) {
lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
if (lpar_rc != H_SUCCESS) {
pr_info("Failed to read hash page table at %ld err %ld\n",
i, lpar_rc);
continue;
}
		for (j = 0; j < 4; j++) {
if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
HPTE_V_VRMA_MASK)
continue;
if (ptes[j].pteh & HPTE_V_VALID)
plpar_pte_remove_raw(0, i + j, 0,
&(ptes[j].pteh), &(ptes[j].ptel));
}
}
}
/* Called during kexec sequence with MMU off */
static notrace int hcall_hpte_clear_all(void)
{
int rc;
do {
rc = plpar_hcall_norets(H_CLEAR_HPT);
} while (rc == H_CONTINUE);
return rc;
}
/* Called during kexec sequence with MMU off */
static notrace void pseries_hpte_clear_all(void)
{
int rc;
rc = hcall_hpte_clear_all();
if (rc != H_SUCCESS)
manual_hpte_clear_all();
#ifdef __LITTLE_ENDIAN__
/*
* Reset exceptions to big endian.
*
* FIXME this is a hack for kexec, we need to reset the exception
* endian before starting the new kernel and this is a convenient place
* to do it.
*
* This is also called on boot when a fadump happens. In that case we
* must not change the exception endian mode.
*/
if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active())
pseries_big_endian_exceptions();
#endif
}
/*
* NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
* the low 3 bits of flags happen to line up. So no transform is needed.
* We can probably optimize here and assume the high bits of newpp are
* already zero. For now I am paranoid.
*/
static long pSeries_lpar_hpte_updatepp(unsigned long slot,
unsigned long newpp,
unsigned long vpn,
int psize, int apsize,
int ssize, unsigned long inv_flags)
{
unsigned long lpar_rc;
unsigned long flags;
unsigned long want_v;
want_v = hpte_encode_avpn(vpn, psize, ssize);
flags = (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO)) | H_AVPN;
flags |= (newpp & HPTE_R_KEY_HI) >> 48;
if (mmu_has_feature(MMU_FTR_KERNEL_RO))
/* Move pp0 into bit 8 (IBM 55) */
flags |= (newpp & HPTE_R_PP0) >> 55;
pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
want_v, slot, flags, psize);
lpar_rc = plpar_pte_protect(flags, slot, want_v);
if (lpar_rc == H_NOT_FOUND) {
pr_devel("not found !\n");
return -1;
}
pr_devel("ok\n");
BUG_ON(lpar_rc != H_SUCCESS);
return 0;
}
static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group)
{
long lpar_rc;
unsigned long i, j;
struct {
unsigned long pteh;
unsigned long ptel;
} ptes[4];
for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
if (lpar_rc != H_SUCCESS) {
pr_info("Failed to read hash page table at %ld err %ld\n",
hpte_group, lpar_rc);
continue;
}
for (j = 0; j < 4; j++) {
if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
(ptes[j].pteh & HPTE_V_VALID))
return i + j;
}
}
return -1;
}
static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
{
long slot;
unsigned long hash;
unsigned long want_v;
unsigned long hpte_group;
hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
want_v = hpte_encode_avpn(vpn, psize, ssize);
/*
	 * We try to keep bolted entries always in the primary hash,
	 * but in some cases we can find them in the secondary too.
*/
hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
if (slot < 0) {
/* Try in secondary */
hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
if (slot < 0)
return -1;
}
return hpte_group + slot;
}
static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
unsigned long ea,
int psize, int ssize)
{
unsigned long vpn;
unsigned long lpar_rc, slot, vsid, flags;
vsid = get_kernel_vsid(ea, ssize);
vpn = hpt_vpn(ea, vsid, ssize);
slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
BUG_ON(slot == -1);
flags = newpp & (HPTE_R_PP | HPTE_R_N);
if (mmu_has_feature(MMU_FTR_KERNEL_RO))
/* Move pp0 into bit 8 (IBM 55) */
flags |= (newpp & HPTE_R_PP0) >> 55;
flags |= ((newpp & HPTE_R_KEY_HI) >> 48) | (newpp & HPTE_R_KEY_LO);
lpar_rc = plpar_pte_protect(flags, slot, 0);
BUG_ON(lpar_rc != H_SUCCESS);
}
static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
int psize, int apsize,
int ssize, int local)
{
unsigned long want_v;
unsigned long lpar_rc;
unsigned long dummy1, dummy2;
pr_devel(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
slot, vpn, psize, local);
want_v = hpte_encode_avpn(vpn, psize, ssize);
lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
if (lpar_rc == H_NOT_FOUND)
return;
BUG_ON(lpar_rc != H_SUCCESS);
}
/*
* As defined in the PAPR's section 14.5.4.1.8
* The control mask doesn't include the returned reference and change bit from
* the processed PTE.
*/
#define HBLKR_AVPN 0x0100000000000000UL
#define HBLKR_CTRL_MASK 0xf800000000000000UL
#define HBLKR_CTRL_SUCCESS 0x8000000000000000UL
#define HBLKR_CTRL_ERRNOTFOUND 0x8800000000000000UL
#define HBLKR_CTRL_ERRBUSY 0xa000000000000000UL
/*
 * Return true if this block size is supported for the specified segment
 * base page size and actual page size.
 *
 * Currently, we only support blocks of 8 pages.
*/
static inline bool is_supported_hlbkrm(int bpsize, int psize)
{
return (hblkrm_size[bpsize][psize] == HBLKRM_SUPPORTED_BLOCK_SIZE);
}
/**
* H_BLOCK_REMOVE caller.
* @idx should point to the latest @param entry set with a PTEX.
 * If a PTE cannot be processed because another CPU has already locked that
 * group, those entries are put back in @param starting at index 1.
 * If entries have to be retried and @retry_busy is set to true, these entries
 * are retried until they succeed. If @retry_busy is set to false, the return
 * value is the number of entries yet to be processed.
*/
static unsigned long call_block_remove(unsigned long idx, unsigned long *param,
bool retry_busy)
{
unsigned long i, rc, new_idx;
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
if (idx < 2) {
pr_warn("Unexpected empty call to H_BLOCK_REMOVE");
return 0;
}
again:
new_idx = 0;
if (idx > PLPAR_HCALL9_BUFSIZE) {
pr_err("Too many PTEs (%lu) for H_BLOCK_REMOVE", idx);
idx = PLPAR_HCALL9_BUFSIZE;
} else if (idx < PLPAR_HCALL9_BUFSIZE)
param[idx] = HBR_END;
rc = plpar_hcall9(H_BLOCK_REMOVE, retbuf,
param[0], /* AVA */
param[1], param[2], param[3], param[4], /* TS0-7 */
param[5], param[6], param[7], param[8]);
if (rc == H_SUCCESS)
return 0;
BUG_ON(rc != H_PARTIAL);
/* Check that the unprocessed entries were 'not found' or 'busy' */
for (i = 0; i < idx-1; i++) {
unsigned long ctrl = retbuf[i] & HBLKR_CTRL_MASK;
if (ctrl == HBLKR_CTRL_ERRBUSY) {
param[++new_idx] = param[i+1];
continue;
}
BUG_ON(ctrl != HBLKR_CTRL_SUCCESS
&& ctrl != HBLKR_CTRL_ERRNOTFOUND);
}
/*
	 * If some entries were found busy, retry them if requested,
	 * or if all the entries have to be retried.
*/
if (new_idx && (retry_busy || new_idx == (PLPAR_HCALL9_BUFSIZE-1))) {
idx = new_idx + 1;
goto again;
}
return new_idx;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need
* to make sure that we avoid bouncing the hypervisor tlbie lock.
*/
#define PPC64_HUGE_HPTE_BATCH 12
static void hugepage_block_invalidate(unsigned long *slot, unsigned long *vpn,
int count, int psize, int ssize)
{
unsigned long param[PLPAR_HCALL9_BUFSIZE];
unsigned long shift, current_vpgb, vpgb;
int i, pix = 0;
shift = mmu_psize_defs[psize].shift;
for (i = 0; i < count; i++) {
/*
		 * Shift 3 more bits to the right to get an
		 * 8-page aligned virtual address.
*/
vpgb = (vpn[i] >> (shift - VPN_SHIFT + 3));
if (!pix || vpgb != current_vpgb) {
/*
* Need to start a new 8 pages block, flush
* the current one if needed.
*/
if (pix)
(void)call_block_remove(pix, param, true);
current_vpgb = vpgb;
param[0] = hpte_encode_avpn(vpn[i], psize, ssize);
pix = 1;
}
param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot[i];
if (pix == PLPAR_HCALL9_BUFSIZE) {
pix = call_block_remove(pix, param, false);
/*
* pix = 0 means that all the entries were
* removed, we can start a new block.
* Otherwise, this means that there are entries
* to retry, and pix points to latest one, so
* we should increment it and try to continue
* the same block.
*/
if (pix)
pix++;
}
}
if (pix)
(void)call_block_remove(pix, param, true);
}
static void hugepage_bulk_invalidate(unsigned long *slot, unsigned long *vpn,
int count, int psize, int ssize)
{
unsigned long param[PLPAR_HCALL9_BUFSIZE];
int i = 0, pix = 0, rc;
for (i = 0; i < count; i++) {
if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0,
ssize, 0);
} else {
param[pix] = HBR_REQUEST | HBR_AVPN | slot[i];
param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize);
pix += 2;
if (pix == 8) {
rc = plpar_hcall9(H_BULK_REMOVE, param,
param[0], param[1], param[2],
param[3], param[4], param[5],
param[6], param[7]);
BUG_ON(rc != H_SUCCESS);
pix = 0;
}
}
}
if (pix) {
param[pix] = HBR_END;
rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
param[2], param[3], param[4], param[5],
param[6], param[7]);
BUG_ON(rc != H_SUCCESS);
}
}
static inline void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
unsigned long *vpn,
int count, int psize,
int ssize)
{
unsigned long flags = 0;
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
if (lock_tlbie)
spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
/* Assuming THP size is 16M */
if (is_supported_hlbkrm(psize, MMU_PAGE_16M))
hugepage_block_invalidate(slot, vpn, count, psize, ssize);
else
hugepage_bulk_invalidate(slot, vpn, count, psize, ssize);
if (lock_tlbie)
spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
unsigned long addr,
unsigned char *hpte_slot_array,
int psize, int ssize, int local)
{
int i, index = 0;
unsigned long s_addr = addr;
unsigned int max_hpte_count, valid;
unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
unsigned long shift, hidx, vpn = 0, hash, slot;
shift = mmu_psize_defs[psize].shift;
max_hpte_count = 1U << (PMD_SHIFT - shift);
for (i = 0; i < max_hpte_count; i++) {
valid = hpte_valid(hpte_slot_array, i);
if (!valid)
continue;
hidx = hpte_hash_index(hpte_slot_array, i);
/* get the vpn */
addr = s_addr + (i * (1ul << shift));
vpn = hpt_vpn(addr, vsid, ssize);
hash = hpt_hash(vpn, shift, ssize);
if (hidx & _PTEIDX_SECONDARY)
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
slot_array[index] = slot;
vpn_array[index] = vpn;
if (index == PPC64_HUGE_HPTE_BATCH - 1) {
/*
			 * Now do a bulk invalidate
*/
__pSeries_lpar_hugepage_invalidate(slot_array,
vpn_array,
PPC64_HUGE_HPTE_BATCH,
psize, ssize);
index = 0;
} else
index++;
}
if (index)
__pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
index, psize, ssize);
}
#else
static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
unsigned long addr,
unsigned char *hpte_slot_array,
int psize, int ssize, int local)
{
WARN(1, "%s called without THP support\n", __func__);
}
#endif
static int pSeries_lpar_hpte_removebolted(unsigned long ea,
int psize, int ssize)
{
unsigned long vpn;
unsigned long slot, vsid;
vsid = get_kernel_vsid(ea, ssize);
vpn = hpt_vpn(ea, vsid, ssize);
slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
if (slot == -1)
return -ENOENT;
/*
* lpar doesn't use the passed actual page size
*/
pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0);
return 0;
}
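/*
 * Descriptive comment (added): recompute the global HPT slot for one
 * (sub)page.  Hash the VPN, flip to the secondary hash if the hidx in the
 * linux PTE says so, then add the group index stored in the PTE.
 */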
static inline unsigned long compute_slot(real_pte_t pte,
unsigned long vpn,
unsigned long index,
unsigned long shift,
int ssize)
{
unsigned long slot, hash, hidx;
hash = hpt_hash(vpn, shift, ssize);
hidx = __rpte_to_hidx(pte, index);
if (hidx & _PTEIDX_SECONDARY)
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
return slot;
}
/**
 * The hcall H_BLOCK_REMOVE requires that the virtual pages to be processed are
* "all within the same naturally aligned 8 page virtual address block".
*/
static void do_block_remove(unsigned long number, struct ppc64_tlb_batch *batch,
unsigned long *param)
{
unsigned long vpn;
unsigned long i, pix = 0;
unsigned long index, shift, slot, current_vpgb, vpgb;
real_pte_t pte;
int psize, ssize;
psize = batch->psize;
ssize = batch->ssize;
for (i = 0; i < number; i++) {
vpn = batch->vpn[i];
pte = batch->pte[i];
pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
/*
			 * Shift 3 more bits to the right to get an
			 * 8-page aligned virtual address.
*/
vpgb = (vpn >> (shift - VPN_SHIFT + 3));
if (!pix || vpgb != current_vpgb) {
/*
* Need to start a new 8 pages block, flush
* the current one if needed.
*/
if (pix)
(void)call_block_remove(pix, param,
true);
current_vpgb = vpgb;
param[0] = hpte_encode_avpn(vpn, psize,
ssize);
pix = 1;
}
slot = compute_slot(pte, vpn, index, shift, ssize);
param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot;
if (pix == PLPAR_HCALL9_BUFSIZE) {
pix = call_block_remove(pix, param, false);
/*
* pix = 0 means that all the entries were
* removed, we can start a new block.
* Otherwise, this means that there are entries
* to retry, and pix points to latest one, so
* we should increment it and try to continue
* the same block.
*/
if (pix)
pix++;
}
} pte_iterate_hashed_end();
}
if (pix)
(void)call_block_remove(pix, param, true);
}
/*
* TLB Block Invalidate Characteristics
*
* These characteristics define the size of the block the hcall H_BLOCK_REMOVE
 * is able to process for each pair of segment base page size and actual
 * page size.
 *
 * The ibm,get-system-parameter property returns a buffer with the
 * following layout:
* following layout:
*
* [ 2 bytes size of the RTAS buffer (excluding these 2 bytes) ]
* -----------------
* TLB Block Invalidate Specifiers:
* [ 1 byte LOG base 2 of the TLB invalidate block size being specified ]
* [ 1 byte Number of page sizes (N) that are supported for the specified
* TLB invalidate block size ]
* [ 1 byte Encoded segment base page size and actual page size
* MSB=0 means 4k segment base page size and actual page size
* MSB=1 the penc value in mmu_psize_def ]
* ...
* -----------------
* Next TLB Block Invalidate Specifiers...
* -----------------
* [ 0 ]
*/
static inline void set_hblkrm_bloc_size(int bpsize, int psize,
unsigned int block_size)
{
if (block_size > hblkrm_size[bpsize][psize])
hblkrm_size[bpsize][psize] = block_size;
}
/*
* Decode the Encoded segment base page size and actual page size.
* PAPR specifies:
* - bit 7 is the L bit
* - bits 0-5 are the penc value
* If the L bit is 0, this means 4K segment base page size and actual page size
* otherwise the penc value should be read.
*/
#define HBLKRM_L_MASK 0x80
#define HBLKRM_PENC_MASK 0x3f
static inline void __init check_lp_set_hblkrm(unsigned int lp,
unsigned int block_size)
{
unsigned int bpsize, psize;
/* First, check the L bit, if not set, this means 4K */
if ((lp & HBLKRM_L_MASK) == 0) {
set_hblkrm_bloc_size(MMU_PAGE_4K, MMU_PAGE_4K, block_size);
return;
}
lp &= HBLKRM_PENC_MASK;
for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++) {
struct mmu_psize_def *def = &mmu_psize_defs[bpsize];
for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
if (def->penc[psize] == lp) {
set_hblkrm_bloc_size(bpsize, psize, block_size);
return;
}
}
}
}
/*
* The size of the TLB Block Invalidate Characteristics is variable. But at the
 * maximum it will be the number of possible page sizes * 2 + 10 bytes.
* Currently MMU_PAGE_COUNT is 16, which means 42 bytes. Use a cache line size
* (128 bytes) for the buffer to get plenty of space.
*/
#define SPLPAR_TLB_BIC_MAXLENGTH 128
void __init pseries_lpar_read_hblkrm_characteristics(void)
{
static struct papr_sysparm_buf buf __initdata;
int len, idx, bpsize;
if (!firmware_has_feature(FW_FEATURE_BLOCK_REMOVE))
return;
if (papr_sysparm_get(PAPR_SYSPARM_TLB_BLOCK_INVALIDATE_ATTRS, &buf))
return;
len = be16_to_cpu(buf.len);
if (len > SPLPAR_TLB_BIC_MAXLENGTH) {
pr_warn("%s too large returned buffer %d", __func__, len);
return;
}
idx = 0;
while (idx < len) {
u8 block_shift = buf.val[idx++];
u32 block_size;
unsigned int npsize;
if (!block_shift)
break;
block_size = 1 << block_shift;
for (npsize = buf.val[idx++];
npsize > 0 && idx < len; npsize--)
check_lp_set_hblkrm((unsigned int)buf.val[idx++],
block_size);
}
for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
for (idx = 0; idx < MMU_PAGE_COUNT; idx++)
if (hblkrm_size[bpsize][idx])
pr_info("H_BLOCK_REMOVE supports base psize:%d psize:%d block size:%d",
bpsize, idx, hblkrm_size[bpsize][idx]);
}
/*
* Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
* lock.
*/
static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
{
unsigned long vpn;
unsigned long i, pix, rc;
unsigned long flags = 0;
struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
unsigned long param[PLPAR_HCALL9_BUFSIZE];
unsigned long index, shift, slot;
real_pte_t pte;
int psize, ssize;
if (lock_tlbie)
spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
if (is_supported_hlbkrm(batch->psize, batch->psize)) {
do_block_remove(number, batch, param);
goto out;
}
psize = batch->psize;
ssize = batch->ssize;
pix = 0;
for (i = 0; i < number; i++) {
vpn = batch->vpn[i];
pte = batch->pte[i];
pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
slot = compute_slot(pte, vpn, index, shift, ssize);
if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
/*
* lpar doesn't use the passed actual page size
*/
pSeries_lpar_hpte_invalidate(slot, vpn, psize,
0, ssize, local);
} else {
param[pix] = HBR_REQUEST | HBR_AVPN | slot;
param[pix+1] = hpte_encode_avpn(vpn, psize,
ssize);
pix += 2;
if (pix == 8) {
rc = plpar_hcall9(H_BULK_REMOVE, param,
param[0], param[1], param[2],
param[3], param[4], param[5],
param[6], param[7]);
BUG_ON(rc != H_SUCCESS);
pix = 0;
}
}
} pte_iterate_hashed_end();
}
if (pix) {
param[pix] = HBR_END;
rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1],
param[2], param[3], param[4], param[5],
param[6], param[7]);
BUG_ON(rc != H_SUCCESS);
}
out:
if (lock_tlbie)
spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}
static int __init disable_bulk_remove(char *str)
{
if (strcmp(str, "off") == 0 &&
firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
pr_info("Disabling BULK_REMOVE firmware feature");
powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
}
return 1;
}
__setup("bulk_remove=", disable_bulk_remove);
#define HPT_RESIZE_TIMEOUT 10000 /* ms */
struct hpt_resize_state {
unsigned long shift;
int commit_rc;
};
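/*
 * Descriptive comment (added): runs under stop_machine().  Commit the
 * prepared resize with H_RESIZE_HPT_COMMIT and, on success, update the cached
 * HPT geometry (ppc64_pft_size, htab_size_bytes, htab_hash_mask).
 */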
static int pseries_lpar_resize_hpt_commit(void *data)
{
struct hpt_resize_state *state = data;
state->commit_rc = plpar_resize_hpt_commit(0, state->shift);
if (state->commit_rc != H_SUCCESS)
return -EIO;
/* Hypervisor has transitioned the HTAB, update our globals */
ppc64_pft_size = state->shift;
htab_size_bytes = 1UL << ppc64_pft_size;
htab_hash_mask = (htab_size_bytes >> 7) - 1;
return 0;
}
/*
* Must be called in process context. The caller must hold the
* cpus_lock.
*/
static int pseries_lpar_resize_hpt(unsigned long shift)
{
struct hpt_resize_state state = {
.shift = shift,
.commit_rc = H_FUNCTION,
};
unsigned int delay, total_delay = 0;
int rc;
ktime_t t0, t1, t2;
might_sleep();
if (!firmware_has_feature(FW_FEATURE_HPT_RESIZE))
return -ENODEV;
pr_info("Attempting to resize HPT to shift %lu\n", shift);
t0 = ktime_get();
rc = plpar_resize_hpt_prepare(0, shift);
while (H_IS_LONG_BUSY(rc)) {
delay = get_longbusy_msecs(rc);
total_delay += delay;
if (total_delay > HPT_RESIZE_TIMEOUT) {
/* prepare with shift==0 cancels an in-progress resize */
rc = plpar_resize_hpt_prepare(0, 0);
if (rc != H_SUCCESS)
pr_warn("Unexpected error %d cancelling timed out HPT resize\n",
rc);
return -ETIMEDOUT;
}
msleep(delay);
rc = plpar_resize_hpt_prepare(0, shift);
}
switch (rc) {
case H_SUCCESS:
/* Continue on */
break;
case H_PARAMETER:
pr_warn("Invalid argument from H_RESIZE_HPT_PREPARE\n");
return -EINVAL;
case H_RESOURCE:
pr_warn("Operation not permitted from H_RESIZE_HPT_PREPARE\n");
return -EPERM;
default:
pr_warn("Unexpected error %d from H_RESIZE_HPT_PREPARE\n", rc);
return -EIO;
}
t1 = ktime_get();
rc = stop_machine_cpuslocked(pseries_lpar_resize_hpt_commit,
&state, NULL);
t2 = ktime_get();
if (rc != 0) {
switch (state.commit_rc) {
case H_PTEG_FULL:
return -ENOSPC;
default:
pr_warn("Unexpected error %d from H_RESIZE_HPT_COMMIT\n",
state.commit_rc);
return -EIO;
};
}
pr_info("HPT resize to shift %lu complete (%lld ms / %lld ms)\n",
shift, (long long) ktime_ms_delta(t1, t0),
(long long) ktime_ms_delta(t2, t1));
return 0;
}
void __init hpte_init_pseries(void)
{
mmu_hash_ops.hpte_invalidate = pSeries_lpar_hpte_invalidate;
mmu_hash_ops.hpte_updatepp = pSeries_lpar_hpte_updatepp;
mmu_hash_ops.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp;
mmu_hash_ops.hpte_insert = pSeries_lpar_hpte_insert;
mmu_hash_ops.hpte_remove = pSeries_lpar_hpte_remove;
mmu_hash_ops.hpte_removebolted = pSeries_lpar_hpte_removebolted;
mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range;
mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all;
mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
/*
* On POWER9, we need to do a H_REGISTER_PROC_TBL hcall
* to inform the hypervisor that we wish to use the HPT.
*/
if (cpu_has_feature(CPU_FTR_ARCH_300))
pseries_lpar_register_process_table(0, 0, 0);
}
#endif /* CONFIG_PPC_64S_HASH_MMU */
#ifdef CONFIG_PPC_RADIX_MMU
void __init radix_init_pseries(void)
{
pr_info("Using radix MMU under hypervisor\n");
pseries_lpar_register_process_table(__pa(process_tb),
0, PRTB_SIZE_SHIFT - 12);
}
#endif
#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT;
static int __init cmo_free_hint(char *str)
{
char *parm;
parm = strstrip(str);
if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
pr_info("%s: CMO free page hinting is not active.\n", __func__);
cmo_free_hint_flag = 0;
return 1;
}
cmo_free_hint_flag = 1;
pr_info("%s: CMO free page hinting is active.\n", __func__);
if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
return 1;
return 0;
}
__setup("cmo_free_hint=", cmo_free_hint);
static void pSeries_set_page_state(struct page *page, int order,
unsigned long state)
{
int i, j;
unsigned long cmo_page_sz, addr;
cmo_page_sz = cmo_get_page_size();
addr = __pa((unsigned long)page_address(page));
for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
}
}
void arch_free_page(struct page *page, int order)
{
if (radix_enabled())
return;
if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO))
return;
pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
}
EXPORT_SYMBOL(arch_free_page);
#endif /* CONFIG_PPC_SMLPAR */
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_TRACEPOINTS
#ifdef CONFIG_JUMP_LABEL
struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;
int hcall_tracepoint_regfunc(void)
{
static_key_slow_inc(&hcall_tracepoint_key);
return 0;
}
void hcall_tracepoint_unregfunc(void)
{
static_key_slow_dec(&hcall_tracepoint_key);
}
#else
/*
* We optimise our hcall path by placing hcall_tracepoint_refcount
* directly in the TOC so we can check if the hcall tracepoints are
* enabled via a single load.
*/
/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;
int hcall_tracepoint_regfunc(void)
{
hcall_tracepoint_refcount++;
return 0;
}
void hcall_tracepoint_unregfunc(void)
{
hcall_tracepoint_refcount--;
}
#endif
/*
* Keep track of hcall tracing depth and prevent recursion. Warn if any is
* detected because it may indicate a problem. This will not catch all
* problems with tracing code making hcalls, because the tracing might have
* been invoked from a non-hcall, so the first hcall could recurse into it
 * without warning here, but this is better than nothing.
*
* Hcalls with specific problems being traced should use the _notrace
* plpar_hcall variants.
*/
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
notrace void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
unsigned long flags;
unsigned int *depth;
local_irq_save(flags);
depth = this_cpu_ptr(&hcall_trace_depth);
if (WARN_ON_ONCE(*depth))
goto out;
(*depth)++;
preempt_disable();
trace_hcall_entry(opcode, args);
(*depth)--;
out:
local_irq_restore(flags);
}
notrace void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
{
unsigned long flags;
unsigned int *depth;
local_irq_save(flags);
depth = this_cpu_ptr(&hcall_trace_depth);
if (*depth) /* Don't warn again on the way out */
goto out;
(*depth)++;
trace_hcall_exit(opcode, retval, retbuf);
preempt_enable();
(*depth)--;
out:
local_irq_restore(flags);
}
#endif
/**
* h_get_mpp
* H_GET_MPP hcall returns info in 7 parms
*/
int h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
int rc;
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
rc = plpar_hcall9(H_GET_MPP, retbuf);
mpp_data->entitled_mem = retbuf[0];
mpp_data->mapped_mem = retbuf[1];
mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
mpp_data->pool_num = retbuf[2] & 0xffff;
mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;
mpp_data->pool_size = retbuf[4];
mpp_data->loan_request = retbuf[5];
mpp_data->backing_mem = retbuf[6];
return rc;
}
EXPORT_SYMBOL(h_get_mpp);
int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
{
int rc;
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 };
rc = plpar_hcall9(H_GET_MPP_X, retbuf);
mpp_x_data->coalesced_bytes = retbuf[0];
mpp_x_data->pool_coalesced_bytes = retbuf[1];
mpp_x_data->pool_purr_cycles = retbuf[2];
mpp_x_data->pool_spurr_cycles = retbuf[3];
return rc;
}
#ifdef CONFIG_PPC_64S_HASH_MMU
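/*
 * Descriptive comment (added): invert the VSID scrambling.  Given
 * vsid = (protovsid * multiplier) modulo vsid_modulus, recover protovsid by
 * multiplying with the modular inverse of the multiplier.  The multiplication
 * is split up whenever vsid * modinv would overflow 63 bits.
 */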
static unsigned long __init vsid_unscramble(unsigned long vsid, int ssize)
{
unsigned long protovsid;
unsigned long va_bits = VA_BITS;
unsigned long modinv, vsid_modulus;
unsigned long max_mod_inv, tmp_modinv;
if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
va_bits = 65;
if (ssize == MMU_SEGSIZE_256M) {
modinv = VSID_MULINV_256M;
vsid_modulus = ((1UL << (va_bits - SID_SHIFT)) - 1);
} else {
modinv = VSID_MULINV_1T;
vsid_modulus = ((1UL << (va_bits - SID_SHIFT_1T)) - 1);
}
/*
* vsid outside our range.
*/
if (vsid >= vsid_modulus)
return 0;
/*
	 * If modinv is the modular multiplicative inverse of (x % vsid_modulus)
* and vsid = (protovsid * x) % vsid_modulus, then we say:
* protovsid = (vsid * modinv) % vsid_modulus
*/
/* Check if (vsid * modinv) overflow (63 bits) */
max_mod_inv = 0x7fffffffffffffffull / vsid;
if (modinv < max_mod_inv)
return (vsid * modinv) % vsid_modulus;
tmp_modinv = modinv/max_mod_inv;
modinv %= max_mod_inv;
protovsid = (((vsid * max_mod_inv) % vsid_modulus) * tmp_modinv) % vsid_modulus;
protovsid = (protovsid + vsid * modinv) % vsid_modulus;
return protovsid;
}
static int __init reserve_vrma_context_id(void)
{
unsigned long protovsid;
/*
* Reserve context ids which map to reserved virtual addresses. For now
* we only reserve the context id which maps to the VRMA VSID. We ignore
* the addresses in "ibm,adjunct-virtual-addresses" because we don't
* enable adjunct support via the "ibm,client-architecture-support"
* interface.
*/
protovsid = vsid_unscramble(VRMA_VSID, MMU_SEGSIZE_1T);
hash__reserve_context_id(protovsid >> ESID_BITS_1T);
return 0;
}
machine_device_initcall(pseries, reserve_vrma_context_id);
#endif
#ifdef CONFIG_DEBUG_FS
/* debugfs file interface for vpa data */
static ssize_t vpa_file_read(struct file *filp, char __user *buf, size_t len,
loff_t *pos)
{
int cpu = (long)filp->private_data;
struct lppaca *lppaca = &lppaca_of(cpu);
return simple_read_from_buffer(buf, len, pos, lppaca,
sizeof(struct lppaca));
}
static const struct file_operations vpa_fops = {
.open = simple_open,
.read = vpa_file_read,
.llseek = default_llseek,
};
static int __init vpa_debugfs_init(void)
{
char name[16];
long i;
struct dentry *vpa_dir;
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
return 0;
vpa_dir = debugfs_create_dir("vpa", arch_debugfs_dir);
	/* set up the per-cpu vpa file */
for_each_possible_cpu(i) {
sprintf(name, "cpu-%ld", i);
debugfs_create_file(name, 0400, vpa_dir, (void *)i, &vpa_fops);
}
return 0;
}
machine_arch_initcall(pseries, vpa_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
| linux-master | arch/powerpc/platforms/pseries/lpar.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Handles hot and cold plug of persistent memory regions on pseries.
*/
#define pr_fmt(fmt) "pseries-pmem: " fmt
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h> /* for idle_task_exit */
#include <linux/sched/hotplug.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/plpar_wrappers.h>
#include <asm/topology.h>
#include "pseries.h"
static struct device_node *pmem_node;
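/*
 * Descriptive comment (added): hot-add one persistent memory region.  Acquire
 * the DRC, build its device tree node with configure-connector, then attach
 * the node under ibm,persistent-memory so the of reconfig notifier creates
 * the platform device.
 */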
static ssize_t pmem_drc_add_node(u32 drc_index)
{
struct device_node *dn;
int rc;
pr_debug("Attempting to add pmem node, drc index: %x\n", drc_index);
rc = dlpar_acquire_drc(drc_index);
if (rc) {
pr_err("Failed to acquire DRC, rc: %d, drc index: %x\n",
rc, drc_index);
return -EINVAL;
}
dn = dlpar_configure_connector(cpu_to_be32(drc_index), pmem_node);
if (!dn) {
pr_err("configure-connector failed for drc %x\n", drc_index);
dlpar_release_drc(drc_index);
return -EINVAL;
}
/* NB: The of reconfig notifier creates platform device from the node */
rc = dlpar_attach_node(dn, pmem_node);
if (rc) {
pr_err("Failed to attach node %pOF, rc: %d, drc index: %x\n",
dn, rc, drc_index);
if (dlpar_release_drc(drc_index))
dlpar_free_cc_nodes(dn);
return rc;
}
pr_info("Successfully added %pOF, drc index: %x\n", dn, drc_index);
return 0;
}
static ssize_t pmem_drc_remove_node(u32 drc_index)
{
struct device_node *dn;
uint32_t index;
int rc;
for_each_child_of_node(pmem_node, dn) {
if (of_property_read_u32(dn, "ibm,my-drc-index", &index))
continue;
if (index == drc_index)
break;
}
if (!dn) {
pr_err("Attempting to remove unused DRC index %x\n", drc_index);
return -ENODEV;
}
pr_debug("Attempting to remove %pOF, drc index: %x\n", dn, drc_index);
	/* NB: tears down the ibm,pmemory device as a side-effect */
rc = dlpar_detach_node(dn);
if (rc)
return rc;
rc = dlpar_release_drc(drc_index);
if (rc) {
pr_err("Failed to release drc (%x) for CPU %pOFn, rc: %d\n",
drc_index, dn, rc);
dlpar_attach_node(dn, pmem_node);
return rc;
}
pr_info("Successfully removed PMEM with drc index: %x\n", drc_index);
return 0;
}
int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog)
{
u32 drc_index;
int rc;
/* slim chance, but we might get a hotplug event while booting */
if (!pmem_node)
pmem_node = of_find_node_by_type(NULL, "ibm,persistent-memory");
if (!pmem_node) {
pr_err("Hotplug event for a pmem device, but none exists\n");
return -ENODEV;
}
if (hp_elog->id_type != PSERIES_HP_ELOG_ID_DRC_INDEX) {
pr_err("Unsupported hotplug event type %d\n",
hp_elog->id_type);
return -EINVAL;
}
drc_index = hp_elog->_drc_u.drc_index;
lock_device_hotplug();
if (hp_elog->action == PSERIES_HP_ELOG_ACTION_ADD) {
rc = pmem_drc_add_node(drc_index);
} else if (hp_elog->action == PSERIES_HP_ELOG_ACTION_REMOVE) {
rc = pmem_drc_remove_node(drc_index);
} else {
pr_err("Unsupported hotplug action (%d)\n", hp_elog->action);
rc = -EINVAL;
}
unlock_device_hotplug();
return rc;
}
static const struct of_device_id drc_pmem_match[] = {
{ .type = "ibm,persistent-memory", },
{}
};
static int pseries_pmem_init(void)
{
/*
* Only supported on POWER8 and above.
*/
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return 0;
pmem_node = of_find_node_by_type(NULL, "ibm,persistent-memory");
if (!pmem_node)
return 0;
/*
* The generic OF bus probe/populate handles creating platform devices
* from the child (ibm,pmemory) nodes. The generic code registers an of
* reconfig notifier to handle the hot-add/remove cases too.
*/
of_platform_bus_probe(pmem_node, drc_pmem_match, NULL);
return 0;
}
machine_arch_initcall(pseries, pseries_pmem_init);
| linux-master | arch/powerpc/platforms/pseries/pmem.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pseries CPU Hotplug infrastructure.
*
* Split out from arch/powerpc/platforms/pseries/setup.c
* arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
*
* Peter Bergner, IBM March 2001.
* Copyright (C) 2001 IBM.
* Dave Engebretsen, Peter Bergner, and
* Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
* Plus various changes from other IBM teams...
*
* Copyright (C) 2006 Michael Ellerman, IBM Corporation
*/
#define pr_fmt(fmt) "pseries-hotplug-cpu: " fmt
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h> /* for idle_task_exit */
#include <linux/sched/hotplug.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/plpar_wrappers.h>
#include <asm/topology.h>
#include "pseries.h"
/* This version can't take the spinlock, because it never returns */
static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;
/*
* Record the CPU ids used on each nodes.
* Protected by cpu_add_remove_lock.
*/
static cpumask_var_t node_recorded_ids_map[MAX_NUMNODES];
static void rtas_stop_self(void)
{
static struct rtas_args args;
local_irq_disable();
BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);
panic("Alas, I survived.\n");
}
static void pseries_cpu_offline_self(void)
{
unsigned int hwcpu = hard_smp_processor_id();
local_irq_disable();
idle_task_exit();
if (xive_enabled())
xive_teardown_cpu();
else
xics_teardown_cpu();
unregister_slb_shadow(hwcpu);
unregister_vpa(hwcpu);
rtas_stop_self();
/* Should never get here... */
BUG();
for(;;);
}
static int pseries_cpu_disable(void)
{
int cpu = smp_processor_id();
set_cpu_online(cpu, false);
vdso_data->processorCount--;
	/* fix boot_cpuid here */
if (cpu == boot_cpuid)
boot_cpuid = cpumask_any(cpu_online_mask);
/* FIXME: abstract this to not be platform specific later on */
if (xive_enabled())
xive_smp_disable_cpu();
else
xics_migrate_irqs_away();
cleanup_cpu_mmu_context();
return 0;
}
/*
* pseries_cpu_die: Wait for the cpu to die.
* @cpu: logical processor id of the CPU whose death we're awaiting.
*
* This function is called from the context of the thread which is performing
* the cpu-offline. Here we wait for long enough to allow the cpu in question
* to self-destroy so that the cpu-offline thread can send the CPU_DEAD
* notifications.
*
* OTOH, pseries_cpu_offline_self() is called by the @cpu when it wants to
* self-destruct.
*/
static void pseries_cpu_die(unsigned int cpu)
{
int cpu_status = 1;
unsigned int pcpu = get_hard_smp_processor_id(cpu);
unsigned long timeout = jiffies + msecs_to_jiffies(120000);
while (true) {
cpu_status = smp_query_cpu_stopped(pcpu);
if (cpu_status == QCSS_STOPPED ||
cpu_status == QCSS_HARDWARE_ERROR)
break;
if (time_after(jiffies, timeout)) {
pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n",
cpu, pcpu);
timeout = jiffies + msecs_to_jiffies(120000);
}
cond_resched();
}
if (cpu_status == QCSS_HARDWARE_ERROR) {
pr_warn("CPU %i (hwid %i) reported error while dying\n",
cpu, pcpu);
}
paca_ptrs[cpu]->cpu_start = 0;
}
/**
 * find_cpu_id_range - find a linear range of @nthreads free CPU ids.
 * @nthreads : the number of threads (cpu ids)
 * @assigned_node : the node the ids belong to, or NUMA_NO_NODE if free ids
 * from any node can be picked.
* @cpu_mask: the returned CPU mask.
*
* Returns 0 on success.
*/
static int find_cpu_id_range(unsigned int nthreads, int assigned_node,
cpumask_var_t *cpu_mask)
{
cpumask_var_t candidate_mask;
unsigned int cpu, node;
int rc = -ENOSPC;
if (!zalloc_cpumask_var(&candidate_mask, GFP_KERNEL))
return -ENOMEM;
cpumask_clear(*cpu_mask);
for (cpu = 0; cpu < nthreads; cpu++)
cpumask_set_cpu(cpu, *cpu_mask);
BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));
/* Get a bitmap of unoccupied slots. */
cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
if (assigned_node != NUMA_NO_NODE) {
/*
* Remove free ids previously assigned on the other nodes. We
		 * can walk only online nodes because once a node becomes online
		 * it is never taken offline again.
*/
for_each_online_node(node) {
if (node == assigned_node)
continue;
cpumask_andnot(candidate_mask, candidate_mask,
node_recorded_ids_map[node]);
}
}
if (cpumask_empty(candidate_mask))
goto out;
while (!cpumask_empty(*cpu_mask)) {
if (cpumask_subset(*cpu_mask, candidate_mask))
/* Found a range where we can insert the new cpu(s) */
break;
cpumask_shift_left(*cpu_mask, *cpu_mask, nthreads);
}
if (!cpumask_empty(*cpu_mask))
rc = 0;
out:
free_cpumask_var(candidate_mask);
return rc;
}
/*
* Update cpu_present_mask and paca(s) for a new cpu node. The wrinkle
* here is that a cpu device node may represent multiple logical cpus
* in the SMT case. We must honor the assumption in other code that
* the logical ids for sibling SMT threads x and y are adjacent, such
* that x^1 == y and y^1 == x.
*/
static int pseries_add_processor(struct device_node *np)
{
int len, nthreads, node, cpu, assigned_node;
int rc = 0;
cpumask_var_t cpu_mask;
const __be32 *intserv;
intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
if (!intserv)
return 0;
nthreads = len / sizeof(u32);
if (!alloc_cpumask_var(&cpu_mask, GFP_KERNEL))
return -ENOMEM;
/*
* Fetch from the DT nodes read by dlpar_configure_connector() the NUMA
* node id the added CPU belongs to.
*/
node = of_node_to_nid(np);
if (node < 0 || !node_possible(node))
node = first_online_node;
BUG_ON(node == NUMA_NO_NODE);
assigned_node = node;
cpu_maps_update_begin();
rc = find_cpu_id_range(nthreads, node, &cpu_mask);
if (rc && nr_node_ids > 1) {
/*
* Try again, considering the free CPU ids from the other node.
*/
node = NUMA_NO_NODE;
rc = find_cpu_id_range(nthreads, NUMA_NO_NODE, &cpu_mask);
}
if (rc) {
pr_err("Cannot add cpu %pOF; this system configuration"
" supports %d logical cpus.\n", np, num_possible_cpus());
goto out;
}
for_each_cpu(cpu, cpu_mask) {
BUG_ON(cpu_present(cpu));
set_cpu_present(cpu, true);
set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));
}
/* Record the newly used CPU ids for the associate node. */
cpumask_or(node_recorded_ids_map[assigned_node],
node_recorded_ids_map[assigned_node], cpu_mask);
/*
	 * If node is set to NUMA_NO_NODE, CPU ids have been reused from
	 * another node; remove them from that node's mask.
*/
if (node == NUMA_NO_NODE) {
cpu = cpumask_first(cpu_mask);
pr_warn("Reusing free CPU ids %d-%d from another node\n",
cpu, cpu + nthreads - 1);
for_each_online_node(node) {
if (node == assigned_node)
continue;
cpumask_andnot(node_recorded_ids_map[node],
node_recorded_ids_map[node],
cpu_mask);
}
}
out:
cpu_maps_update_done();
free_cpumask_var(cpu_mask);
return rc;
}
/*
* Update the present map for a cpu node which is going away, and set
* the hard id in the paca(s) to -1 to be consistent with boot time
* convention for non-present cpus.
*/
static void pseries_remove_processor(struct device_node *np)
{
unsigned int cpu;
int len, nthreads, i;
const __be32 *intserv;
u32 thread;
intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
if (!intserv)
return;
nthreads = len / sizeof(u32);
cpu_maps_update_begin();
for (i = 0; i < nthreads; i++) {
thread = be32_to_cpu(intserv[i]);
for_each_present_cpu(cpu) {
if (get_hard_smp_processor_id(cpu) != thread)
continue;
BUG_ON(cpu_online(cpu));
set_cpu_present(cpu, false);
set_hard_smp_processor_id(cpu, -1);
update_numa_cpu_lookup_table(cpu, -1);
break;
}
if (cpu >= nr_cpu_ids)
printk(KERN_WARNING "Could not find cpu to remove "
"with physical id 0x%x\n", thread);
}
cpu_maps_update_done();
}
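/*
 * Descriptive comment (added): offline every present thread listed in the cpu
 * node's "ibm,ppc-interrupt-server#s" property, refusing to take down the
 * last online CPU in the system.
 */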
static int dlpar_offline_cpu(struct device_node *dn)
{
int rc = 0;
unsigned int cpu;
int len, nthreads, i;
const __be32 *intserv;
u32 thread;
intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
if (!intserv)
return -EINVAL;
nthreads = len / sizeof(u32);
cpu_maps_update_begin();
for (i = 0; i < nthreads; i++) {
thread = be32_to_cpu(intserv[i]);
for_each_present_cpu(cpu) {
if (get_hard_smp_processor_id(cpu) != thread)
continue;
if (!cpu_online(cpu))
break;
/*
* device_offline() will return -EBUSY (via cpu_down()) if there
* is only one CPU left. Check it here to fail earlier and with a
* more informative error message, while also retaining the
* cpu_add_remove_lock to be sure that no CPUs are being
* online/offlined during this check.
*/
if (num_online_cpus() == 1) {
pr_warn("Unable to remove last online CPU %pOFn\n", dn);
rc = -EBUSY;
goto out_unlock;
}
cpu_maps_update_done();
rc = device_offline(get_cpu_device(cpu));
if (rc)
goto out;
cpu_maps_update_begin();
break;
}
if (cpu == num_possible_cpus()) {
pr_warn("Could not find cpu to offline with physical id 0x%x\n",
thread);
}
}
out_unlock:
cpu_maps_update_done();
out:
return rc;
}
static int dlpar_online_cpu(struct device_node *dn)
{
int rc = 0;
unsigned int cpu;
int len, nthreads, i;
const __be32 *intserv;
u32 thread;
intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
if (!intserv)
return -EINVAL;
nthreads = len / sizeof(u32);
cpu_maps_update_begin();
for (i = 0; i < nthreads; i++) {
thread = be32_to_cpu(intserv[i]);
for_each_present_cpu(cpu) {
if (get_hard_smp_processor_id(cpu) != thread)
continue;
if (!topology_is_primary_thread(cpu)) {
if (cpu_smt_control != CPU_SMT_ENABLED)
break;
if (!topology_smt_thread_allowed(cpu))
break;
}
cpu_maps_update_done();
find_and_update_cpu_nid(cpu);
rc = device_online(get_cpu_device(cpu));
if (rc) {
dlpar_offline_cpu(dn);
goto out;
}
cpu_maps_update_begin();
break;
}
if (cpu == num_possible_cpus())
printk(KERN_WARNING "Could not find cpu to online "
"with physical id 0x%x\n", thread);
}
cpu_maps_update_done();
out:
return rc;
}
static bool dlpar_cpu_exists(struct device_node *parent, u32 drc_index)
{
struct device_node *child = NULL;
u32 my_drc_index;
bool found;
int rc;
/* Assume cpu doesn't exist */
found = false;
for_each_child_of_node(parent, child) {
rc = of_property_read_u32(child, "ibm,my-drc-index",
&my_drc_index);
if (rc)
continue;
if (my_drc_index == drc_index) {
of_node_put(child);
found = true;
break;
}
}
return found;
}
static bool drc_info_valid_index(struct device_node *parent, u32 drc_index)
{
struct property *info;
struct of_drc_info drc;
const __be32 *value;
u32 index;
int count, i, j;
info = of_find_property(parent, "ibm,drc-info", NULL);
if (!info)
return false;
value = of_prop_next_u32(info, NULL, &count);
/* First value of ibm,drc-info is number of drc-info records */
if (value)
value++;
else
return false;
for (i = 0; i < count; i++) {
if (of_read_drc_info_cell(&info, &value, &drc))
return false;
if (strncmp(drc.drc_type, "CPU", 3))
break;
if (drc_index > drc.last_drc_index)
continue;
index = drc.drc_index_start;
for (j = 0; j < drc.num_sequential_elems; j++) {
if (drc_index == index)
return true;
index += drc.sequential_inc;
}
}
return false;
}
static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)
{
bool found = false;
int rc, index;
if (of_property_present(parent, "ibm,drc-info"))
return drc_info_valid_index(parent, drc_index);
/* Note that the format of the ibm,drc-indexes array is
* the number of entries in the array followed by the array
* of drc values so we start looking at index = 1.
*/
index = 1;
while (!found) {
u32 drc;
rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
index++, &drc);
if (rc)
break;
if (drc == drc_index)
found = true;
}
return found;
}
static int pseries_cpuhp_attach_nodes(struct device_node *dn)
{
struct of_changeset cs;
int ret;
/*
* This device node is unattached but may have siblings; open-code the
* traversal.
*/
for (of_changeset_init(&cs); dn != NULL; dn = dn->sibling) {
ret = of_changeset_attach_node(&cs, dn);
if (ret)
goto out;
}
ret = of_changeset_apply(&cs);
out:
of_changeset_destroy(&cs);
return ret;
}
static ssize_t dlpar_cpu_add(u32 drc_index)
{
struct device_node *dn, *parent;
int rc, saved_rc;
pr_debug("Attempting to add CPU, drc index: %x\n", drc_index);
parent = of_find_node_by_path("/cpus");
if (!parent) {
pr_warn("Failed to find CPU root node \"/cpus\"\n");
return -ENODEV;
}
if (dlpar_cpu_exists(parent, drc_index)) {
of_node_put(parent);
pr_warn("CPU with drc index %x already exists\n", drc_index);
return -EINVAL;
}
if (!valid_cpu_drc_index(parent, drc_index)) {
of_node_put(parent);
pr_warn("Cannot find CPU (drc index %x) to add.\n", drc_index);
return -EINVAL;
}
rc = dlpar_acquire_drc(drc_index);
if (rc) {
pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n",
rc, drc_index);
of_node_put(parent);
return -EINVAL;
}
dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
if (!dn) {
pr_warn("Failed call to configure-connector, drc index: %x\n",
drc_index);
dlpar_release_drc(drc_index);
of_node_put(parent);
return -EINVAL;
}
rc = pseries_cpuhp_attach_nodes(dn);
/* Regardless we are done with parent now */
of_node_put(parent);
if (rc) {
saved_rc = rc;
pr_warn("Failed to attach node %pOFn, rc: %d, drc index: %x\n",
dn, rc, drc_index);
rc = dlpar_release_drc(drc_index);
if (!rc)
dlpar_free_cc_nodes(dn);
return saved_rc;
}
update_numa_distance(dn);
rc = dlpar_online_cpu(dn);
if (rc) {
saved_rc = rc;
pr_warn("Failed to online cpu %pOFn, rc: %d, drc index: %x\n",
dn, rc, drc_index);
rc = dlpar_detach_node(dn);
if (!rc)
dlpar_release_drc(drc_index);
return saved_rc;
}
pr_debug("Successfully added CPU %pOFn, drc index: %x\n", dn,
drc_index);
return rc;
}
static unsigned int pseries_cpuhp_cache_use_count(const struct device_node *cachedn)
{
unsigned int use_count = 0;
struct device_node *dn, *tn;
WARN_ON(!of_node_is_type(cachedn, "cache"));
for_each_of_cpu_node(dn) {
tn = of_find_next_cache_node(dn);
of_node_put(tn);
if (tn == cachedn)
use_count++;
}
for_each_node_by_type(dn, "cache") {
tn = of_find_next_cache_node(dn);
of_node_put(tn);
if (tn == cachedn)
use_count++;
}
return use_count;
}
static int pseries_cpuhp_detach_nodes(struct device_node *cpudn)
{
struct device_node *dn;
struct of_changeset cs;
int ret = 0;
of_changeset_init(&cs);
ret = of_changeset_detach_node(&cs, cpudn);
if (ret)
goto out;
dn = cpudn;
while ((dn = of_find_next_cache_node(dn))) {
if (pseries_cpuhp_cache_use_count(dn) > 1) {
of_node_put(dn);
break;
}
ret = of_changeset_detach_node(&cs, dn);
of_node_put(dn);
if (ret)
goto out;
}
ret = of_changeset_apply(&cs);
out:
of_changeset_destroy(&cs);
return ret;
}
static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
{
int rc;
pr_debug("Attempting to remove CPU %pOFn, drc index: %x\n",
dn, drc_index);
rc = dlpar_offline_cpu(dn);
if (rc) {
pr_warn("Failed to offline CPU %pOFn, rc: %d\n", dn, rc);
return -EINVAL;
}
rc = dlpar_release_drc(drc_index);
if (rc) {
pr_warn("Failed to release drc (%x) for CPU %pOFn, rc: %d\n",
drc_index, dn, rc);
dlpar_online_cpu(dn);
return rc;
}
rc = pseries_cpuhp_detach_nodes(dn);
if (rc) {
int saved_rc = rc;
pr_warn("Failed to detach CPU %pOFn, rc: %d", dn, rc);
rc = dlpar_acquire_drc(drc_index);
if (!rc)
dlpar_online_cpu(dn);
return saved_rc;
}
pr_debug("Successfully removed CPU, drc index: %x\n", drc_index);
return 0;
}
static struct device_node *cpu_drc_index_to_dn(u32 drc_index)
{
struct device_node *dn;
u32 my_index;
int rc;
for_each_node_by_type(dn, "cpu") {
rc = of_property_read_u32(dn, "ibm,my-drc-index", &my_index);
if (rc)
continue;
if (my_index == drc_index)
break;
}
return dn;
}
static int dlpar_cpu_remove_by_index(u32 drc_index)
{
struct device_node *dn;
int rc;
dn = cpu_drc_index_to_dn(drc_index);
if (!dn) {
pr_warn("Cannot find CPU (drc index %x) to remove\n",
drc_index);
return -ENODEV;
}
rc = dlpar_cpu_remove(dn, drc_index);
of_node_put(dn);
return rc;
}
int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
{
u32 drc_index;
int rc;
drc_index = hp_elog->_drc_u.drc_index;
lock_device_hotplug();
switch (hp_elog->action) {
case PSERIES_HP_ELOG_ACTION_REMOVE:
if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
rc = dlpar_cpu_remove_by_index(drc_index);
/*
* Setting the isolation state of an UNISOLATED/CONFIGURED
* device to UNISOLATE is a no-op, but the hypervisor can
* use it as a hint that the CPU removal failed.
*/
if (rc)
dlpar_unisolate_drc(drc_index);
}
else
rc = -EINVAL;
break;
case PSERIES_HP_ELOG_ACTION_ADD:
if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
rc = dlpar_cpu_add(drc_index);
else
rc = -EINVAL;
break;
default:
pr_err("Invalid action (%d) specified\n", hp_elog->action);
rc = -EINVAL;
break;
}
unlock_device_hotplug();
return rc;
}
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
{
u32 drc_index;
int rc;
rc = kstrtou32(buf, 0, &drc_index);
if (rc)
return -EINVAL;
rc = dlpar_cpu_add(drc_index);
return rc ? rc : count;
}
static ssize_t dlpar_cpu_release(const char *buf, size_t count)
{
struct device_node *dn;
u32 drc_index;
int rc;
dn = of_find_node_by_path(buf);
if (!dn)
return -EINVAL;
rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
if (rc) {
of_node_put(dn);
return -EINVAL;
}
rc = dlpar_cpu_remove(dn, drc_index);
of_node_put(dn);
return rc ? rc : count;
}
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
static int pseries_smp_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct of_reconfig_data *rd = data;
int err = 0;
switch (action) {
case OF_RECONFIG_ATTACH_NODE:
err = pseries_add_processor(rd->dn);
break;
case OF_RECONFIG_DETACH_NODE:
pseries_remove_processor(rd->dn);
break;
}
return notifier_from_errno(err);
}
static struct notifier_block pseries_smp_nb = {
.notifier_call = pseries_smp_notifier,
};
void __init pseries_cpu_hotplug_init(void)
{
int qcss_tok;
rtas_stop_self_token = rtas_function_token(RTAS_FN_STOP_SELF);
qcss_tok = rtas_function_token(RTAS_FN_QUERY_CPU_STOPPED_STATE);
if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
qcss_tok == RTAS_UNKNOWN_SERVICE) {
printk(KERN_INFO "CPU Hotplug not supported by firmware "
"- disabling.\n");
return;
}
smp_ops->cpu_offline_self = pseries_cpu_offline_self;
smp_ops->cpu_disable = pseries_cpu_disable;
smp_ops->cpu_die = pseries_cpu_die;
}
static int __init pseries_dlpar_init(void)
{
unsigned int node;
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
ppc_md.cpu_probe = dlpar_cpu_probe;
ppc_md.cpu_release = dlpar_cpu_release;
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
/* Processors can be added/removed only on LPAR */
if (firmware_has_feature(FW_FEATURE_LPAR)) {
for_each_node(node) {
if (!alloc_cpumask_var_node(&node_recorded_ids_map[node],
GFP_KERNEL, node))
return -ENOMEM;
/* Record ids of CPU added at boot time */
cpumask_copy(node_recorded_ids_map[node],
cpumask_of_node(node));
}
of_reconfig_notifier_register(&pseries_smp_nb);
}
return 0;
}
machine_arch_initcall(pseries, pseries_dlpar_init);
| linux-master | arch/powerpc/platforms/pseries/hotplug-cpu.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2022-23 IBM Corp.
*/
#define pr_fmt(fmt) "vas: " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include "vas.h"
#ifdef CONFIG_SYSFS
static struct kobject *pseries_vas_kobj;
static struct kobject *gzip_caps_kobj;
struct vas_caps_entry {
struct kobject kobj;
struct vas_cop_feat_caps *caps;
};
#define to_caps_entry(entry) container_of(entry, struct vas_caps_entry, kobj)
/*
* This store function handles the notification from drmgr when the
* QoS credits are changed.
*/
static ssize_t update_total_credits_store(struct vas_cop_feat_caps *caps,
const char *buf, size_t count)
{
int err;
u16 creds;
err = kstrtou16(buf, 0, &creds);
/*
* The management console notifies the OS of the new QoS credits
* before it notifies the hypervisor. So the OS has to use this
* new credit value and reconfigure VAS windows (close or reopen
* depending on the credits available) instead of relying on the
* VAS QoS capabilities reported by the hypervisor.
*/
if (!err)
err = vas_reconfig_capabilties(caps->win_type, creds);
if (err)
return -EINVAL;
pr_info("Set QoS total credits %u\n", creds);
return count;
}
#define sysfs_caps_entry_read(_name) \
static ssize_t _name##_show(struct vas_cop_feat_caps *caps, char *buf) \
{ \
return sprintf(buf, "%d\n", atomic_read(&caps->_name)); \
}
struct vas_sysfs_entry {
struct attribute attr;
ssize_t (*show)(struct vas_cop_feat_caps *, char *);
ssize_t (*store)(struct vas_cop_feat_caps *, const char *, size_t);
};
#define VAS_ATTR_RO(_name) \
sysfs_caps_entry_read(_name); \
static struct vas_sysfs_entry _name##_attribute = __ATTR(_name, \
0444, _name##_show, NULL);
/*
* Create sysfs interface:
* /sys/devices/virtual/misc/vas/vas0/gzip/default_capabilities
* This directory contains the following VAS GZIP capabilities
* for the default credit type.
* /sys/devices/virtual/misc/vas/vas0/gzip/default_capabilities/nr_total_credits
* Total number of default credits assigned to the LPAR which
* can be changed with DLPAR operation.
* /sys/devices/virtual/misc/vas/vas0/gzip/default_capabilities/nr_used_credits
* Number of credits used by the user space. One credit will
* be assigned for each window open.
*
* /sys/devices/virtual/misc/vas/vas0/gzip/qos_capabilities
* This directory contains the following VAS GZIP capabilities
* for the Quality of Service (QoS) credit type.
* /sys/devices/virtual/misc/vas/vas0/gzip/qos_capabilities/nr_total_credits
* Total number of QoS credits assigned to the LPAR. The user
* has to define this value using HMC interface. It can be
* changed dynamically by the user.
* /sys/devices/virtual/misc/vas/vas0/gzip/qos_capabilities/nr_used_credits
* Number of credits used by the user space.
* /sys/devices/virtual/misc/vas/vas0/gzip/qos_capabilities/update_total_credits
* Update total QoS credits dynamically
*/
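/*
 * Illustrative sketch of how a management tool such as drmgr might
 * consume this interface from user space, assuming the sysfs paths
 * documented above; the tool name and credit value are hypothetical.
 *
 *	// read the current QoS credit count
 *	int fd = open("/sys/devices/virtual/misc/vas/vas0/gzip/"
 *		      "qos_capabilities/nr_total_credits", O_RDONLY);
 *	char buf[16];
 *	read(fd, buf, sizeof(buf) - 1);
 *	close(fd);
 *
 *	// push a new QoS credit total after an HMC change
 *	fd = open("/sys/devices/virtual/misc/vas/vas0/gzip/"
 *		  "qos_capabilities/update_total_credits", O_WRONLY);
 *	write(fd, "8", 1);
 *	close(fd);
 */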
VAS_ATTR_RO(nr_total_credits);
VAS_ATTR_RO(nr_used_credits);
static struct vas_sysfs_entry update_total_credits_attribute =
__ATTR(update_total_credits, 0200, NULL, update_total_credits_store);
static struct attribute *vas_def_capab_attrs[] = {
&nr_total_credits_attribute.attr,
&nr_used_credits_attribute.attr,
NULL,
};
ATTRIBUTE_GROUPS(vas_def_capab);
static struct attribute *vas_qos_capab_attrs[] = {
&nr_total_credits_attribute.attr,
&nr_used_credits_attribute.attr,
&update_total_credits_attribute.attr,
NULL,
};
ATTRIBUTE_GROUPS(vas_qos_capab);
static ssize_t vas_type_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct vas_caps_entry *centry;
struct vas_cop_feat_caps *caps;
struct vas_sysfs_entry *entry;
centry = to_caps_entry(kobj);
caps = centry->caps;
entry = container_of(attr, struct vas_sysfs_entry, attr);
if (!entry->show)
return -EIO;
return entry->show(caps, buf);
}
static ssize_t vas_type_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
struct vas_caps_entry *centry;
struct vas_cop_feat_caps *caps;
struct vas_sysfs_entry *entry;
centry = to_caps_entry(kobj);
caps = centry->caps;
entry = container_of(attr, struct vas_sysfs_entry, attr);
if (!entry->store)
return -EIO;
return entry->store(caps, buf, count);
}
static void vas_type_release(struct kobject *kobj)
{
struct vas_caps_entry *centry = to_caps_entry(kobj);
kfree(centry);
}
static const struct sysfs_ops vas_sysfs_ops = {
.show = vas_type_show,
.store = vas_type_store,
};
static struct kobj_type vas_def_attr_type = {
.release = vas_type_release,
.sysfs_ops = &vas_sysfs_ops,
.default_groups = vas_def_capab_groups,
};
static struct kobj_type vas_qos_attr_type = {
.release = vas_type_release,
.sysfs_ops = &vas_sysfs_ops,
.default_groups = vas_qos_capab_groups,
};
static char *vas_caps_kobj_name(struct vas_caps_entry *centry,
struct kobject **kobj)
{
struct vas_cop_feat_caps *caps = centry->caps;
if (caps->descriptor == VAS_GZIP_QOS_CAPABILITIES) {
kobject_init(&centry->kobj, &vas_qos_attr_type);
*kobj = gzip_caps_kobj;
return "qos_capabilities";
} else if (caps->descriptor == VAS_GZIP_DEFAULT_CAPABILITIES) {
kobject_init(&centry->kobj, &vas_def_attr_type);
*kobj = gzip_caps_kobj;
return "default_capabilities";
} else
return "Unknown";
}
/*
* Add feature specific capability dir entry.
* Ex: VDefGzip or VQosGzip
*/
int sysfs_add_vas_caps(struct vas_cop_feat_caps *caps)
{
struct vas_caps_entry *centry;
struct kobject *kobj = NULL;
int ret = 0;
char *name;
centry = kzalloc(sizeof(*centry), GFP_KERNEL);
if (!centry)
return -ENOMEM;
centry->caps = caps;
name = vas_caps_kobj_name(centry, &kobj);
if (kobj) {
ret = kobject_add(&centry->kobj, kobj, "%s", name);
if (ret) {
pr_err("VAS: sysfs kobject add / event failed %d\n",
ret);
kobject_put(&centry->kobj);
}
}
return ret;
}
static struct miscdevice vas_miscdev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "vas",
};
/*
* Add VAS and VasCaps (overall capabilities) dir entries.
*/
int __init sysfs_pseries_vas_init(struct vas_all_caps *vas_caps)
{
int ret;
ret = misc_register(&vas_miscdev);
if (ret < 0) {
pr_err("%s: register vas misc device failed\n", __func__);
return ret;
}
/*
* The hypervisor does not expose multiple VAS instances on pseries
* (unlike PowerNV, where multiple VAS instances are visible). So
* create a single 'vas0' directory on pseries.
*/
pseries_vas_kobj = kobject_create_and_add("vas0",
&vas_miscdev.this_device->kobj);
if (!pseries_vas_kobj) {
misc_deregister(&vas_miscdev);
pr_err("Failed to create VAS sysfs entry\n");
return -ENOMEM;
}
if ((vas_caps->feat_type & VAS_GZIP_QOS_FEAT_BIT) ||
(vas_caps->feat_type & VAS_GZIP_DEF_FEAT_BIT)) {
gzip_caps_kobj = kobject_create_and_add("gzip",
pseries_vas_kobj);
if (!gzip_caps_kobj) {
pr_err("Failed to create VAS GZIP capability entry\n");
kobject_put(pseries_vas_kobj);
misc_deregister(&vas_miscdev);
return -ENOMEM;
}
}
return 0;
}
#else
int sysfs_add_vas_caps(struct vas_cop_feat_caps *caps)
{
return 0;
}
int __init sysfs_pseries_vas_init(struct vas_all_caps *vas_caps)
{
return 0;
}
#endif
| linux-master | arch/powerpc/platforms/pseries/vas-sysfs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2001 Dave Engebretsen IBM Corporation
*/
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/fs.h>
#include <linux/reboot.h>
#include <linux/irq_work.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/mce.h>
#include "pseries.h"
static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(ras_log_buf_lock);
static int ras_check_exception_token;
#define EPOW_SENSOR_TOKEN 9
#define EPOW_SENSOR_INDEX 0
/* EPOW events counter variable */
static int num_epow_events;
static irqreturn_t ras_hotplug_interrupt(int irq, void *dev_id);
static irqreturn_t ras_epow_interrupt(int irq, void *dev_id);
static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
/* RTAS pseries MCE errorlog section. */
struct pseries_mc_errorlog {
__be32 fru_id;
__be32 proc_id;
u8 error_type;
/*
* sub_err_type (1 byte). Bit fields depend on error_type
*
* MSB0
* |
* V
* 01234567
* XXXXXXXX
*
* For error_type == MC_ERROR_TYPE_UE
* XXXXXXXX
* X 1: Permanent or Transient UE.
* X 1: Effective address provided.
* X 1: Logical address provided.
* XX 2: Reserved.
* XXX 3: Type of UE error.
*
* For error_type == MC_ERROR_TYPE_SLB/ERAT/TLB
* XXXXXXXX
* X 1: Effective address provided.
* XXXXX 5: Reserved.
* XX 2: Type of SLB/ERAT/TLB error.
*
* For error_type == MC_ERROR_TYPE_CTRL_MEM_ACCESS
* XXXXXXXX
* X 1: Error causing address provided.
* XXX 3: Type of error.
* XXXX 4: Reserved.
*/
u8 sub_err_type;
u8 reserved_1[6];
__be64 effective_address;
__be64 logical_address;
} __packed;
/* RTAS pseries MCE error types */
#define MC_ERROR_TYPE_UE 0x00
#define MC_ERROR_TYPE_SLB 0x01
#define MC_ERROR_TYPE_ERAT 0x02
#define MC_ERROR_TYPE_UNKNOWN 0x03
#define MC_ERROR_TYPE_TLB 0x04
#define MC_ERROR_TYPE_D_CACHE 0x05
#define MC_ERROR_TYPE_I_CACHE 0x07
#define MC_ERROR_TYPE_CTRL_MEM_ACCESS 0x08
/* RTAS pseries MCE error sub types */
#define MC_ERROR_UE_INDETERMINATE 0
#define MC_ERROR_UE_IFETCH 1
#define MC_ERROR_UE_PAGE_TABLE_WALK_IFETCH 2
#define MC_ERROR_UE_LOAD_STORE 3
#define MC_ERROR_UE_PAGE_TABLE_WALK_LOAD_STORE 4
#define UE_EFFECTIVE_ADDR_PROVIDED 0x40
#define UE_LOGICAL_ADDR_PROVIDED 0x20
#define MC_EFFECTIVE_ADDR_PROVIDED 0x80
#define MC_ERROR_SLB_PARITY 0
#define MC_ERROR_SLB_MULTIHIT 1
#define MC_ERROR_SLB_INDETERMINATE 2
#define MC_ERROR_ERAT_PARITY 1
#define MC_ERROR_ERAT_MULTIHIT 2
#define MC_ERROR_ERAT_INDETERMINATE 3
#define MC_ERROR_TLB_PARITY 1
#define MC_ERROR_TLB_MULTIHIT 2
#define MC_ERROR_TLB_INDETERMINATE 3
#define MC_ERROR_CTRL_MEM_ACCESS_PTABLE_WALK 0
#define MC_ERROR_CTRL_MEM_ACCESS_OP_ACCESS 1
static inline u8 rtas_mc_error_sub_type(const struct pseries_mc_errorlog *mlog)
{
switch (mlog->error_type) {
case MC_ERROR_TYPE_UE:
return (mlog->sub_err_type & 0x07);
case MC_ERROR_TYPE_SLB:
case MC_ERROR_TYPE_ERAT:
case MC_ERROR_TYPE_TLB:
return (mlog->sub_err_type & 0x03);
case MC_ERROR_TYPE_CTRL_MEM_ACCESS:
return (mlog->sub_err_type & 0x70) >> 4;
default:
return 0;
}
}
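/*
 * Worked example, derived from the bit layout documented in struct
 * pseries_mc_errorlog above: for an SLB error with sub_err_type == 0x81,
 * MSB0 bit 0 is set, so an effective address is provided
 * (MC_EFFECTIVE_ADDR_PROVIDED), and the low two bits give sub type 1,
 * i.e. MC_ERROR_SLB_MULTIHIT.
 */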
/*
* Enable the hotplug interrupt late because processing hotplug events may touch other
* devices or systems (e.g. hugepages) that have not been initialized at the
* subsys stage.
*/
static int __init init_ras_hotplug_IRQ(void)
{
struct device_node *np;
/* Hotplug Events */
np = of_find_node_by_path("/event-sources/hot-plug-events");
if (np != NULL) {
if (dlpar_workqueue_init() == 0)
request_event_sources_irqs(np, ras_hotplug_interrupt,
"RAS_HOTPLUG");
of_node_put(np);
}
return 0;
}
machine_late_initcall(pseries, init_ras_hotplug_IRQ);
/*
* Initialize handlers for the set of interrupts caused by hardware errors
* and power system events.
*/
static int __init init_ras_IRQ(void)
{
struct device_node *np;
ras_check_exception_token = rtas_function_token(RTAS_FN_CHECK_EXCEPTION);
/* Internal Errors */
np = of_find_node_by_path("/event-sources/internal-errors");
if (np != NULL) {
request_event_sources_irqs(np, ras_error_interrupt,
"RAS_ERROR");
of_node_put(np);
}
/* EPOW Events */
np = of_find_node_by_path("/event-sources/epow-events");
if (np != NULL) {
request_event_sources_irqs(np, ras_epow_interrupt, "RAS_EPOW");
of_node_put(np);
}
return 0;
}
machine_subsys_initcall(pseries, init_ras_IRQ);
#define EPOW_SHUTDOWN_NORMAL 1
#define EPOW_SHUTDOWN_ON_UPS 2
#define EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS 3
#define EPOW_SHUTDOWN_AMBIENT_TEMPERATURE_TOO_HIGH 4
static void handle_system_shutdown(char event_modifier)
{
switch (event_modifier) {
case EPOW_SHUTDOWN_NORMAL:
pr_emerg("Power off requested\n");
orderly_poweroff(true);
break;
case EPOW_SHUTDOWN_ON_UPS:
pr_emerg("Loss of system power detected. System is running on"
" UPS/battery. Check RTAS error log for details\n");
break;
case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:
pr_emerg("Loss of system critical functions detected. Check"
" RTAS error log for details\n");
orderly_poweroff(true);
break;
case EPOW_SHUTDOWN_AMBIENT_TEMPERATURE_TOO_HIGH:
pr_emerg("High ambient temperature detected. Check RTAS"
" error log for details\n");
orderly_poweroff(true);
break;
default:
pr_err("Unknown power/cooling shutdown event (modifier = %d)\n",
event_modifier);
}
}
struct epow_errorlog {
unsigned char sensor_value;
unsigned char event_modifier;
unsigned char extended_modifier;
unsigned char reserved;
unsigned char platform_reason;
};
#define EPOW_RESET 0
#define EPOW_WARN_COOLING 1
#define EPOW_WARN_POWER 2
#define EPOW_SYSTEM_SHUTDOWN 3
#define EPOW_SYSTEM_HALT 4
#define EPOW_MAIN_ENCLOSURE 5
#define EPOW_POWER_OFF 7
static void rtas_parse_epow_errlog(struct rtas_error_log *log)
{
struct pseries_errorlog *pseries_log;
struct epow_errorlog *epow_log;
char action_code;
char modifier;
pseries_log = get_pseries_errorlog(log, PSERIES_ELOG_SECT_ID_EPOW);
if (pseries_log == NULL)
return;
epow_log = (struct epow_errorlog *)pseries_log->data;
action_code = epow_log->sensor_value & 0xF; /* bottom 4 bits */
modifier = epow_log->event_modifier & 0xF; /* bottom 4 bits */
switch (action_code) {
case EPOW_RESET:
if (num_epow_events) {
pr_info("Non critical power/cooling issue cleared\n");
num_epow_events--;
}
break;
case EPOW_WARN_COOLING:
pr_info("Non-critical cooling issue detected. Check RTAS error"
" log for details\n");
break;
case EPOW_WARN_POWER:
pr_info("Non-critical power issue detected. Check RTAS error"
" log for details\n");
break;
case EPOW_SYSTEM_SHUTDOWN:
handle_system_shutdown(modifier);
break;
case EPOW_SYSTEM_HALT:
pr_emerg("Critical power/cooling issue detected. Check RTAS"
" error log for details. Powering off.\n");
orderly_poweroff(true);
break;
case EPOW_MAIN_ENCLOSURE:
case EPOW_POWER_OFF:
pr_emerg("System about to lose power. Check RTAS error log "
" for details. Powering off immediately.\n");
emergency_sync();
kernel_power_off();
break;
default:
pr_err("Unknown power/cooling event (action code = %d)\n",
action_code);
}
/* Increment epow events counter variable */
if (action_code != EPOW_RESET)
num_epow_events++;
}
static irqreturn_t ras_hotplug_interrupt(int irq, void *dev_id)
{
struct pseries_errorlog *pseries_log;
struct pseries_hp_errorlog *hp_elog;
spin_lock(&ras_log_buf_lock);
rtas_call(ras_check_exception_token, 6, 1, NULL,
RTAS_VECTOR_EXTERNAL_INTERRUPT, virq_to_hw(irq),
RTAS_HOTPLUG_EVENTS, 0, __pa(&ras_log_buf),
rtas_get_error_log_max());
pseries_log = get_pseries_errorlog((struct rtas_error_log *)ras_log_buf,
PSERIES_ELOG_SECT_ID_HOTPLUG);
hp_elog = (struct pseries_hp_errorlog *)pseries_log->data;
/*
* Since PCI hotplug is not currently supported on pseries, put PCI
* hotplug events on the ras_log_buf to be handled by rtas_errd.
*/
if (hp_elog->resource == PSERIES_HP_ELOG_RESOURCE_MEM ||
hp_elog->resource == PSERIES_HP_ELOG_RESOURCE_CPU ||
hp_elog->resource == PSERIES_HP_ELOG_RESOURCE_PMEM)
queue_hotplug_event(hp_elog);
else
log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0);
spin_unlock(&ras_log_buf_lock);
return IRQ_HANDLED;
}
/* Handle environmental and power warning (EPOW) interrupts. */
static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
{
int state;
int critical;
rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state);
if (state > 3)
critical = 1; /* Time Critical */
else
critical = 0;
spin_lock(&ras_log_buf_lock);
rtas_call(ras_check_exception_token, 6, 1, NULL, RTAS_VECTOR_EXTERNAL_INTERRUPT,
virq_to_hw(irq), RTAS_EPOW_WARNING, critical, __pa(&ras_log_buf),
rtas_get_error_log_max());
log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0);
rtas_parse_epow_errlog((struct rtas_error_log *)ras_log_buf);
spin_unlock(&ras_log_buf_lock);
return IRQ_HANDLED;
}
/*
* Handle hardware error interrupts.
*
* RTAS check-exception is called to collect data on the exception. If
* the error is deemed recoverable, we log a warning and return.
* For nonrecoverable errors, an error is logged and we stop all processing
* as quickly as possible in order to prevent propagation of the failure.
*/
static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
{
struct rtas_error_log *rtas_elog;
int status;
int fatal;
spin_lock(&ras_log_buf_lock);
status = rtas_call(ras_check_exception_token, 6, 1, NULL,
RTAS_VECTOR_EXTERNAL_INTERRUPT,
virq_to_hw(irq),
RTAS_INTERNAL_ERROR, 1 /* Time Critical */,
__pa(&ras_log_buf),
rtas_get_error_log_max());
rtas_elog = (struct rtas_error_log *)ras_log_buf;
if (status == 0 &&
rtas_error_severity(rtas_elog) >= RTAS_SEVERITY_ERROR_SYNC)
fatal = 1;
else
fatal = 0;
/* format and print the extended information */
log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, fatal);
if (fatal) {
pr_emerg("Fatal hardware error detected. Check RTAS error"
" log for details. Powering off immediately\n");
emergency_sync();
kernel_power_off();
} else {
pr_err("Recoverable hardware error detected\n");
}
spin_unlock(&ras_log_buf_lock);
return IRQ_HANDLED;
}
/*
* Some versions of FWNMI place the buffer inside the 4kB page starting at
* 0x7000. Other versions place it inside the rtas buffer. We check both.
* Minimum size of the buffer is 16 bytes.
*/
#define VALID_FWNMI_BUFFER(A) \
((((A) >= 0x7000) && ((A) <= 0x8000 - 16)) || \
(((A) >= rtas.base) && ((A) <= (rtas.base + rtas.size - 16))))
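/*
 * Worked example for the fixed window above: the lowest accepted buffer
 * address is 0x7000 and the highest is 0x8000 - 16 = 0x7ff0, so a
 * minimum 16-byte error log always fits within the page.
 */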
static inline struct rtas_error_log *fwnmi_get_errlog(void)
{
return (struct rtas_error_log *)local_paca->mce_data_buf;
}
static __be64 *fwnmi_get_savep(struct pt_regs *regs)
{
unsigned long savep_ra;
/* Mask top two bits */
savep_ra = regs->gpr[3] & ~(0x3UL << 62);
if (!VALID_FWNMI_BUFFER(savep_ra)) {
printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]);
return NULL;
}
return __va(savep_ra);
}
/*
* Get the error information for errors coming through the
* FWNMI vectors. The pt_regs' r3 will be updated to reflect
* the actual r3 if possible, and a ptr to the error log entry
* will be returned if found.
*
* Use one buffer mce_data_buf per cpu to store RTAS error.
*
* The mce_data_buf does not have any locks or protection around it;
* if a second machine check comes in, or a system reset is done
* before we have logged the error, then we will get corruption in the
* error log. This is preferable over holding off on calling
* ibm,nmi-interlock which would result in us checkstopping if a
* second machine check did come in.
*/
static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
{
struct rtas_error_log *h;
__be64 *savep;
savep = fwnmi_get_savep(regs);
if (!savep)
return NULL;
regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
h = (struct rtas_error_log *)&savep[1];
/* Use the per cpu buffer from paca to store rtas error log */
memset(local_paca->mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
if (!rtas_error_extended(h)) {
memcpy(local_paca->mce_data_buf, h, sizeof(__u64));
} else {
int len, error_log_length;
error_log_length = 8 + rtas_error_extended_log_length(h);
len = min_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
memcpy(local_paca->mce_data_buf, h, len);
}
return (struct rtas_error_log *)local_paca->mce_data_buf;
}
/* Call this when done with the data returned by fwnmi_get_errinfo().
* It will release the saved data area for other CPUs in the
* partition to receive FWNMI errors.
*/
static void fwnmi_release_errinfo(void)
{
struct rtas_args rtas_args;
int ret;
/*
* On pseries, the machine check stack is limited to under 4GB, so
* args can be on-stack.
*/
rtas_call_unlocked(&rtas_args, ibm_nmi_interlock_token, 0, 1, NULL);
ret = be32_to_cpu(rtas_args.rets[0]);
if (ret != 0)
printk(KERN_ERR "FWNMI: nmi-interlock failed: %d\n", ret);
}
int pSeries_system_reset_exception(struct pt_regs *regs)
{
#ifdef __LITTLE_ENDIAN__
/*
* Some firmware byteswaps SRR registers and gives incorrect SRR1. Try
* to detect the bad SRR1 pattern here. Flip the NIP back to correct
* endian for reporting purposes. Unfortunately the MSR can't be fixed,
* so clear it. It will be missing MSR_RI so we won't try to recover.
*/
if ((be64_to_cpu(regs->msr) &
(MSR_LE|MSR_RI|MSR_DR|MSR_IR|MSR_ME|MSR_PR|
MSR_ILE|MSR_HV|MSR_SF)) == (MSR_DR|MSR_SF)) {
regs_set_return_ip(regs, be64_to_cpu((__be64)regs->nip));
regs_set_return_msr(regs, 0);
}
#endif
if (fwnmi_active) {
__be64 *savep;
/*
* Firmware (PowerVM and KVM) saves r3 to a save area like
* machine check, which is not exactly what PAPR (2.9)
* suggests but there is no way to detect otherwise, so this
* is the interface now.
*
* System resets do not save any error log or require an
* "ibm,nmi-interlock" rtas call to release.
*/
savep = fwnmi_get_savep(regs);
if (savep)
regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
}
if (smp_handle_nmi_ipi(regs))
return 1;
return 0; /* need to perform reset */
}
static int mce_handle_err_realmode(int disposition, u8 error_type)
{
#ifdef CONFIG_PPC_BOOK3S_64
if (disposition == RTAS_DISP_NOT_RECOVERED) {
switch (error_type) {
case MC_ERROR_TYPE_ERAT:
flush_erat();
disposition = RTAS_DISP_FULLY_RECOVERED;
break;
case MC_ERROR_TYPE_SLB:
#ifdef CONFIG_PPC_64S_HASH_MMU
/*
* Store the old slb content in paca before flushing.
* Print this when we go to virtual mode.
* There is a chance that we may hit an MCE again if there
* is a parity error on the SLB entry we are trying to read
* for saving. Hence limit the SLB saving to a single
* level of recursion.
*/
if (local_paca->in_mce == 1)
slb_save_contents(local_paca->mce_faulty_slbs);
flush_and_reload_slb();
disposition = RTAS_DISP_FULLY_RECOVERED;
#endif
break;
default:
break;
}
} else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
/* Platform corrected itself but could be degraded */
pr_err("MCE: limited recovery, system may be degraded\n");
disposition = RTAS_DISP_FULLY_RECOVERED;
}
#endif
return disposition;
}
static int mce_handle_err_virtmode(struct pt_regs *regs,
struct rtas_error_log *errp,
struct pseries_mc_errorlog *mce_log,
int disposition)
{
struct mce_error_info mce_err = { 0 };
int initiator = rtas_error_initiator(errp);
int severity = rtas_error_severity(errp);
unsigned long eaddr = 0, paddr = 0;
u8 error_type, err_sub_type;
if (!mce_log)
goto out;
error_type = mce_log->error_type;
err_sub_type = rtas_mc_error_sub_type(mce_log);
if (initiator == RTAS_INITIATOR_UNKNOWN)
mce_err.initiator = MCE_INITIATOR_UNKNOWN;
else if (initiator == RTAS_INITIATOR_CPU)
mce_err.initiator = MCE_INITIATOR_CPU;
else if (initiator == RTAS_INITIATOR_PCI)
mce_err.initiator = MCE_INITIATOR_PCI;
else if (initiator == RTAS_INITIATOR_ISA)
mce_err.initiator = MCE_INITIATOR_ISA;
else if (initiator == RTAS_INITIATOR_MEMORY)
mce_err.initiator = MCE_INITIATOR_MEMORY;
else if (initiator == RTAS_INITIATOR_POWERMGM)
mce_err.initiator = MCE_INITIATOR_POWERMGM;
else
mce_err.initiator = MCE_INITIATOR_UNKNOWN;
if (severity == RTAS_SEVERITY_NO_ERROR)
mce_err.severity = MCE_SEV_NO_ERROR;
else if (severity == RTAS_SEVERITY_EVENT)
mce_err.severity = MCE_SEV_WARNING;
else if (severity == RTAS_SEVERITY_WARNING)
mce_err.severity = MCE_SEV_WARNING;
else if (severity == RTAS_SEVERITY_ERROR_SYNC)
mce_err.severity = MCE_SEV_SEVERE;
else if (severity == RTAS_SEVERITY_ERROR)
mce_err.severity = MCE_SEV_SEVERE;
else
mce_err.severity = MCE_SEV_FATAL;
if (severity <= RTAS_SEVERITY_ERROR_SYNC)
mce_err.sync_error = true;
else
mce_err.sync_error = false;
mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
mce_err.error_class = MCE_ECLASS_UNKNOWN;
switch (error_type) {
case MC_ERROR_TYPE_UE:
mce_err.error_type = MCE_ERROR_TYPE_UE;
mce_common_process_ue(regs, &mce_err);
if (mce_err.ignore_event)
disposition = RTAS_DISP_FULLY_RECOVERED;
switch (err_sub_type) {
case MC_ERROR_UE_IFETCH:
mce_err.u.ue_error_type = MCE_UE_ERROR_IFETCH;
break;
case MC_ERROR_UE_PAGE_TABLE_WALK_IFETCH:
mce_err.u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH;
break;
case MC_ERROR_UE_LOAD_STORE:
mce_err.u.ue_error_type = MCE_UE_ERROR_LOAD_STORE;
break;
case MC_ERROR_UE_PAGE_TABLE_WALK_LOAD_STORE:
mce_err.u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
break;
case MC_ERROR_UE_INDETERMINATE:
default:
mce_err.u.ue_error_type = MCE_UE_ERROR_INDETERMINATE;
break;
}
if (mce_log->sub_err_type & UE_EFFECTIVE_ADDR_PROVIDED)
eaddr = be64_to_cpu(mce_log->effective_address);
if (mce_log->sub_err_type & UE_LOGICAL_ADDR_PROVIDED) {
paddr = be64_to_cpu(mce_log->logical_address);
} else if (mce_log->sub_err_type & UE_EFFECTIVE_ADDR_PROVIDED) {
unsigned long pfn;
pfn = addr_to_pfn(regs, eaddr);
if (pfn != ULONG_MAX)
paddr = pfn << PAGE_SHIFT;
}
break;
case MC_ERROR_TYPE_SLB:
mce_err.error_type = MCE_ERROR_TYPE_SLB;
switch (err_sub_type) {
case MC_ERROR_SLB_PARITY:
mce_err.u.slb_error_type = MCE_SLB_ERROR_PARITY;
break;
case MC_ERROR_SLB_MULTIHIT:
mce_err.u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
break;
case MC_ERROR_SLB_INDETERMINATE:
default:
mce_err.u.slb_error_type = MCE_SLB_ERROR_INDETERMINATE;
break;
}
if (mce_log->sub_err_type & MC_EFFECTIVE_ADDR_PROVIDED)
eaddr = be64_to_cpu(mce_log->effective_address);
break;
case MC_ERROR_TYPE_ERAT:
mce_err.error_type = MCE_ERROR_TYPE_ERAT;
switch (err_sub_type) {
case MC_ERROR_ERAT_PARITY:
mce_err.u.erat_error_type = MCE_ERAT_ERROR_PARITY;
break;
case MC_ERROR_ERAT_MULTIHIT:
mce_err.u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
break;
case MC_ERROR_ERAT_INDETERMINATE:
default:
mce_err.u.erat_error_type = MCE_ERAT_ERROR_INDETERMINATE;
break;
}
if (mce_log->sub_err_type & MC_EFFECTIVE_ADDR_PROVIDED)
eaddr = be64_to_cpu(mce_log->effective_address);
break;
case MC_ERROR_TYPE_TLB:
mce_err.error_type = MCE_ERROR_TYPE_TLB;
switch (err_sub_type) {
case MC_ERROR_TLB_PARITY:
mce_err.u.tlb_error_type = MCE_TLB_ERROR_PARITY;
break;
case MC_ERROR_TLB_MULTIHIT:
mce_err.u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
break;
case MC_ERROR_TLB_INDETERMINATE:
default:
mce_err.u.tlb_error_type = MCE_TLB_ERROR_INDETERMINATE;
break;
}
if (mce_log->sub_err_type & MC_EFFECTIVE_ADDR_PROVIDED)
eaddr = be64_to_cpu(mce_log->effective_address);
break;
case MC_ERROR_TYPE_D_CACHE:
mce_err.error_type = MCE_ERROR_TYPE_DCACHE;
break;
case MC_ERROR_TYPE_I_CACHE:
mce_err.error_type = MCE_ERROR_TYPE_ICACHE;
break;
case MC_ERROR_TYPE_CTRL_MEM_ACCESS:
mce_err.error_type = MCE_ERROR_TYPE_RA;
switch (err_sub_type) {
case MC_ERROR_CTRL_MEM_ACCESS_PTABLE_WALK:
mce_err.u.ra_error_type =
MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN;
break;
case MC_ERROR_CTRL_MEM_ACCESS_OP_ACCESS:
mce_err.u.ra_error_type =
MCE_RA_ERROR_LOAD_STORE_FOREIGN;
break;
}
if (mce_log->sub_err_type & MC_EFFECTIVE_ADDR_PROVIDED)
eaddr = be64_to_cpu(mce_log->effective_address);
break;
case MC_ERROR_TYPE_UNKNOWN:
default:
mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
break;
}
out:
save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
&mce_err, regs->nip, eaddr, paddr);
return disposition;
}
static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
{
struct pseries_errorlog *pseries_log;
struct pseries_mc_errorlog *mce_log = NULL;
int disposition = rtas_error_disposition(errp);
u8 error_type;
if (!rtas_error_extended(errp))
goto out;
pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
if (!pseries_log)
goto out;
mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
error_type = mce_log->error_type;
disposition = mce_handle_err_realmode(disposition, error_type);
out:
disposition = mce_handle_err_virtmode(regs, errp, mce_log,
disposition);
return disposition;
}
/*
* Process MCE rtas errlog event.
*/
void pSeries_machine_check_log_err(void)
{
struct rtas_error_log *err;
err = fwnmi_get_errlog();
log_error((char *)err, ERR_TYPE_RTAS_LOG, 0);
}
/*
* See if we can recover from a machine check exception.
* This is only called on power4 (or above) and only via
* the Firmware Non-Maskable Interrupts (fwnmi) handler
* which provides the error analysis for us.
*
* Return 1 if corrected (or delivered a signal).
* Return 0 if there is nothing we can do.
*/
static int recover_mce(struct pt_regs *regs, struct machine_check_event *evt)
{
int recovered = 0;
if (regs_is_unrecoverable(regs)) {
/* If MSR_RI isn't set, we cannot recover */
pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n");
recovered = 0;
} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
/* Platform corrected itself */
recovered = 1;
} else if (evt->severity == MCE_SEV_FATAL) {
/* Fatal machine check */
pr_err("Machine check interrupt is fatal\n");
recovered = 0;
}
if (!recovered && evt->sync_error) {
/*
* Try to kill processes if we get a synchronous machine check
* (e.g., one caused by execution of this instruction). This
* will devolve into a panic if we try to kill init or are in
* an interrupt etc.
*
* TODO: Queue up this address for hwpoisoning later.
* TODO: This is not quite right for d-side machine
* checks ->nip is not necessarily the important
* address.
*/
if ((user_mode(regs))) {
_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
recovered = 1;
} else if (die_will_crash()) {
/*
* die() would kill the kernel, so better to go via
* the platform reboot code that will log the
* machine check.
*/
recovered = 0;
} else {
die_mce("Machine check", regs, SIGBUS);
recovered = 1;
}
}
return recovered;
}
/*
* Handle a machine check.
*
* Note that on Power 4 and beyond Firmware Non-Maskable Interrupts (fwnmi)
* should be present. If so the handler which called us tells us if the
* error was recovered (never true if RI=0).
*
* On hardware prior to Power 4 these exceptions were asynchronous which
* means we can't tell exactly where they occurred and so we can't recover.
*/
int pSeries_machine_check_exception(struct pt_regs *regs)
{
struct machine_check_event evt;
if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
return 0;
/* Print things out */
if (evt.version != MCE_V1) {
pr_err("Machine Check Exception, Unknown event version %d !\n",
evt.version);
return 0;
}
machine_check_print_event_info(&evt, user_mode(regs), false);
if (recover_mce(regs, &evt))
return 1;
return 0;
}
long pseries_machine_check_realmode(struct pt_regs *regs)
{
struct rtas_error_log *errp;
int disposition;
if (fwnmi_active) {
errp = fwnmi_get_errinfo(regs);
/*
* Call to fwnmi_release_errinfo() in real mode causes kernel
* to panic. Hence we will call it as soon as we go into
* virtual mode.
*/
disposition = mce_handle_error(regs, errp);
fwnmi_release_errinfo();
if (disposition == RTAS_DISP_FULLY_RECOVERED)
return 1;
}
return 0;
}
| linux-master | arch/powerpc/platforms/pseries/ras.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Collaborative memory management interface.
*
* Copyright (C) 2008 IBM Corporation
* Author(s): Brian King ([email protected]),
*/
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/oom.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/stringify.h>
#include <linux/swap.h>
#include <linux/device.h>
#include <linux/balloon_compaction.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
#include <asm/plpar_wrappers.h>
#include "pseries.h"
#define CMM_DRIVER_VERSION "1.0.0"
#define CMM_DEFAULT_DELAY 1
#define CMM_HOTPLUG_DELAY 5
#define CMM_DEBUG 0
#define CMM_DISABLE 0
#define CMM_OOM_KB 1024
#define CMM_MIN_MEM_MB 256
#define KB2PAGES(_p) ((_p)>>(PAGE_SHIFT-10))
#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
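/*
 * Worked example: KB2PAGES()/PAGES2KB() shift by (PAGE_SHIFT - 10)
 * because a page is 2^PAGE_SHIFT bytes and a kilobyte is 2^10 bytes.
 * With 64K pages (PAGE_SHIFT == 16) the shift is 6, so
 * KB2PAGES(1024) == 16 and PAGES2KB(1) == 64; with 4K pages
 * (PAGE_SHIFT == 12) the shift is 2, so KB2PAGES(1024) == 256 and
 * PAGES2KB(1) == 4.
 */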
#define CMM_MEM_HOTPLUG_PRI 1
static unsigned int delay = CMM_DEFAULT_DELAY;
static unsigned int hotplug_delay = CMM_HOTPLUG_DELAY;
static unsigned int oom_kb = CMM_OOM_KB;
static unsigned int cmm_debug = CMM_DEBUG;
static unsigned int cmm_disabled = CMM_DISABLE;
static unsigned long min_mem_mb = CMM_MIN_MEM_MB;
static bool __read_mostly simulate;
static unsigned long simulate_loan_target_kb;
static struct device cmm_dev;
MODULE_AUTHOR("Brian King <[email protected]>");
MODULE_DESCRIPTION("IBM System p Collaborative Memory Manager");
MODULE_LICENSE("GPL");
MODULE_VERSION(CMM_DRIVER_VERSION);
module_param_named(delay, delay, uint, 0644);
MODULE_PARM_DESC(delay, "Delay (in seconds) between polls to query hypervisor paging requests. "
"[Default=" __stringify(CMM_DEFAULT_DELAY) "]");
module_param_named(hotplug_delay, hotplug_delay, uint, 0644);
MODULE_PARM_DESC(hotplug_delay, "Delay (in seconds) after memory hotplug remove "
"before loaning resumes. "
"[Default=" __stringify(CMM_HOTPLUG_DELAY) "]");
module_param_named(oom_kb, oom_kb, uint, 0644);
MODULE_PARM_DESC(oom_kb, "Amount of memory in kb to free on OOM. "
"[Default=" __stringify(CMM_OOM_KB) "]");
module_param_named(min_mem_mb, min_mem_mb, ulong, 0644);
MODULE_PARM_DESC(min_mem_mb, "Minimum amount of memory (in MB) to not balloon. "
"[Default=" __stringify(CMM_MIN_MEM_MB) "]");
module_param_named(debug, cmm_debug, uint, 0644);
MODULE_PARM_DESC(debug, "Enable module debugging logging. Set to 1 to enable. "
"[Default=" __stringify(CMM_DEBUG) "]");
module_param_named(simulate, simulate, bool, 0444);
MODULE_PARM_DESC(simulate, "Enable simulation mode (no communication with hw).");
#define cmm_dbg(...) if (cmm_debug) { printk(KERN_INFO "cmm: "__VA_ARGS__); }
static atomic_long_t loaned_pages;
static unsigned long loaned_pages_target;
static unsigned long oom_freed_pages;
static DEFINE_MUTEX(hotplug_mutex);
static int hotplug_occurred; /* protected by the hotplug mutex */
static struct task_struct *cmm_thread_ptr;
static struct balloon_dev_info b_dev_info;
static long plpar_page_set_loaned(struct page *page)
{
const unsigned long vpa = page_to_phys(page);
unsigned long cmo_page_sz = cmo_get_page_size();
long rc = 0;
int i;
if (unlikely(simulate))
return 0;
for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa + i, 0);
for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE,
vpa + i - cmo_page_sz, 0);
return rc;
}
static long plpar_page_set_active(struct page *page)
{
const unsigned long vpa = page_to_phys(page);
unsigned long cmo_page_sz = cmo_get_page_size();
long rc = 0;
int i;
if (unlikely(simulate))
return 0;
for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa + i, 0);
for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED,
vpa + i - cmo_page_sz, 0);
return rc;
}
/**
* cmm_alloc_pages - Allocate pages and mark them as loaned
* @nr: number of pages to allocate
*
* Return value:
* number of requested pages that could not be allocated
**/
static long cmm_alloc_pages(long nr)
{
struct page *page;
long rc;
cmm_dbg("Begin request for %ld pages\n", nr);
while (nr) {
/* Exit if a hotplug operation is in progress or occurred */
if (mutex_trylock(&hotplug_mutex)) {
if (hotplug_occurred) {
mutex_unlock(&hotplug_mutex);
break;
}
mutex_unlock(&hotplug_mutex);
} else {
break;
}
page = balloon_page_alloc();
if (!page)
break;
rc = plpar_page_set_loaned(page);
if (rc) {
pr_err("%s: Can not set page to loaned. rc=%ld\n", __func__, rc);
__free_page(page);
break;
}
balloon_page_enqueue(&b_dev_info, page);
atomic_long_inc(&loaned_pages);
adjust_managed_page_count(page, -1);
nr--;
}
cmm_dbg("End request with %ld pages unfulfilled\n", nr);
return nr;
}
/**
* cmm_free_pages - Free pages and mark them as active
* @nr: number of pages to free
*
* Return value:
* number of requested pages that could not be freed
**/
static long cmm_free_pages(long nr)
{
struct page *page;
cmm_dbg("Begin free of %ld pages.\n", nr);
while (nr) {
page = balloon_page_dequeue(&b_dev_info);
if (!page)
break;
plpar_page_set_active(page);
adjust_managed_page_count(page, 1);
__free_page(page);
atomic_long_dec(&loaned_pages);
nr--;
}
cmm_dbg("End request with %ld pages unfulfilled\n", nr);
return nr;
}
/**
* cmm_oom_notify - OOM notifier
* @self: notifier block struct
* @dummy: not used
* @parm: returned - number of pages freed
*
* Return value:
* NOTIFY_OK
**/
static int cmm_oom_notify(struct notifier_block *self,
unsigned long dummy, void *parm)
{
unsigned long *freed = parm;
long nr = KB2PAGES(oom_kb);
cmm_dbg("OOM processing started\n");
nr = cmm_free_pages(nr);
loaned_pages_target = atomic_long_read(&loaned_pages);
*freed += KB2PAGES(oom_kb) - nr;
oom_freed_pages += KB2PAGES(oom_kb) - nr;
cmm_dbg("OOM processing complete\n");
return NOTIFY_OK;
}
/**
* cmm_get_mpp - Read memory performance parameters
*
* Makes an hcall to query the current page loan request from the hypervisor.
*
* Return value:
* nothing
**/
static void cmm_get_mpp(void)
{
const long __loaned_pages = atomic_long_read(&loaned_pages);
const long total_pages = totalram_pages() + __loaned_pages;
int rc;
struct hvcall_mpp_data mpp_data;
signed long active_pages_target, page_loan_request, target;
signed long min_mem_pages = (min_mem_mb * 1024 * 1024) / PAGE_SIZE;
if (likely(!simulate)) {
rc = h_get_mpp(&mpp_data);
if (rc != H_SUCCESS)
return;
page_loan_request = div_s64((s64)mpp_data.loan_request,
PAGE_SIZE);
target = page_loan_request + __loaned_pages;
} else {
target = KB2PAGES(simulate_loan_target_kb);
page_loan_request = target - __loaned_pages;
}
if (target < 0 || total_pages < min_mem_pages)
target = 0;
if (target > oom_freed_pages)
target -= oom_freed_pages;
else
target = 0;
active_pages_target = total_pages - target;
if (min_mem_pages > active_pages_target)
target = total_pages - min_mem_pages;
if (target < 0)
target = 0;
loaned_pages_target = target;
cmm_dbg("delta = %ld, loaned = %lu, target = %lu, oom = %lu, totalram = %lu\n",
page_loan_request, __loaned_pages, loaned_pages_target,
oom_freed_pages, totalram_pages());
}
static struct notifier_block cmm_oom_nb = {
.notifier_call = cmm_oom_notify
};
/**
* cmm_thread - CMM task thread
* @dummy: not used
*
* Return value:
* 0
**/
static int cmm_thread(void *dummy)
{
unsigned long timeleft;
long __loaned_pages;
while (1) {
timeleft = msleep_interruptible(delay * 1000);
if (kthread_should_stop() || timeleft)
break;
if (mutex_trylock(&hotplug_mutex)) {
if (hotplug_occurred) {
hotplug_occurred = 0;
mutex_unlock(&hotplug_mutex);
cmm_dbg("Hotplug operation has occurred, "
"loaning activity suspended "
"for %d seconds.\n",
hotplug_delay);
timeleft = msleep_interruptible(hotplug_delay *
1000);
if (kthread_should_stop() || timeleft)
break;
continue;
}
mutex_unlock(&hotplug_mutex);
} else {
cmm_dbg("Hotplug operation in progress, activity "
"suspended\n");
continue;
}
cmm_get_mpp();
__loaned_pages = atomic_long_read(&loaned_pages);
if (loaned_pages_target > __loaned_pages) {
if (cmm_alloc_pages(loaned_pages_target - __loaned_pages))
loaned_pages_target = __loaned_pages;
} else if (loaned_pages_target < __loaned_pages)
cmm_free_pages(__loaned_pages - loaned_pages_target);
}
return 0;
}
#define CMM_SHOW(name, format, args...) \
static ssize_t show_##name(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
return sprintf(buf, format, ##args); \
} \
static DEVICE_ATTR(name, 0444, show_##name, NULL)
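/*
 * For example, CMM_SHOW(loaned_kb, ...) below expands to a
 * show_loaned_kb() sysfs show routine plus a read-only
 * dev_attr_loaned_kb device attribute.
 */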
CMM_SHOW(loaned_kb, "%lu\n", PAGES2KB(atomic_long_read(&loaned_pages)));
CMM_SHOW(loaned_target_kb, "%lu\n", PAGES2KB(loaned_pages_target));
static ssize_t show_oom_pages(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%lu\n", PAGES2KB(oom_freed_pages));
}
static ssize_t store_oom_pages(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long val = simple_strtoul (buf, NULL, 10);
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (val != 0)
return -EBADMSG;
oom_freed_pages = 0;
return count;
}
static DEVICE_ATTR(oom_freed_kb, 0644,
show_oom_pages, store_oom_pages);
static struct device_attribute *cmm_attrs[] = {
&dev_attr_loaned_kb,
&dev_attr_loaned_target_kb,
&dev_attr_oom_freed_kb,
};
static DEVICE_ULONG_ATTR(simulate_loan_target_kb, 0644,
simulate_loan_target_kb);
static struct bus_type cmm_subsys = {
.name = "cmm",
.dev_name = "cmm",
};
static void cmm_release_device(struct device *dev)
{
}
/**
* cmm_sysfs_register - Register with sysfs
*
* Return value:
* 0 on success / other on failure
**/
static int cmm_sysfs_register(struct device *dev)
{
int i, rc;
if ((rc = subsys_system_register(&cmm_subsys, NULL)))
return rc;
dev->id = 0;
dev->bus = &cmm_subsys;
dev->release = cmm_release_device;
if ((rc = device_register(dev)))
goto subsys_unregister;
for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++) {
if ((rc = device_create_file(dev, cmm_attrs[i])))
goto fail;
}
if (!simulate)
return 0;
rc = device_create_file(dev, &dev_attr_simulate_loan_target_kb.attr);
if (rc)
goto fail;
return 0;
fail:
while (--i >= 0)
device_remove_file(dev, cmm_attrs[i]);
device_unregister(dev);
subsys_unregister:
bus_unregister(&cmm_subsys);
return rc;
}
/**
* cmm_unregister_sysfs - Unregister from sysfs
*
**/
static void cmm_unregister_sysfs(struct device *dev)
{
int i;
for (i = 0; i < ARRAY_SIZE(cmm_attrs); i++)
device_remove_file(dev, cmm_attrs[i]);
device_unregister(dev);
bus_unregister(&cmm_subsys);
}
/**
* cmm_reboot_notifier - Make sure pages are not still marked as "loaned"
*
**/
static int cmm_reboot_notifier(struct notifier_block *nb,
unsigned long action, void *unused)
{
if (action == SYS_RESTART) {
if (cmm_thread_ptr)
kthread_stop(cmm_thread_ptr);
cmm_thread_ptr = NULL;
cmm_free_pages(atomic_long_read(&loaned_pages));
}
return NOTIFY_DONE;
}
static struct notifier_block cmm_reboot_nb = {
.notifier_call = cmm_reboot_notifier,
};
/**
* cmm_memory_cb - Handle memory hotplug notifier calls
* @self: notifier block struct
* @action: action to take
* @arg: struct memory_notify data for handler
*
* Return value:
* NOTIFY_OK or notifier error based on subfunction return value
*
**/
static int cmm_memory_cb(struct notifier_block *self,
unsigned long action, void *arg)
{
switch (action) {
case MEM_GOING_OFFLINE:
mutex_lock(&hotplug_mutex);
hotplug_occurred = 1;
break;
case MEM_OFFLINE:
case MEM_CANCEL_OFFLINE:
mutex_unlock(&hotplug_mutex);
cmm_dbg("Memory offline operation complete.\n");
break;
case MEM_GOING_ONLINE:
case MEM_ONLINE:
case MEM_CANCEL_ONLINE:
break;
}
return NOTIFY_OK;
}
static struct notifier_block cmm_mem_nb = {
.notifier_call = cmm_memory_cb,
.priority = CMM_MEM_HOTPLUG_PRI
};
#ifdef CONFIG_BALLOON_COMPACTION
static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
struct page *newpage, struct page *page,
enum migrate_mode mode)
{
unsigned long flags;
/*
* loan/"inflate" the newpage first.
*
* We might race against the cmm_thread, which might discover after our
* loan request that another page is to be unloaned. However, once
* the cmm_thread runs again later, this error will automatically
* be corrected.
*/
if (plpar_page_set_loaned(newpage)) {
/* Unlikely, but possible. Tell the caller not to retry now. */
pr_err_ratelimited("%s: Cannot set page to loaned.", __func__);
return -EBUSY;
}
/* balloon page list reference */
get_page(newpage);
/*
* When we migrate a page to a different zone, we have to fixup the
* count of both involved zones as we adjusted the managed page count
* when inflating.
*/
if (page_zone(page) != page_zone(newpage)) {
adjust_managed_page_count(page, 1);
adjust_managed_page_count(newpage, -1);
}
spin_lock_irqsave(&b_dev_info->pages_lock, flags);
balloon_page_insert(b_dev_info, newpage);
balloon_page_delete(page);
b_dev_info->isolated_pages--;
spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
/*
* activate/"deflate" the old page. We ignore any errors just like the
* other callers.
*/
plpar_page_set_active(page);
/* balloon page list reference */
put_page(page);
return MIGRATEPAGE_SUCCESS;
}
static void cmm_balloon_compaction_init(void)
{
balloon_devinfo_init(&b_dev_info);
b_dev_info.migratepage = cmm_migratepage;
}
#else /* CONFIG_BALLOON_COMPACTION */
static void cmm_balloon_compaction_init(void)
{
}
#endif /* CONFIG_BALLOON_COMPACTION */
/**
* cmm_init - Module initialization
*
* Return value:
* 0 on success / other on failure
**/
static int cmm_init(void)
{
int rc;
if (!firmware_has_feature(FW_FEATURE_CMO) && !simulate)
return -EOPNOTSUPP;
cmm_balloon_compaction_init();
rc = register_oom_notifier(&cmm_oom_nb);
if (rc < 0)
goto out_balloon_compaction;
if ((rc = register_reboot_notifier(&cmm_reboot_nb)))
goto out_oom_notifier;
if ((rc = cmm_sysfs_register(&cmm_dev)))
goto out_reboot_notifier;
rc = register_memory_notifier(&cmm_mem_nb);
if (rc)
goto out_unregister_notifier;
if (cmm_disabled)
return 0;
cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
if (IS_ERR(cmm_thread_ptr)) {
rc = PTR_ERR(cmm_thread_ptr);
goto out_unregister_notifier;
}
return 0;
out_unregister_notifier:
unregister_memory_notifier(&cmm_mem_nb);
cmm_unregister_sysfs(&cmm_dev);
out_reboot_notifier:
unregister_reboot_notifier(&cmm_reboot_nb);
out_oom_notifier:
unregister_oom_notifier(&cmm_oom_nb);
out_balloon_compaction:
return rc;
}
/**
* cmm_exit - Module exit
*
* Return value:
* nothing
**/
static void cmm_exit(void)
{
if (cmm_thread_ptr)
kthread_stop(cmm_thread_ptr);
unregister_oom_notifier(&cmm_oom_nb);
unregister_reboot_notifier(&cmm_reboot_nb);
unregister_memory_notifier(&cmm_mem_nb);
cmm_free_pages(atomic_long_read(&loaned_pages));
cmm_unregister_sysfs(&cmm_dev);
}
/**
* cmm_set_disable - Disable/Enable CMM
*
* Return value:
* 0 on success / other on failure
**/
static int cmm_set_disable(const char *val, const struct kernel_param *kp)
{
int disable = simple_strtoul(val, NULL, 10);
if (disable != 0 && disable != 1)
return -EINVAL;
if (disable && !cmm_disabled) {
if (cmm_thread_ptr)
kthread_stop(cmm_thread_ptr);
cmm_thread_ptr = NULL;
cmm_free_pages(atomic_long_read(&loaned_pages));
} else if (!disable && cmm_disabled) {
cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
if (IS_ERR(cmm_thread_ptr))
return PTR_ERR(cmm_thread_ptr);
}
cmm_disabled = disable;
return 0;
}
module_param_call(disable, cmm_set_disable, param_get_uint,
&cmm_disabled, 0644);
MODULE_PARM_DESC(disable, "Disable CMM. Set to 1 to disable. "
"[Default=" __stringify(CMM_DISABLE) "]");
module_init(cmm_init);
module_exit(cmm_exit);
| linux-master | arch/powerpc/platforms/pseries/cmm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pSeries firmware setup code.
*
* Portions from arch/powerpc/platforms/pseries/setup.c:
* Copyright (C) 1995 Linus Torvalds
* Adapted from 'alpha' version by Gary Thomas
* Modified by Cort Dougan ([email protected])
* Modified by PPC64 Team, IBM Corp
*
* Portions from arch/powerpc/kernel/firmware.c
* Copyright (C) 2001 Ben. Herrenschmidt ([email protected])
* Modifications for ppc64:
* Copyright (C) 2003 Dave Engebretsen <[email protected]>
* Copyright (C) 2005 Stephen Rothwell, IBM Corporation
*
* Copyright 2006 IBM Corporation.
*/
#include <linux/of_fdt.h>
#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/svm.h>
#include "pseries.h"
struct hypertas_fw_feature {
unsigned long val;
char * name;
};
/*
* The names in this table match names in rtas/ibm,hypertas-functions. If the
* entry ends in a '*', only the part up to the '*' is matched. Otherwise the entire
* string must match.
*/
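/*
 * For example, the wildcard entry "hcall-best-energy-1*" below compares
 * only the characters before the '*', so it would match both
 * "hcall-best-energy-1" and a hypothetical "hcall-best-energy-1-2"
 * reported by firmware.
 */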
static __initdata struct hypertas_fw_feature
hypertas_fw_features_table[] = {
{FW_FEATURE_PFT, "hcall-pft"},
{FW_FEATURE_TCE, "hcall-tce"},
{FW_FEATURE_SPRG0, "hcall-sprg0"},
{FW_FEATURE_DABR, "hcall-dabr"},
{FW_FEATURE_COPY, "hcall-copy"},
{FW_FEATURE_ASR, "hcall-asr"},
{FW_FEATURE_DEBUG, "hcall-debug"},
{FW_FEATURE_PERF, "hcall-perf"},
{FW_FEATURE_DUMP, "hcall-dump"},
{FW_FEATURE_INTERRUPT, "hcall-interrupt"},
{FW_FEATURE_MIGRATE, "hcall-migrate"},
{FW_FEATURE_PERFMON, "hcall-perfmon"},
{FW_FEATURE_CRQ, "hcall-crq"},
{FW_FEATURE_VIO, "hcall-vio"},
{FW_FEATURE_RDMA, "hcall-rdma"},
{FW_FEATURE_LLAN, "hcall-lLAN"},
{FW_FEATURE_BULK_REMOVE, "hcall-bulk"},
{FW_FEATURE_XDABR, "hcall-xdabr"},
{FW_FEATURE_PUT_TCE_IND | FW_FEATURE_STUFF_TCE,
"hcall-multi-tce"},
{FW_FEATURE_SPLPAR, "hcall-splpar"},
{FW_FEATURE_VPHN, "hcall-vphn"},
{FW_FEATURE_SET_MODE, "hcall-set-mode"},
{FW_FEATURE_BEST_ENERGY, "hcall-best-energy-1*"},
{FW_FEATURE_HPT_RESIZE, "hcall-hpt-resize"},
{FW_FEATURE_BLOCK_REMOVE, "hcall-block-remove"},
{FW_FEATURE_PAPR_SCM, "hcall-scm"},
{FW_FEATURE_RPT_INVALIDATE, "hcall-rpt-invalidate"},
{FW_FEATURE_ENERGY_SCALE_INFO, "hcall-energy-scale-info"},
{FW_FEATURE_WATCHDOG, "hcall-watchdog"},
{FW_FEATURE_PLPKS, "hcall-pks"},
};
/* Build up the firmware features bitmask using the contents of
* device-tree/ibm,hypertas-functions. Ultimately this functionality may
* be moved into prom.c prom_init().
*/
static void __init fw_hypertas_feature_init(const char *hypertas,
unsigned long len)
{
const char *s;
int i;
pr_debug(" -> fw_hypertas_feature_init()\n");
for (s = hypertas; s < hypertas + len; s += strlen(s) + 1) {
for (i = 0; i < ARRAY_SIZE(hypertas_fw_features_table); i++) {
const char *name = hypertas_fw_features_table[i].name;
size_t size;
/*
* If there is a '*' at the end of name, only check
* up to there.
*/
size = strlen(name);
if (size && name[size - 1] == '*') {
if (strncmp(name, s, size - 1))
continue;
} else if (strcmp(name, s))
continue;
/* we have a match */
powerpc_firmware_features |=
hypertas_fw_features_table[i].val;
break;
}
}
if (is_secure_guest() &&
(powerpc_firmware_features & FW_FEATURE_PUT_TCE_IND)) {
powerpc_firmware_features &= ~FW_FEATURE_PUT_TCE_IND;
pr_debug("SVM: disabling PUT_TCE_IND firmware feature\n");
}
pr_debug(" <- fw_hypertas_feature_init()\n");
}
struct vec5_fw_feature {
unsigned long val;
unsigned int feature;
};
static __initdata struct vec5_fw_feature
vec5_fw_features_table[] = {
{FW_FEATURE_FORM1_AFFINITY, OV5_FORM1_AFFINITY},
{FW_FEATURE_PRRN, OV5_PRRN},
{FW_FEATURE_DRMEM_V2, OV5_DRMEM_V2},
{FW_FEATURE_DRC_INFO, OV5_DRC_INFO},
{FW_FEATURE_FORM2_AFFINITY, OV5_FORM2_AFFINITY},
};
static void __init fw_vec5_feature_init(const char *vec5, unsigned long len)
{
unsigned int index, feat;
int i;
pr_debug(" -> fw_vec5_feature_init()\n");
for (i = 0; i < ARRAY_SIZE(vec5_fw_features_table); i++) {
index = OV5_INDX(vec5_fw_features_table[i].feature);
feat = OV5_FEAT(vec5_fw_features_table[i].feature);
if (index < len && (vec5[index] & feat))
powerpc_firmware_features |=
vec5_fw_features_table[i].val;
}
pr_debug(" <- fw_vec5_feature_init()\n");
}
/*
* Called very early, MMU is off, device-tree isn't unflattened
*/
static int __init probe_fw_features(unsigned long node, const char *uname, int
depth, void *data)
{
const char *prop;
int len;
static int hypertas_found;
static int vec5_found;
if (depth != 1)
return 0;
if (!strcmp(uname, "rtas") || !strcmp(uname, "rtas@0")) {
prop = of_get_flat_dt_prop(node, "ibm,hypertas-functions",
&len);
if (prop) {
powerpc_firmware_features |= FW_FEATURE_LPAR;
fw_hypertas_feature_init(prop, len);
}
hypertas_found = 1;
}
if (!strcmp(uname, "chosen")) {
prop = of_get_flat_dt_prop(node, "ibm,architecture-vec-5",
&len);
if (prop)
fw_vec5_feature_init(prop, len);
vec5_found = 1;
}
return hypertas_found && vec5_found;
}
void __init pseries_probe_fw_features(void)
{
of_scan_flat_dt(probe_fw_features, NULL);
}
| linux-master | arch/powerpc/platforms/pseries/firmware.c |
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "rtas-work-area: " fmt
#include <linux/genalloc.h>
#include <linux/log2.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mempool.h>
#include <linux/minmax.h>
#include <linux/mutex.h>
#include <linux/numa.h>
#include <linux/sizes.h>
#include <linux/wait.h>
#include <asm/machdep.h>
#include <asm/rtas-work-area.h>
#include <asm/rtas.h>
enum {
/*
* Ensure the pool is page-aligned.
*/
RTAS_WORK_AREA_ARENA_ALIGN = PAGE_SIZE,
/*
* Don't let a single allocation claim the whole arena.
*/
RTAS_WORK_AREA_ARENA_SZ = RTAS_WORK_AREA_MAX_ALLOC_SZ * 2,
/*
* The smallest known work area size is for ibm,get-vpd's
* location code argument, which is limited to 79 characters
* plus 1 nul terminator.
*
* PAPR+ 7.3.20 ibm,get-vpd RTAS Call
* PAPR+ 12.3.2.4 Converged Location Code Rules - Length Restrictions
*/
RTAS_WORK_AREA_MIN_ALLOC_SZ = roundup_pow_of_two(80),
};
static struct {
struct gen_pool *gen_pool;
char *arena;
struct mutex mutex; /* serializes allocations */
struct wait_queue_head wqh;
mempool_t descriptor_pool;
bool available;
} rwa_state = {
.mutex = __MUTEX_INITIALIZER(rwa_state.mutex),
.wqh = __WAIT_QUEUE_HEAD_INITIALIZER(rwa_state.wqh),
};
/*
* A single work area buffer and descriptor to serve requests early in
* boot before the allocator is fully initialized. We know 4KB is the
* most any boot time user needs (they all call ibm,get-system-parameter).
*/
static bool early_work_area_in_use __initdata;
static char early_work_area_buf[SZ_4K] __initdata __aligned(SZ_4K);
static struct rtas_work_area early_work_area __initdata = {
.buf = early_work_area_buf,
.size = sizeof(early_work_area_buf),
};
static struct rtas_work_area * __init rtas_work_area_alloc_early(size_t size)
{
WARN_ON(size > early_work_area.size);
WARN_ON(early_work_area_in_use);
early_work_area_in_use = true;
memset(early_work_area.buf, 0, early_work_area.size);
return &early_work_area;
}
static void __init rtas_work_area_free_early(struct rtas_work_area *work_area)
{
WARN_ON(work_area != &early_work_area);
WARN_ON(!early_work_area_in_use);
early_work_area_in_use = false;
}
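/*
 * Allocate a work area of the requested size. Falls back to the static
 * early buffer until the gen_pool-backed allocator is available, and
 * otherwise blocks until the request can be satisfied from the arena.
 */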
struct rtas_work_area * __ref __rtas_work_area_alloc(size_t size)
{
struct rtas_work_area *area;
unsigned long addr;
might_sleep();
/*
* The rtas_work_area_alloc() wrapper enforces this at build
* time. Requests that exceed the arena size will block
* indefinitely.
*/
WARN_ON(size > RTAS_WORK_AREA_MAX_ALLOC_SZ);
if (!rwa_state.available)
return rtas_work_area_alloc_early(size);
/*
* To ensure FCFS behavior and prevent a high rate of smaller
* requests from starving larger ones, use the mutex to queue
* allocations.
*/
mutex_lock(&rwa_state.mutex);
wait_event(rwa_state.wqh,
(addr = gen_pool_alloc(rwa_state.gen_pool, size)) != 0);
mutex_unlock(&rwa_state.mutex);
area = mempool_alloc(&rwa_state.descriptor_pool, GFP_KERNEL);
area->buf = (char *)addr;
area->size = size;
return area;
}
void __ref rtas_work_area_free(struct rtas_work_area *area)
{
if (!rwa_state.available) {
rtas_work_area_free_early(area);
return;
}
gen_pool_free(rwa_state.gen_pool, (unsigned long)area->buf, area->size);
mempool_free(area, &rwa_state.descriptor_pool);
wake_up(&rwa_state.wqh);
}
/*
* Initialization of the work area allocator happens in two parts. To
* reliably reserve an arena that satisfies RTAS addressing
* requirements, we must perform a memblock allocation early,
 * immediately after RTAS instantiation. Then we have to wait until
* the slab allocator is up before setting up the descriptor mempool
* and adding the arena to a gen_pool.
*/
static __init int rtas_work_area_allocator_init(void)
{
const unsigned int order = ilog2(RTAS_WORK_AREA_MIN_ALLOC_SZ);
const phys_addr_t pa_start = __pa(rwa_state.arena);
const phys_addr_t pa_end = pa_start + RTAS_WORK_AREA_ARENA_SZ - 1;
struct gen_pool *pool;
const int nid = NUMA_NO_NODE;
int err;
err = -ENOMEM;
if (!rwa_state.arena)
goto err_out;
pool = gen_pool_create(order, nid);
if (!pool)
goto err_out;
/*
* All RTAS functions that consume work areas are OK with
* natural alignment, when they have alignment requirements at
* all.
*/
gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);
err = gen_pool_add(pool, (unsigned long)rwa_state.arena,
RTAS_WORK_AREA_ARENA_SZ, nid);
if (err)
goto err_destroy;
err = mempool_init_kmalloc_pool(&rwa_state.descriptor_pool, 1,
sizeof(struct rtas_work_area));
if (err)
goto err_destroy;
rwa_state.gen_pool = pool;
rwa_state.available = true;
pr_debug("arena [%pa-%pa] (%uK), min/max alloc sizes %u/%u\n",
&pa_start, &pa_end,
RTAS_WORK_AREA_ARENA_SZ / SZ_1K,
RTAS_WORK_AREA_MIN_ALLOC_SZ,
RTAS_WORK_AREA_MAX_ALLOC_SZ);
return 0;
err_destroy:
gen_pool_destroy(pool);
err_out:
return err;
}
machine_arch_initcall(pseries, rtas_work_area_allocator_init);
/**
* rtas_work_area_reserve_arena() - Reserve memory suitable for RTAS work areas.
 * @limit: Upper address limit for the memblock allocation.
 */
void __init rtas_work_area_reserve_arena(const phys_addr_t limit)
{
const phys_addr_t align = RTAS_WORK_AREA_ARENA_ALIGN;
const phys_addr_t size = RTAS_WORK_AREA_ARENA_SZ;
const phys_addr_t min = MEMBLOCK_LOW_LIMIT;
const int nid = NUMA_NO_NODE;
/*
* Too early for a machine_is(pseries) check. But PAPR
* effectively mandates that ibm,get-system-parameter is
* present:
*
* R1–7.3.16–1. All platforms must support the System
* Parameters option.
*
* So set up the arena if we find that, with a fallback to
* ibm,configure-connector, just in case.
*/
if (rtas_function_implemented(RTAS_FN_IBM_GET_SYSTEM_PARAMETER) ||
rtas_function_implemented(RTAS_FN_IBM_CONFIGURE_CONNECTOR))
rwa_state.arena = memblock_alloc_try_nid(size, align, min, limit, nid);
}
| linux-master | arch/powerpc/platforms/pseries/rtas-work-area.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Support for Partition Mobility/Migration
*
* Copyright (C) 2010 Nathan Fontenot
* Copyright (C) 2010 IBM Corporation
*/
#define pr_fmt(fmt) "mobility: " fmt
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/stop_machine.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/stringify.h>
#include <asm/machdep.h>
#include <asm/nmi.h>
#include <asm/rtas.h>
#include "pseries.h"
#include "vas.h" /* vas_migration_handler() */
#include "../../kernel/cacheinfo.h"
static struct kobject *mobility_kobj;
struct update_props_workarea {
__be32 phandle;
__be32 state;
__be64 reserved;
__be32 nprops;
} __packed;
#define NODE_ACTION_MASK 0xff000000
#define NODE_COUNT_MASK 0x00ffffff
#define DELETE_DT_NODE 0x01000000
#define UPDATE_DT_NODE 0x02000000
#define ADD_DT_NODE 0x03000000
#define MIGRATION_SCOPE (1)
#define PRRN_SCOPE -2
#ifdef CONFIG_PPC_WATCHDOG
static unsigned int nmi_wd_lpm_factor = 200;
#ifdef CONFIG_SYSCTL
static struct ctl_table nmi_wd_lpm_factor_ctl_table[] = {
{
.procname = "nmi_wd_lpm_factor",
.data = &nmi_wd_lpm_factor,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_douintvec_minmax,
},
{}
};
static int __init register_nmi_wd_lpm_factor_sysctl(void)
{
register_sysctl("kernel", nmi_wd_lpm_factor_ctl_table);
return 0;
}
device_initcall(register_nmi_wd_lpm_factor_sysctl);
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_PPC_WATCHDOG */
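/*
 * Copy the caller's buffer through the RTAS-addressable rtas_data_buf
 * (and back again) while holding rtas_data_buf_lock, then issue the call.
 */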
static int mobility_rtas_call(int token, char *buf, s32 scope)
{
int rc;
spin_lock(&rtas_data_buf_lock);
memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope);
memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);
spin_unlock(&rtas_data_buf_lock);
return rc;
}
static int delete_dt_node(struct device_node *dn)
{
struct device_node *pdn;
bool is_platfac;
pdn = of_get_parent(dn);
is_platfac = of_node_is_type(dn, "ibm,platform-facilities") ||
of_node_is_type(pdn, "ibm,platform-facilities");
of_node_put(pdn);
/*
* The drivers that bind to nodes in the platform-facilities
* hierarchy don't support node removal, and the removal directive
* from firmware is always followed by an add of an equivalent
* node. The capability (e.g. RNG, encryption, compression)
* represented by the node is never interrupted by the migration.
* So ignore changes to this part of the tree.
*/
if (is_platfac) {
pr_notice("ignoring remove operation for %pOFfp\n", dn);
return 0;
}
pr_debug("removing node %pOFfp\n", dn);
dlpar_detach_node(dn);
return 0;
}
static int update_dt_property(struct device_node *dn, struct property **prop,
const char *name, u32 vd, char *value)
{
struct property *new_prop = *prop;
int more = 0;
/* A negative 'vd' value indicates that only part of the new property
* value is contained in the buffer and we need to call
* ibm,update-properties again to get the rest of the value.
*
	 * A negative value is also the two's complement of the actual value.
*/
if (vd & 0x80000000) {
vd = ~vd + 1;
more = 1;
}
if (new_prop) {
/* partial property fixup */
char *new_data = kzalloc(new_prop->length + vd, GFP_KERNEL);
if (!new_data)
return -ENOMEM;
memcpy(new_data, new_prop->value, new_prop->length);
memcpy(new_data + new_prop->length, value, vd);
kfree(new_prop->value);
new_prop->value = new_data;
new_prop->length += vd;
} else {
new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
if (!new_prop)
return -ENOMEM;
new_prop->name = kstrdup(name, GFP_KERNEL);
if (!new_prop->name) {
kfree(new_prop);
return -ENOMEM;
}
new_prop->length = vd;
new_prop->value = kzalloc(new_prop->length, GFP_KERNEL);
if (!new_prop->value) {
kfree(new_prop->name);
kfree(new_prop);
return -ENOMEM;
}
memcpy(new_prop->value, value, vd);
*prop = new_prop;
}
if (!more) {
pr_debug("updating node %pOF property %s\n", dn, name);
of_update_property(dn, new_prop);
*prop = NULL;
}
return 0;
}
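/*
 * Update the properties of a single device node. ibm,update-properties is
 * called repeatedly (it returns 1 while more data is available), and each
 * returned property descriptor is either skipped, removed or applied via
 * update_dt_property().
 */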
static int update_dt_node(struct device_node *dn, s32 scope)
{
struct update_props_workarea *upwa;
struct property *prop = NULL;
int i, rc, rtas_rc;
char *prop_data;
char *rtas_buf;
int update_properties_token;
u32 nprops;
u32 vd;
update_properties_token = rtas_function_token(RTAS_FN_IBM_UPDATE_PROPERTIES);
if (update_properties_token == RTAS_UNKNOWN_SERVICE)
return -EINVAL;
rtas_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
if (!rtas_buf)
return -ENOMEM;
upwa = (struct update_props_workarea *)&rtas_buf[0];
upwa->phandle = cpu_to_be32(dn->phandle);
do {
rtas_rc = mobility_rtas_call(update_properties_token, rtas_buf,
scope);
if (rtas_rc < 0)
break;
prop_data = rtas_buf + sizeof(*upwa);
nprops = be32_to_cpu(upwa->nprops);
/* On the first call to ibm,update-properties for a node the
* first property value descriptor contains an empty
* property name, the property value length encoded as u32,
* and the property value is the node path being updated.
*/
if (*prop_data == 0) {
prop_data++;
vd = be32_to_cpu(*(__be32 *)prop_data);
prop_data += vd + sizeof(vd);
nprops--;
}
for (i = 0; i < nprops; i++) {
char *prop_name;
prop_name = prop_data;
prop_data += strlen(prop_name) + 1;
vd = be32_to_cpu(*(__be32 *)prop_data);
prop_data += sizeof(vd);
switch (vd) {
case 0x00000000:
/* name only property, nothing to do */
break;
case 0x80000000:
of_remove_property(dn, of_find_property(dn,
prop_name, NULL));
prop = NULL;
break;
default:
rc = update_dt_property(dn, &prop, prop_name,
vd, prop_data);
if (rc) {
pr_err("updating %s property failed: %d\n",
prop_name, rc);
}
prop_data += vd;
break;
}
cond_resched();
}
cond_resched();
} while (rtas_rc == 1);
kfree(rtas_buf);
return 0;
}
static int add_dt_node(struct device_node *parent_dn, __be32 drc_index)
{
struct device_node *dn;
int rc;
dn = dlpar_configure_connector(drc_index, parent_dn);
if (!dn)
return -ENOENT;
/*
* Since delete_dt_node() ignores this node type, this is the
* necessary counterpart. We also know that a platform-facilities
* node returned from dlpar_configure_connector() has children
* attached, and dlpar_attach_node() only adds the parent, leaking
* the children. So ignore these on the add side for now.
*/
if (of_node_is_type(dn, "ibm,platform-facilities")) {
pr_notice("ignoring add operation for %pOF\n", dn);
dlpar_free_cc_nodes(dn);
return 0;
}
rc = dlpar_attach_node(dn, parent_dn);
if (rc)
dlpar_free_cc_nodes(dn);
pr_debug("added node %pOFfp\n", dn);
return rc;
}
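/*
 * Walk the results of ibm,update-nodes. Each entry encodes an action
 * (delete/update/add) and a count of affected phandles; the loop repeats
 * while the RTAS call indicates more data is available (rc == 1).
 */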
static int pseries_devicetree_update(s32 scope)
{
char *rtas_buf;
__be32 *data;
int update_nodes_token;
int rc;
update_nodes_token = rtas_function_token(RTAS_FN_IBM_UPDATE_NODES);
if (update_nodes_token == RTAS_UNKNOWN_SERVICE)
return 0;
rtas_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
if (!rtas_buf)
return -ENOMEM;
do {
rc = mobility_rtas_call(update_nodes_token, rtas_buf, scope);
if (rc && rc != 1)
break;
data = (__be32 *)rtas_buf + 4;
while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
int i;
u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
data++;
for (i = 0; i < node_count; i++) {
struct device_node *np;
__be32 phandle = *data++;
__be32 drc_index;
np = of_find_node_by_phandle(be32_to_cpu(phandle));
if (!np) {
pr_warn("Failed lookup: phandle 0x%x for action 0x%x\n",
be32_to_cpu(phandle), action);
continue;
}
switch (action) {
case DELETE_DT_NODE:
delete_dt_node(np);
break;
case UPDATE_DT_NODE:
update_dt_node(np, scope);
break;
case ADD_DT_NODE:
drc_index = *data++;
add_dt_node(np, drc_index);
break;
}
of_node_put(np);
cond_resched();
}
}
cond_resched();
} while (rc == 1);
kfree(rtas_buf);
return rc;
}
void post_mobility_fixup(void)
{
int rc;
rtas_activate_firmware();
/*
* We don't want CPUs to go online/offline while the device
* tree is being updated.
*/
cpus_read_lock();
/*
* It's common for the destination firmware to replace cache
* nodes. Release all of the cacheinfo hierarchy's references
* before updating the device tree.
*/
cacheinfo_teardown();
rc = pseries_devicetree_update(MIGRATION_SCOPE);
if (rc)
pr_err("device tree update failed: %d\n", rc);
cacheinfo_rebuild();
cpus_read_unlock();
/* Possibly switch to a new L1 flush type */
pseries_setup_security_mitigations();
/* Reinitialise system information for hv-24x7 */
read_24x7_sys_info();
return;
}
static int poll_vasi_state(u64 handle, unsigned long *res)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
long hvrc;
int ret;
hvrc = plpar_hcall(H_VASI_STATE, retbuf, handle);
switch (hvrc) {
case H_SUCCESS:
ret = 0;
*res = retbuf[0];
break;
case H_PARAMETER:
ret = -EINVAL;
break;
case H_FUNCTION:
ret = -EOPNOTSUPP;
break;
case H_HARDWARE:
default:
pr_err("unexpected H_VASI_STATE result %ld\n", hvrc);
ret = -EIO;
break;
}
return ret;
}
static int wait_for_vasi_session_suspending(u64 handle)
{
unsigned long state;
int ret;
/*
* Wait for transition from H_VASI_ENABLED to
* H_VASI_SUSPENDING. Treat anything else as an error.
*/
while (true) {
ret = poll_vasi_state(handle, &state);
if (ret != 0 || state == H_VASI_SUSPENDING) {
break;
} else if (state == H_VASI_ENABLED) {
ssleep(1);
} else {
pr_err("unexpected H_VASI_STATE result %lu\n", state);
ret = -EIO;
break;
}
}
/*
* Proceed even if H_VASI_STATE is unavailable. If H_JOIN or
* ibm,suspend-me are also unimplemented, we'll recover then.
*/
if (ret == -EOPNOTSUPP)
ret = 0;
return ret;
}
static void wait_for_vasi_session_completed(u64 handle)
{
unsigned long state = 0;
int ret;
pr_info("waiting for memory transfer to complete...\n");
/*
* Wait for transition from H_VASI_RESUMED to H_VASI_COMPLETED.
*/
while (true) {
ret = poll_vasi_state(handle, &state);
/*
* If the memory transfer is already complete and the migration
		 * has been cleaned up by the hypervisor, H_PARAMETER is returned,
		 * which is translated into -EINVAL by poll_vasi_state().
*/
if (ret == -EINVAL || (!ret && state == H_VASI_COMPLETED)) {
pr_info("memory transfer completed.\n");
break;
}
if (ret) {
pr_err("H_VASI_STATE return error (%d)\n", ret);
break;
}
if (state != H_VASI_RESUMED) {
pr_err("unexpected H_VASI_STATE result %lu\n", state);
break;
}
msleep(500);
}
}
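/* Wake a single CPU that is sleeping in H_JOIN by sending it an H_PROD. */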
static void prod_single(unsigned int target_cpu)
{
long hvrc;
int hwid;
hwid = get_hard_smp_processor_id(target_cpu);
hvrc = plpar_hcall_norets(H_PROD, hwid);
if (hvrc == H_SUCCESS)
return;
pr_err_ratelimited("H_PROD of CPU %u (hwid %d) error: %ld\n",
target_cpu, hwid, hvrc);
}
static void prod_others(void)
{
unsigned int cpu;
for_each_online_cpu(cpu) {
if (cpu != smp_processor_id())
prod_single(cpu);
}
}
static u16 clamp_slb_size(void)
{
#ifdef CONFIG_PPC_64S_HASH_MMU
u16 prev = mmu_slb_size;
slb_set_size(SLB_MIN_SIZE);
return prev;
#else
return 0;
#endif
}
static int do_suspend(void)
{
u16 saved_slb_size;
int status;
int ret;
pr_info("calling ibm,suspend-me on CPU %i\n", smp_processor_id());
/*
* The destination processor model may have fewer SLB entries
* than the source. We reduce mmu_slb_size to a safe minimum
* before suspending in order to minimize the possibility of
* programming non-existent entries on the destination. If
* suspend fails, we restore it before returning. On success
* the OF reconfig path will update it from the new device
* tree after resuming on the destination.
*/
saved_slb_size = clamp_slb_size();
ret = rtas_ibm_suspend_me(&status);
if (ret != 0) {
pr_err("ibm,suspend-me error: %d\n", status);
slb_set_size(saved_slb_size);
}
return ret;
}
/**
* struct pseries_suspend_info - State shared between CPUs for join/suspend.
* @counter: Threads are to increment this upon resuming from suspend
* or if an error is received from H_JOIN. The thread which performs
* the first increment (i.e. sets it to 1) is responsible for
* waking the other threads.
* @done: False if join/suspend is in progress. True if the operation is
* complete (successful or not).
*/
struct pseries_suspend_info {
atomic_t counter;
bool done;
};
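/*
 * Per-CPU join/suspend routine run under stop_machine(). Every online CPU
 * calls H_JOIN; the one CPU that receives H_CONTINUE performs the actual
 * ibm,suspend-me, and the first CPU to resume prods the others awake.
 */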
static int do_join(void *arg)
{
struct pseries_suspend_info *info = arg;
atomic_t *counter = &info->counter;
long hvrc;
int ret;
retry:
/* Must ensure MSR.EE off for H_JOIN. */
hard_irq_disable();
hvrc = plpar_hcall_norets(H_JOIN);
switch (hvrc) {
case H_CONTINUE:
/*
* All other CPUs are offline or in H_JOIN. This CPU
* attempts the suspend.
*/
ret = do_suspend();
break;
case H_SUCCESS:
/*
* The suspend is complete and this cpu has received a
* prod, or we've received a stray prod from unrelated
* code (e.g. paravirt spinlocks) and we need to join
* again.
*
* This barrier orders the return from H_JOIN above vs
* the load of info->done. It pairs with the barrier
* in the wakeup/prod path below.
*/
smp_mb();
if (READ_ONCE(info->done) == false) {
pr_info_ratelimited("premature return from H_JOIN on CPU %i, retrying",
smp_processor_id());
goto retry;
}
ret = 0;
break;
case H_BAD_MODE:
case H_HARDWARE:
default:
ret = -EIO;
pr_err_ratelimited("H_JOIN error %ld on CPU %i\n",
hvrc, smp_processor_id());
break;
}
if (atomic_inc_return(counter) == 1) {
pr_info("CPU %u waking all threads\n", smp_processor_id());
WRITE_ONCE(info->done, true);
/*
* This barrier orders the store to info->done vs subsequent
* H_PRODs to wake the other CPUs. It pairs with the barrier
* in the H_SUCCESS case above.
*/
smp_mb();
prod_others();
}
/*
* Execution may have been suspended for several seconds, so reset
* the watchdogs. touch_nmi_watchdog() also touches the soft lockup
* watchdog.
*/
rcu_cpu_stall_reset();
touch_nmi_watchdog();
return ret;
}
/*
* Abort reason code byte 0. We use only the 'Migrating partition' value.
*/
enum vasi_aborting_entity {
ORCHESTRATOR = 1,
VSP_SOURCE = 2,
PARTITION_FIRMWARE = 3,
PLATFORM_FIRMWARE = 4,
VSP_TARGET = 5,
MIGRATING_PARTITION = 6,
};
static void pseries_cancel_migration(u64 handle, int err)
{
u32 reason_code;
u32 detail;
u8 entity;
long hvrc;
entity = MIGRATING_PARTITION;
detail = abs(err) & 0xffffff;
reason_code = (entity << 24) | detail;
hvrc = plpar_hcall_norets(H_VASI_SIGNAL, handle,
H_VASI_SIGNAL_CANCEL, reason_code);
if (hvrc)
pr_err("H_VASI_SIGNAL error: %ld\n", hvrc);
}
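/*
 * Attempt the join/suspend sequence, retrying up to max_attempts times
 * with an exponentially increasing delay (1 ms, 10 ms, ...) as long as
 * the VASI stream remains in the Suspending state.
 */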
static int pseries_suspend(u64 handle)
{
const unsigned int max_attempts = 5;
unsigned int retry_interval_ms = 1;
unsigned int attempt = 1;
int ret;
while (true) {
struct pseries_suspend_info info;
unsigned long vasi_state;
int vasi_err;
info = (struct pseries_suspend_info) {
.counter = ATOMIC_INIT(0),
.done = false,
};
ret = stop_machine(do_join, &info, cpu_online_mask);
if (ret == 0)
break;
/*
* Encountered an error. If the VASI stream is still
* in Suspending state, it's likely a transient
* condition related to some device in the partition
* and we can retry in the hope that the cause has
* cleared after some delay.
*
* A better design would allow drivers etc to prepare
* for the suspend and avoid conditions which prevent
* the suspend from succeeding. For now, we have this
* mitigation.
*/
pr_notice("Partition suspend attempt %u of %u error: %d\n",
attempt, max_attempts, ret);
if (attempt == max_attempts)
break;
vasi_err = poll_vasi_state(handle, &vasi_state);
if (vasi_err == 0) {
if (vasi_state != H_VASI_SUSPENDING) {
pr_notice("VASI state %lu after failed suspend\n",
vasi_state);
break;
}
} else if (vasi_err != -EOPNOTSUPP) {
pr_err("VASI state poll error: %d", vasi_err);
break;
}
pr_notice("Will retry partition suspend after %u ms\n",
retry_interval_ms);
msleep(retry_interval_ms);
retry_interval_ms *= 10;
attempt++;
}
return ret;
}
static int pseries_migrate_partition(u64 handle)
{
int ret;
unsigned int factor = 0;
#ifdef CONFIG_PPC_WATCHDOG
factor = nmi_wd_lpm_factor;
#endif
/*
* When the migration is initiated, the hypervisor changes VAS
* mappings to prepare before OS gets the notification and
* closes all VAS windows. NX generates continuous faults during
* this time and the user space can not differentiate these
* faults from the migration event. So reduce this time window
* by closing VAS windows at the beginning of this function.
*/
vas_migration_handler(VAS_SUSPEND);
ret = wait_for_vasi_session_suspending(handle);
if (ret)
goto out;
if (factor)
watchdog_hardlockup_set_timeout_pct(factor);
ret = pseries_suspend(handle);
if (ret == 0) {
post_mobility_fixup();
/*
* Wait until the memory transfer is complete, so that the user
* space process returns from the syscall after the transfer is
* complete. This allows the user hooks to be executed at the
* right time.
*/
wait_for_vasi_session_completed(handle);
} else
pseries_cancel_migration(handle, ret);
if (factor)
watchdog_hardlockup_set_timeout_pct(0);
out:
vas_migration_handler(VAS_RESUME);
return ret;
}
int rtas_syscall_dispatch_ibm_suspend_me(u64 handle)
{
return pseries_migrate_partition(handle);
}
static ssize_t migration_store(const struct class *class,
const struct class_attribute *attr, const char *buf,
size_t count)
{
u64 streamid;
int rc;
rc = kstrtou64(buf, 0, &streamid);
if (rc)
return rc;
rc = pseries_migrate_partition(streamid);
if (rc)
return rc;
return count;
}
/*
* Used by drmgr to determine the kernel behavior of the migration interface.
*
* Version 1: Performs all PAPR requirements for migration including
* firmware activation and device tree update.
*/
#define MIGRATION_API_VERSION 1
static CLASS_ATTR_WO(migration);
static CLASS_ATTR_STRING(api_version, 0444, __stringify(MIGRATION_API_VERSION));
static int __init mobility_sysfs_init(void)
{
int rc;
mobility_kobj = kobject_create_and_add("mobility", kernel_kobj);
if (!mobility_kobj)
return -ENOMEM;
rc = sysfs_create_file(mobility_kobj, &class_attr_migration.attr);
if (rc)
pr_err("unable to create migration sysfs file (%d)\n", rc);
rc = sysfs_create_file(mobility_kobj, &class_attr_api_version.attr.attr);
if (rc)
pr_err("unable to create api_version sysfs file (%d)\n", rc);
return 0;
}
machine_device_initcall(pseries, mobility_sysfs_init);
| linux-master | arch/powerpc/platforms/pseries/mobility.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pSeries_reconfig.c - support for dynamic reconfiguration (including PCI
* Hotplug and Dynamic Logical Partitioning on RPA platforms).
*
* Copyright (C) 2005 Nathan Lynch
* Copyright (C) 2005 IBM Corporation
*/
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/mmu.h>
#include "of_helpers.h"
static int pSeries_reconfig_add_node(const char *path, struct property *proplist)
{
struct device_node *np;
int err = -ENOMEM;
np = kzalloc(sizeof(*np), GFP_KERNEL);
if (!np)
goto out_err;
np->full_name = kstrdup(kbasename(path), GFP_KERNEL);
if (!np->full_name)
goto out_err;
np->properties = proplist;
of_node_set_flag(np, OF_DYNAMIC);
of_node_init(np);
np->parent = pseries_of_derive_parent(path);
if (IS_ERR(np->parent)) {
err = PTR_ERR(np->parent);
goto out_err;
}
err = of_attach_node(np);
if (err) {
printk(KERN_ERR "Failed to add device node %s\n", path);
goto out_err;
}
of_node_put(np->parent);
return 0;
out_err:
if (np) {
of_node_put(np->parent);
kfree(np->full_name);
kfree(np);
}
return err;
}
static int pSeries_reconfig_remove_node(struct device_node *np)
{
struct device_node *parent, *child;
parent = of_get_parent(np);
if (!parent)
return -EINVAL;
if ((child = of_get_next_child(np, NULL))) {
of_node_put(child);
of_node_put(parent);
return -EBUSY;
}
of_detach_node(np);
of_node_put(parent);
return 0;
}
/*
* /proc/powerpc/ofdt - yucky binary interface for adding and removing
* OF device nodes. Should be deprecated as soon as we get an
* in-kernel wrapper for the RTAS ibm,configure-connector call.
*/
static void release_prop_list(const struct property *prop)
{
struct property *next;
for (; prop; prop = next) {
next = prop->next;
kfree(prop->name);
kfree(prop->value);
kfree(prop);
}
}
/**
* parse_next_property - process the next property from raw input buffer
* @buf: input buffer, must be nul-terminated
* @end: end of the input buffer + 1, for validation
* @name: return value; set to property name in buf
* @length: return value; set to length of value
* @value: return value; set to the property value in buf
*
* Note that the caller must make copies of the name and value returned,
* this function does no allocation or copying of the data. Return value
* is set to the next name in buf, or NULL on error.
*/
static char * parse_next_property(char *buf, char *end, char **name, int *length,
unsigned char **value)
{
char *tmp;
*name = buf;
tmp = strchr(buf, ' ');
if (!tmp) {
printk(KERN_ERR "property parse failed in %s at line %d\n",
__func__, __LINE__);
return NULL;
}
*tmp = '\0';
if (++tmp >= end) {
printk(KERN_ERR "property parse failed in %s at line %d\n",
__func__, __LINE__);
return NULL;
}
/* now we're on the length */
*length = -1;
*length = simple_strtoul(tmp, &tmp, 10);
if (*length == -1) {
printk(KERN_ERR "property parse failed in %s at line %d\n",
__func__, __LINE__);
return NULL;
}
if (*tmp != ' ' || ++tmp >= end) {
printk(KERN_ERR "property parse failed in %s at line %d\n",
__func__, __LINE__);
return NULL;
}
/* now we're on the value */
*value = tmp;
tmp += *length;
if (tmp > end) {
printk(KERN_ERR "property parse failed in %s at line %d\n",
__func__, __LINE__);
return NULL;
}
else if (tmp < end && *tmp != ' ' && *tmp != '\0') {
printk(KERN_ERR "property parse failed in %s at line %d\n",
__func__, __LINE__);
return NULL;
}
tmp++;
/* and now we should be on the next name, or the end */
return tmp;
}
static struct property *new_property(const char *name, const int length,
const unsigned char *value, struct property *last)
{
struct property *new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
return NULL;
if (!(new->name = kstrdup(name, GFP_KERNEL)))
goto cleanup;
if (!(new->value = kmalloc(length + 1, GFP_KERNEL)))
goto cleanup;
memcpy(new->value, value, length);
*(((char *)new->value) + length) = 0;
new->length = length;
new->next = last;
return new;
cleanup:
kfree(new->name);
kfree(new->value);
kfree(new);
return NULL;
}
static int do_add_node(char *buf, size_t bufsize)
{
char *path, *end, *name;
struct device_node *np;
struct property *prop = NULL;
unsigned char* value;
int length, rv = 0;
end = buf + bufsize;
path = buf;
buf = strchr(buf, ' ');
if (!buf)
return -EINVAL;
*buf = '\0';
buf++;
if ((np = of_find_node_by_path(path))) {
of_node_put(np);
return -EINVAL;
}
/* rv = build_prop_list(tmp, bufsize - (tmp - buf), &proplist); */
while (buf < end &&
(buf = parse_next_property(buf, end, &name, &length, &value))) {
struct property *last = prop;
prop = new_property(name, length, value, last);
if (!prop) {
rv = -ENOMEM;
prop = last;
goto out;
}
}
if (!buf) {
rv = -EINVAL;
goto out;
}
rv = pSeries_reconfig_add_node(path, prop);
out:
if (rv)
release_prop_list(prop);
return rv;
}
static int do_remove_node(char *buf)
{
struct device_node *node;
int rv = -ENODEV;
if ((node = of_find_node_by_path(buf)))
rv = pSeries_reconfig_remove_node(node);
of_node_put(node);
return rv;
}
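/*
 * Parse the leading phandle token from @buf and look up the corresponding
 * device node in *@npp. Returns a pointer just past the phandle token, or
 * NULL if no separator was found.
 */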
static char *parse_node(char *buf, size_t bufsize, struct device_node **npp)
{
char *handle_str;
phandle handle;
*npp = NULL;
handle_str = buf;
buf = strchr(buf, ' ');
if (!buf)
return NULL;
*buf = '\0';
buf++;
handle = simple_strtoul(handle_str, NULL, 0);
*npp = of_find_node_by_phandle(handle);
return buf;
}
static int do_add_property(char *buf, size_t bufsize)
{
struct property *prop = NULL;
struct device_node *np;
unsigned char *value;
char *name, *end;
int length;
end = buf + bufsize;
buf = parse_node(buf, bufsize, &np);
if (!np)
return -ENODEV;
if (parse_next_property(buf, end, &name, &length, &value) == NULL)
return -EINVAL;
prop = new_property(name, length, value, NULL);
if (!prop)
return -ENOMEM;
of_add_property(np, prop);
return 0;
}
static int do_remove_property(char *buf, size_t bufsize)
{
struct device_node *np;
char *tmp;
buf = parse_node(buf, bufsize, &np);
if (!np)
return -ENODEV;
tmp = strchr(buf,' ');
if (tmp)
*tmp = '\0';
if (strlen(buf) == 0)
return -EINVAL;
return of_remove_property(np, of_find_property(np, buf, NULL));
}
static int do_update_property(char *buf, size_t bufsize)
{
struct device_node *np;
unsigned char *value;
char *name, *end, *next_prop;
int length;
struct property *newprop;
buf = parse_node(buf, bufsize, &np);
end = buf + bufsize;
if (!np)
return -ENODEV;
next_prop = parse_next_property(buf, end, &name, &length, &value);
if (!next_prop)
return -EINVAL;
if (!strlen(name))
return -ENODEV;
newprop = new_property(name, length, value, NULL);
if (!newprop)
return -ENOMEM;
if (!strcmp(name, "slb-size") || !strcmp(name, "ibm,slb-size"))
slb_set_size(*(int *)value);
return of_update_property(np, newprop);
}
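/*
 * Illustrative (hypothetical node path and values) usage of
 * /proc/powerpc/ofdt, following the command dispatch in ofdt_write() and
 * the "<name> <length> <value>" layout expected by parse_next_property():
 *
 *   echo -n "add_node /example-node prop 4 abcd" > /proc/powerpc/ofdt
 *   echo -n "remove_node /example-node" > /proc/powerpc/ofdt
 */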
/**
* ofdt_write - perform operations on the Open Firmware device tree
*
* @file: not used
* @buf: command and arguments
* @count: size of the command buffer
* @off: not used
*
 * Operations supported at this time are addition and removal of
 * whole nodes along with their properties, and addition, removal
 * and update of individual properties.
*/
static ssize_t ofdt_write(struct file *file, const char __user *buf, size_t count,
loff_t *off)
{
int rv;
char *kbuf;
char *tmp;
rv = security_locked_down(LOCKDOWN_DEVICE_TREE);
if (rv)
return rv;
kbuf = memdup_user_nul(buf, count);
if (IS_ERR(kbuf))
return PTR_ERR(kbuf);
tmp = strchr(kbuf, ' ');
if (!tmp) {
rv = -EINVAL;
goto out;
}
*tmp = '\0';
tmp++;
if (!strcmp(kbuf, "add_node"))
rv = do_add_node(tmp, count - (tmp - kbuf));
else if (!strcmp(kbuf, "remove_node"))
rv = do_remove_node(tmp);
else if (!strcmp(kbuf, "add_property"))
rv = do_add_property(tmp, count - (tmp - kbuf));
else if (!strcmp(kbuf, "remove_property"))
rv = do_remove_property(tmp, count - (tmp - kbuf));
else if (!strcmp(kbuf, "update_property"))
rv = do_update_property(tmp, count - (tmp - kbuf));
else
rv = -EINVAL;
out:
kfree(kbuf);
return rv ? rv : count;
}
static const struct proc_ops ofdt_proc_ops = {
.proc_write = ofdt_write,
.proc_lseek = noop_llseek,
};
/* create /proc/powerpc/ofdt write-only by root */
static int proc_ppc64_create_ofdt(void)
{
struct proc_dir_entry *ent;
ent = proc_create("powerpc/ofdt", 0200, NULL, &ofdt_proc_ops);
if (ent)
proc_set_size(ent, 0);
return 0;
}
machine_device_initcall(pseries, proc_ppc64_create_ofdt);
| linux-master | arch/powerpc/platforms/pseries/reconfig.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2010 Brian King IBM Corporation
*/
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/suspend.h>
#include <linux/stat.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/rtas.h>
#include <asm/topology.h>
static struct device suspend_dev;
/**
* pseries_suspend_begin - First phase of hibernation
*
* Check to ensure we are in a valid state to hibernate
*
* Return value:
* 0 on success / other on failure
**/
static int pseries_suspend_begin(u64 stream_id)
{
long vasi_state, rc;
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
/* Make sure the state is valid */
rc = plpar_hcall(H_VASI_STATE, retbuf, stream_id);
vasi_state = retbuf[0];
if (rc) {
		pr_err("pseries_suspend_begin: vasi_state returned %ld\n", rc);
return rc;
} else if (vasi_state == H_VASI_ENABLED) {
return -EAGAIN;
} else if (vasi_state != H_VASI_SUSPENDING) {
pr_err("pseries_suspend_begin: vasi_state returned state %ld\n",
vasi_state);
return -EIO;
}
return 0;
}
/**
* pseries_suspend_enter - Final phase of hibernation
*
* Return value:
* 0 on success / other on failure
**/
static int pseries_suspend_enter(suspend_state_t state)
{
return rtas_ibm_suspend_me(NULL);
}
/**
* store_hibernate - Initiate partition hibernation
* @dev: subsys root device
* @attr: device attribute struct
* @buf: buffer
* @count: buffer size
*
* Write the stream ID received from the HMC to this file
* to trigger hibernating the partition
*
* Return value:
* number of bytes printed to buffer / other on failure
**/
static ssize_t store_hibernate(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
u64 stream_id;
int rc;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
stream_id = simple_strtoul(buf, NULL, 16);
do {
rc = pseries_suspend_begin(stream_id);
if (rc == -EAGAIN)
ssleep(1);
} while (rc == -EAGAIN);
if (!rc)
rc = pm_suspend(PM_SUSPEND_MEM);
if (!rc) {
rc = count;
post_mobility_fixup();
}
return rc;
}
#define USER_DT_UPDATE 0
#define KERN_DT_UPDATE 1
/**
 * show_hibernate - Report device tree update responsibility
* @dev: subsys root device
* @attr: device attribute struct
* @buf: buffer
*
* Report whether a device tree update is performed by the kernel after a
* resume, or if drmgr must coordinate the update from user space.
*
* Return value:
* 0 if drmgr is to initiate update, and 1 otherwise
**/
static ssize_t show_hibernate(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", KERN_DT_UPDATE);
}
static DEVICE_ATTR(hibernate, 0644, show_hibernate, store_hibernate);
static struct bus_type suspend_subsys = {
.name = "power",
.dev_name = "power",
};
static const struct platform_suspend_ops pseries_suspend_ops = {
.valid = suspend_valid_only_mem,
.enter = pseries_suspend_enter,
};
/**
* pseries_suspend_sysfs_register - Register with sysfs
*
* Return value:
* 0 on success / other on failure
**/
static int pseries_suspend_sysfs_register(struct device *dev)
{
struct device *dev_root;
int rc;
if ((rc = subsys_system_register(&suspend_subsys, NULL)))
return rc;
dev->id = 0;
dev->bus = &suspend_subsys;
dev_root = bus_get_dev_root(&suspend_subsys);
if (dev_root) {
rc = device_create_file(dev_root, &dev_attr_hibernate);
put_device(dev_root);
if (rc)
goto subsys_unregister;
}
return 0;
subsys_unregister:
bus_unregister(&suspend_subsys);
return rc;
}
/**
* pseries_suspend_init - initcall for pSeries suspend
*
* Return value:
* 0 on success / other on failure
**/
static int __init pseries_suspend_init(void)
{
int rc;
if (!firmware_has_feature(FW_FEATURE_LPAR))
return 0;
if ((rc = pseries_suspend_sysfs_register(&suspend_dev)))
return rc;
suspend_set_ops(&pseries_suspend_ops);
return 0;
}
machine_device_initcall(pseries, pseries_suspend_init);
| linux-master | arch/powerpc/platforms/pseries/suspend.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2001 Dave Engebretsen, IBM Corporation
* Copyright (C) 2003 Anton Blanchard <[email protected]>, IBM
*
* pSeries specific routines for PCI.
*/
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <asm/eeh.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/pci.h>
#include "pseries.h"
#if 0
void pcibios_name_device(struct pci_dev *dev)
{
struct device_node *dn;
/*
* Add IBM loc code (slot) as a prefix to the device names for service
*/
dn = pci_device_to_OF_node(dev);
if (dn) {
const char *loc_code = of_get_property(dn, "ibm,loc-code",
NULL);
if (loc_code) {
int loc_len = strlen(loc_code);
if (loc_len < sizeof(dev->dev.name)) {
memmove(dev->dev.name+loc_len+1, dev->dev.name,
sizeof(dev->dev.name)-loc_len-1);
memcpy(dev->dev.name, loc_code, loc_len);
dev->dev.name[loc_len] = ' ';
dev->dev.name[sizeof(dev->dev.name)-1] = '\0';
}
}
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device);
#endif
#ifdef CONFIG_PCI_IOV
#define MAX_VFS_FOR_MAP_PE 256
struct pe_map_bar_entry {
__be64 bar; /* Input: Virtual Function BAR */
__be16 rid; /* Input: Virtual Function Router ID */
__be16 pe_num; /* Output: Virtual Function PE Number */
__be32 reserved; /* Reserved Space */
};
static int pseries_send_map_pe(struct pci_dev *pdev, u16 num_vfs,
struct pe_map_bar_entry *vf_pe_array)
{
struct pci_dn *pdn;
int rc;
unsigned long buid, addr;
int ibm_map_pes = rtas_function_token(RTAS_FN_IBM_OPEN_SRIOV_MAP_PE_NUMBER);
if (ibm_map_pes == RTAS_UNKNOWN_SERVICE)
return -EINVAL;
pdn = pci_get_pdn(pdev);
addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
buid = pdn->phb->buid;
spin_lock(&rtas_data_buf_lock);
memcpy(rtas_data_buf, vf_pe_array,
RTAS_DATA_BUF_SIZE);
rc = rtas_call(ibm_map_pes, 5, 1, NULL, addr,
BUID_HI(buid), BUID_LO(buid),
rtas_data_buf,
num_vfs * sizeof(struct pe_map_bar_entry));
memcpy(vf_pe_array, rtas_data_buf, RTAS_DATA_BUF_SIZE);
spin_unlock(&rtas_data_buf_lock);
if (rc)
dev_err(&pdev->dev,
"%s: Failed to associate pes PE#%lx, rc=%x\n",
__func__, addr, rc);
return rc;
}
static void pseries_set_pe_num(struct pci_dev *pdev, u16 vf_index, __be16 pe_num)
{
struct pci_dn *pdn;
pdn = pci_get_pdn(pdev);
pdn->pe_num_map[vf_index] = be16_to_cpu(pe_num);
dev_dbg(&pdev->dev, "VF %04x:%02x:%02x.%x associated with PE#%x\n",
pci_domain_nr(pdev->bus),
pdev->bus->number,
PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)),
pdn->pe_num_map[vf_index]);
}
static int pseries_associate_pes(struct pci_dev *pdev, u16 num_vfs)
{
struct pci_dn *pdn;
int i, rc, vf_index;
struct pe_map_bar_entry *vf_pe_array;
struct resource *res;
u64 size;
vf_pe_array = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
if (!vf_pe_array)
return -ENOMEM;
pdn = pci_get_pdn(pdev);
/* create firmware structure to associate pes */
for (vf_index = 0; vf_index < num_vfs; vf_index++) {
pdn->pe_num_map[vf_index] = IODA_INVALID_PE;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
res = &pdev->resource[i + PCI_IOV_RESOURCES];
if (!res->parent)
continue;
size = pcibios_iov_resource_alignment(pdev, i +
PCI_IOV_RESOURCES);
vf_pe_array[vf_index].bar =
cpu_to_be64(res->start + size * vf_index);
vf_pe_array[vf_index].rid =
cpu_to_be16((pci_iov_virtfn_bus(pdev, vf_index)
<< 8) | pci_iov_virtfn_devfn(pdev,
vf_index));
vf_pe_array[vf_index].pe_num =
cpu_to_be16(IODA_INVALID_PE);
}
}
rc = pseries_send_map_pe(pdev, num_vfs, vf_pe_array);
/* Only zero is success */
if (!rc)
for (vf_index = 0; vf_index < num_vfs; vf_index++)
pseries_set_pe_num(pdev, vf_index,
vf_pe_array[vf_index].pe_num);
kfree(vf_pe_array);
return rc;
}
static int pseries_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
struct pci_dn *pdn;
int rc;
const int *max_vfs;
int max_config_vfs;
struct device_node *dn = pci_device_to_OF_node(pdev);
max_vfs = of_get_property(dn, "ibm,number-of-configurable-vfs", NULL);
if (!max_vfs)
return -EINVAL;
/* First integer stores max config */
max_config_vfs = of_read_number(&max_vfs[0], 1);
if (max_config_vfs < num_vfs && num_vfs > MAX_VFS_FOR_MAP_PE) {
dev_err(&pdev->dev,
"Num VFs %x > %x Configurable VFs\n",
num_vfs, (num_vfs > MAX_VFS_FOR_MAP_PE) ?
MAX_VFS_FOR_MAP_PE : max_config_vfs);
return -EINVAL;
}
pdn = pci_get_pdn(pdev);
pdn->pe_num_map = kmalloc_array(num_vfs,
sizeof(*pdn->pe_num_map),
GFP_KERNEL);
if (!pdn->pe_num_map)
return -ENOMEM;
rc = pseries_associate_pes(pdev, num_vfs);
/* Anything other than zero is failure */
if (rc) {
dev_err(&pdev->dev, "Failure to enable sriov: %x\n", rc);
kfree(pdn->pe_num_map);
} else {
pci_vf_drivers_autoprobe(pdev, false);
}
return rc;
}
static int pseries_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
/* Allocate PCI data */
add_sriov_vf_pdns(pdev);
return pseries_pci_sriov_enable(pdev, num_vfs);
}
static int pseries_pcibios_sriov_disable(struct pci_dev *pdev)
{
struct pci_dn *pdn;
pdn = pci_get_pdn(pdev);
/* Releasing pe_num_map */
kfree(pdn->pe_num_map);
/* Release PCI data */
remove_sriov_vf_pdns(pdev);
pci_vf_drivers_autoprobe(pdev, true);
return 0;
}
#endif
static void __init pSeries_request_regions(void)
{
if (!isa_io_base)
return;
request_region(0x20,0x20,"pic1");
request_region(0xa0,0x20,"pic2");
request_region(0x00,0x20,"dma1");
request_region(0x40,0x20,"timer");
request_region(0x80,0x10,"dma page reg");
request_region(0xc0,0x20,"dma2");
}
void __init pSeries_final_fixup(void)
{
pSeries_request_regions();
eeh_show_enabled();
#ifdef CONFIG_PCI_IOV
ppc_md.pcibios_sriov_enable = pseries_pcibios_sriov_enable;
ppc_md.pcibios_sriov_disable = pseries_pcibios_sriov_disable;
#endif
}
/*
* Assume the winbond 82c105 is the IDE controller on a
* p610/p615/p630. We should probably be more careful in case
* someone tries to plug in a similar adapter.
*/
static void fixup_winbond_82c105(struct pci_dev* dev)
{
struct resource *r;
unsigned int reg;
if (!machine_is(pseries))
return;
printk("Using INTC for W82c105 IDE controller.\n");
	pci_read_config_dword(dev, 0x40, &reg);
/* Enable LEGIRQ to use INTC instead of ISA interrupts */
pci_write_config_dword(dev, 0x40, reg | (1<<11));
pci_dev_for_each_resource(dev, r) {
/* zap the 2nd function of the winbond chip */
if (dev->bus->number == 0 && dev->devfn == 0x81 &&
r->flags & IORESOURCE_IO)
r->flags &= ~IORESOURCE_IO;
if (r->start == 0 && r->end) {
r->flags = 0;
r->end = 0;
}
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105,
fixup_winbond_82c105);
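/*
 * Translate a value from the ibm,pcie-link-speed-stats property into the
 * generic enum pci_bus_speed used by the PCI core.
 */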
static enum pci_bus_speed prop_to_pci_speed(u32 prop)
{
switch (prop) {
case 0x01:
return PCIE_SPEED_2_5GT;
case 0x02:
return PCIE_SPEED_5_0GT;
case 0x04:
return PCIE_SPEED_8_0GT;
case 0x08:
return PCIE_SPEED_16_0GT;
case 0x10:
return PCIE_SPEED_32_0GT;
default:
pr_debug("Unexpected PCI link speed property value\n");
return PCI_SPEED_UNKNOWN;
}
}
int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
{
struct device_node *dn, *pdn;
struct pci_bus *bus;
u32 pcie_link_speed_stats[2];
int rc;
bus = bridge->bus;
/* Rely on the pcibios_free_controller_deferred() callback. */
pci_set_host_bridge_release(bridge, pcibios_free_controller_deferred,
(void *) pci_bus_to_host(bus));
dn = pcibios_get_phb_of_node(bus);
if (!dn)
return 0;
for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
rc = of_property_read_u32_array(pdn,
"ibm,pcie-link-speed-stats",
&pcie_link_speed_stats[0], 2);
if (!rc)
break;
}
of_node_put(pdn);
if (rc) {
pr_debug("no ibm,pcie-link-speed-stats property\n");
return 0;
}
bus->max_bus_speed = prop_to_pci_speed(pcie_link_speed_stats[0]);
bus->cur_bus_speed = prop_to_pci_speed(pcie_link_speed_stats[1]);
return 0;
}
| linux-master | arch/powerpc/platforms/pseries/pci.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2006 Michael Ellerman, IBM Corporation
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/firmware.h>
#include <asm/kexec.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/smp.h>
#include <asm/plpar_wrappers.h>
#include "pseries.h"
void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
{
/*
* Don't risk a hypervisor call if we're crashing
* XXX: Why? The hypervisor is not crashing. It might be better
* to at least attempt unregister to avoid the hypervisor stepping
* on our memory.
*/
if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) {
int ret;
int cpu = smp_processor_id();
int hwcpu = hard_smp_processor_id();
if (get_lppaca()->dtl_enable_mask) {
ret = unregister_dtl(hwcpu);
if (ret) {
pr_err("WARNING: DTL deregistration for cpu "
"%d (hw %d) failed with %d\n",
cpu, hwcpu, ret);
}
}
ret = unregister_slb_shadow(hwcpu);
if (ret) {
pr_err("WARNING: SLB shadow buffer deregistration "
"for cpu %d (hw %d) failed with %d\n",
cpu, hwcpu, ret);
}
ret = unregister_vpa(hwcpu);
if (ret) {
pr_err("WARNING: VPA deregistration for cpu %d "
"(hw %d) failed with %d\n", cpu, hwcpu, ret);
}
}
if (xive_enabled()) {
xive_teardown_cpu();
if (!secondary)
xive_shutdown();
} else
xics_kexec_teardown_cpu(secondary);
}
void pseries_machine_kexec(struct kimage *image)
{
if (firmware_has_feature(FW_FEATURE_SET_MODE))
pseries_disable_reloc_on_exc();
default_machine_kexec(image);
}
| linux-master | arch/powerpc/platforms/pseries/kexec.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PowerPC64 LPAR Configuration Information Driver
*
* Dave Engebretsen [email protected]
* Copyright (c) 2003 Dave Engebretsen
* Will Schmidt [email protected]
* SPLPAR updates, Copyright (c) 2003 Will Schmidt IBM Corporation.
* seq_file updates, Copyright (c) 2004 Will Schmidt IBM Corporation.
* Nathan Lynch [email protected]
* Added lparcfg_write, Copyright (C) 2004 Nathan Lynch IBM Corporation.
*
* This driver creates a proc file at /proc/ppc64/lparcfg which contains
* keyword - value pairs that specify the configuration of the partition.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <asm/papr-sysparm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <asm/lppaca.h>
#include <asm/hvcall.h>
#include <asm/firmware.h>
#include <asm/rtas.h>
#include <asm/time.h>
#include <asm/vdso_datapage.h>
#include <asm/vio.h>
#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/drmem.h>
#include "pseries.h"
#include "vas.h" /* pseries_vas_dlpar_cpu() */
/*
* This isn't a module but we expose that to userspace
* via /proc so leave the definitions here
*/
#define MODULE_VERS "1.9"
#define MODULE_NAME "lparcfg"
/* #define LPARCFG_DEBUG */
/*
* Track sum of all purrs across all processors. This is used to further
* calculate usage values by different applications
*/
static void cpu_get_purr(void *arg)
{
atomic64_t *sum = arg;
atomic64_add(mfspr(SPRN_PURR), sum);
}
static unsigned long get_purr(void)
{
atomic64_t purr = ATOMIC64_INIT(0);
on_each_cpu(cpu_get_purr, &purr, 1);
return atomic64_read(&purr);
}
/*
* Methods used to fetch LPAR data when running on a pSeries platform.
*/
struct hvcall_ppp_data {
u64 entitlement;
u64 unallocated_entitlement;
u16 group_num;
u16 pool_num;
u8 capped;
u8 weight;
u8 unallocated_weight;
u16 active_procs_in_pool;
u16 active_system_procs;
u16 phys_platform_procs;
u32 max_proc_cap_avail;
u32 entitled_proc_cap_avail;
};
/*
* H_GET_PPP hcall returns info in 4 parms.
* entitled_capacity,unallocated_capacity,
* aggregation, resource_capability).
*
* R4 = Entitled Processor Capacity Percentage.
* R5 = Unallocated Processor Capacity Percentage.
* R6 (AABBCCDDEEFFGGHH).
* XXXX - reserved (0)
* XXXX - reserved (0)
* XXXX - Group Number
* XXXX - Pool Number.
* R7 (IIJJKKLLMMNNOOPP).
* XX - reserved. (0)
* XX - bit 0-6 reserved (0). bit 7 is Capped indicator.
* XX - variable processor Capacity Weight
* XX - Unallocated Variable Processor Capacity Weight.
* XXXX - Active processors in Physical Processor Pool.
* XXXX - Processors active on platform.
* R8 (QQQQRRRRRRSSSSSS). if ibm,partition-performance-parameters-level >= 1
* XXXX - Physical platform procs allocated to virtualization.
* XXXXXX - Max procs capacity % available to the partitions pool.
* XXXXXX - Entitled procs capacity % available to the
* partitions pool.
*/
static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
{
unsigned long rc;
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
rc = plpar_hcall9(H_GET_PPP, retbuf);
ppp_data->entitlement = retbuf[0];
ppp_data->unallocated_entitlement = retbuf[1];
ppp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
ppp_data->pool_num = retbuf[2] & 0xffff;
ppp_data->capped = (retbuf[3] >> 6 * 8) & 0x01;
ppp_data->weight = (retbuf[3] >> 5 * 8) & 0xff;
ppp_data->unallocated_weight = (retbuf[3] >> 4 * 8) & 0xff;
ppp_data->active_procs_in_pool = (retbuf[3] >> 2 * 8) & 0xffff;
ppp_data->active_system_procs = retbuf[3] & 0xffff;
ppp_data->phys_platform_procs = retbuf[4] >> 6 * 8;
ppp_data->max_proc_cap_avail = (retbuf[4] >> 3 * 8) & 0xffffff;
ppp_data->entitled_proc_cap_avail = retbuf[4] & 0xffffff;
return rc;
}
static void show_gpci_data(struct seq_file *m)
{
struct hv_gpci_request_buffer *buf;
unsigned int affinity_score;
long ret;
buf = kmalloc(sizeof(*buf), GFP_KERNEL);
if (buf == NULL)
return;
/*
* Show the local LPAR's affinity score.
*
* 0xB1 selects the Affinity_Domain_Info_By_Partition subcall.
* The score is at byte 0xB in the output buffer.
*/
memset(&buf->params, 0, sizeof(buf->params));
buf->params.counter_request = cpu_to_be32(0xB1);
buf->params.starting_index = cpu_to_be32(-1); /* local LPAR */
buf->params.counter_info_version_in = 0x5; /* v5+ for score */
ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO, virt_to_phys(buf),
sizeof(*buf));
if (ret != H_SUCCESS) {
pr_debug("hcall failed: H_GET_PERF_COUNTER_INFO: %ld, %x\n",
ret, be32_to_cpu(buf->params.detail_rc));
goto out;
}
affinity_score = buf->bytes[0xB];
seq_printf(m, "partition_affinity_score=%u\n", affinity_score);
out:
kfree(buf);
}
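/*
 * Retrieve the shared processor pool idle time and the number of
 * processors in the pool via the H_PIC hcall.
 */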
static unsigned h_pic(unsigned long *pool_idle_time,
unsigned long *num_procs)
{
unsigned long rc;
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
rc = plpar_hcall(H_PIC, retbuf);
*pool_idle_time = retbuf[0];
*num_procs = retbuf[1];
return rc;
}
/*
* parse_ppp_data
* Parse out the data returned from h_get_ppp and h_pic
*/
static void parse_ppp_data(struct seq_file *m)
{
struct hvcall_ppp_data ppp_data;
struct device_node *root;
const __be32 *perf_level;
int rc;
rc = h_get_ppp(&ppp_data);
if (rc)
return;
seq_printf(m, "partition_entitled_capacity=%lld\n",
ppp_data.entitlement);
seq_printf(m, "group=%d\n", ppp_data.group_num);
seq_printf(m, "system_active_processors=%d\n",
ppp_data.active_system_procs);
/* pool related entries are appropriate for shared configs */
if (lppaca_shared_proc()) {
unsigned long pool_idle_time, pool_procs;
seq_printf(m, "pool=%d\n", ppp_data.pool_num);
/* report pool_capacity in percentage */
seq_printf(m, "pool_capacity=%d\n",
ppp_data.active_procs_in_pool * 100);
h_pic(&pool_idle_time, &pool_procs);
seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time);
seq_printf(m, "pool_num_procs=%ld\n", pool_procs);
}
seq_printf(m, "unallocated_capacity_weight=%d\n",
ppp_data.unallocated_weight);
seq_printf(m, "capacity_weight=%d\n", ppp_data.weight);
seq_printf(m, "capped=%d\n", ppp_data.capped);
seq_printf(m, "unallocated_capacity=%lld\n",
ppp_data.unallocated_entitlement);
/* The last bits of information returned from h_get_ppp are only
* valid if the ibm,partition-performance-parameters-level
* property is >= 1.
*/
root = of_find_node_by_path("/");
if (root) {
perf_level = of_get_property(root,
"ibm,partition-performance-parameters-level",
NULL);
if (perf_level && (be32_to_cpup(perf_level) >= 1)) {
seq_printf(m,
"physical_procs_allocated_to_virtualization=%d\n",
ppp_data.phys_platform_procs);
seq_printf(m, "max_proc_capacity_available=%d\n",
ppp_data.max_proc_cap_avail);
seq_printf(m, "entitled_proc_capacity_available=%d\n",
ppp_data.entitled_proc_cap_avail);
}
of_node_put(root);
}
}
/**
* parse_mpp_data
* Parse out data returned from h_get_mpp
*/
static void parse_mpp_data(struct seq_file *m)
{
struct hvcall_mpp_data mpp_data;
int rc;
rc = h_get_mpp(&mpp_data);
if (rc)
return;
seq_printf(m, "entitled_memory=%ld\n", mpp_data.entitled_mem);
if (mpp_data.mapped_mem != -1)
seq_printf(m, "mapped_entitled_memory=%ld\n",
mpp_data.mapped_mem);
seq_printf(m, "entitled_memory_group_number=%d\n", mpp_data.group_num);
seq_printf(m, "entitled_memory_pool_number=%d\n", mpp_data.pool_num);
seq_printf(m, "entitled_memory_weight=%d\n", mpp_data.mem_weight);
seq_printf(m, "unallocated_entitled_memory_weight=%d\n",
mpp_data.unallocated_mem_weight);
seq_printf(m, "unallocated_io_mapping_entitlement=%ld\n",
mpp_data.unallocated_entitlement);
if (mpp_data.pool_size != -1)
seq_printf(m, "entitled_memory_pool_size=%ld bytes\n",
mpp_data.pool_size);
seq_printf(m, "entitled_memory_loan_request=%ld\n",
mpp_data.loan_request);
seq_printf(m, "backing_memory=%ld bytes\n", mpp_data.backing_mem);
}
/**
* parse_mpp_x_data
* Parse out data returned from h_get_mpp_x
*/
static void parse_mpp_x_data(struct seq_file *m)
{
struct hvcall_mpp_x_data mpp_x_data;
if (!firmware_has_feature(FW_FEATURE_XCMO))
return;
if (h_get_mpp_x(&mpp_x_data))
return;
seq_printf(m, "coalesced_bytes=%ld\n", mpp_x_data.coalesced_bytes);
if (mpp_x_data.pool_coalesced_bytes)
seq_printf(m, "pool_coalesced_bytes=%ld\n",
mpp_x_data.pool_coalesced_bytes);
if (mpp_x_data.pool_purr_cycles)
seq_printf(m, "coalesce_pool_purr=%ld\n", mpp_x_data.pool_purr_cycles);
if (mpp_x_data.pool_spurr_cycles)
seq_printf(m, "coalesce_pool_spurr=%ld\n", mpp_x_data.pool_spurr_cycles);
}
/*
* Read the lpar name using the RTAS ibm,get-system-parameter call.
*
* The name read through this call is updated if changes are made by the end
* user on the hypervisor side.
*
 * Some hypervisors (like QEMU) may not provide this value. In that case, a
 * non-zero value is returned.
*/
static int read_rtas_lpar_name(struct seq_file *m)
{
struct papr_sysparm_buf *buf;
int err;
buf = papr_sysparm_buf_alloc();
if (!buf)
return -ENOMEM;
err = papr_sysparm_get(PAPR_SYSPARM_LPAR_NAME, buf);
if (!err)
seq_printf(m, "partition_name=%s\n", buf->val);
papr_sysparm_buf_free(buf);
return err;
}
/*
* Read the LPAR name from the Device Tree.
*
* The value read in the DT is not updated if the end-user is touching the LPAR
* name on the hypervisor side.
*/
static int read_dt_lpar_name(struct seq_file *m)
{
const char *name;
if (of_property_read_string(of_root, "ibm,partition-name", &name))
return -ENOENT;
seq_printf(m, "partition_name=%s\n", name);
return 0;
}
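/*
 * Prefer the RTAS-provided (live) LPAR name and fall back to the static
 * device tree copy if the system parameter is unavailable.
 */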
static void read_lpar_name(struct seq_file *m)
{
if (read_rtas_lpar_name(m) && read_dt_lpar_name(m))
		pr_err_once("Error: can't get the LPAR name\n");
}
#define SPLPAR_MAXLENGTH 1026*(sizeof(char))
/*
* parse_system_parameter_string()
* Retrieve the potential_processors, max_entitled_capacity and friends
* through the get-system-parameter rtas call. Replace keyword strings as
* necessary.
*/
static void parse_system_parameter_string(struct seq_file *m)
{
struct papr_sysparm_buf *buf;
buf = papr_sysparm_buf_alloc();
if (!buf)
return;
if (papr_sysparm_get(PAPR_SYSPARM_SHARED_PROC_LPAR_ATTRS, buf)) {
goto out_free;
} else {
const char *local_buffer;
int splpar_strlen;
int idx, w_idx;
char *workbuffer = kzalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
if (!workbuffer)
goto out_free;
splpar_strlen = be16_to_cpu(buf->len);
local_buffer = buf->val;
w_idx = 0;
idx = 0;
while ((*local_buffer) && (idx < splpar_strlen)) {
workbuffer[w_idx++] = local_buffer[idx++];
if ((local_buffer[idx] == ',')
|| (local_buffer[idx] == '\0')) {
workbuffer[w_idx] = '\0';
if (w_idx) {
/* avoid the empty string */
seq_printf(m, "%s\n", workbuffer);
}
memset(workbuffer, 0, SPLPAR_MAXLENGTH);
idx++; /* skip the comma */
w_idx = 0;
} else if (local_buffer[idx] == '=') {
/* code here to replace workbuffer contents
with different keyword strings */
if (0 == strcmp(workbuffer, "MaxEntCap")) {
strcpy(workbuffer,
"partition_max_entitled_capacity");
w_idx = strlen(workbuffer);
}
if (0 == strcmp(workbuffer, "MaxPlatProcs")) {
strcpy(workbuffer,
"system_potential_processors");
w_idx = strlen(workbuffer);
}
}
}
kfree(workbuffer);
local_buffer -= 2; /* back up over strlen value */
}
out_free:
papr_sysparm_buf_free(buf);
}
/* Return the number of processors in the system.
* This function reads through the device tree and counts
 * the virtual processors; this does not include threads.
*/
static int lparcfg_count_active_processors(void)
{
struct device_node *cpus_dn;
int count = 0;
for_each_node_by_type(cpus_dn, "cpu") {
#ifdef LPARCFG_DEBUG
printk(KERN_ERR "cpus_dn %p\n", cpus_dn);
#endif
count++;
}
return count;
}
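/*
 * Report Cooperative Memory Overcommit (CMO) statistics: whether CMO is
 * enabled, the fault count and fault time summed over all possible CPUs,
 * the primary/secondary PSP values and the CMO page size.
 */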
static void pseries_cmo_data(struct seq_file *m)
{
int cpu;
unsigned long cmo_faults = 0;
unsigned long cmo_fault_time = 0;
seq_printf(m, "cmo_enabled=%d\n", firmware_has_feature(FW_FEATURE_CMO));
if (!firmware_has_feature(FW_FEATURE_CMO))
return;
for_each_possible_cpu(cpu) {
cmo_faults += be64_to_cpu(lppaca_of(cpu).cmo_faults);
cmo_fault_time += be64_to_cpu(lppaca_of(cpu).cmo_fault_time);
}
seq_printf(m, "cmo_faults=%lu\n", cmo_faults);
seq_printf(m, "cmo_fault_time_usec=%lu\n",
cmo_fault_time / tb_ticks_per_usec);
seq_printf(m, "cmo_primary_psp=%d\n", cmo_get_primary_psp());
seq_printf(m, "cmo_secondary_psp=%d\n", cmo_get_secondary_psp());
seq_printf(m, "cmo_page_size=%lu\n", cmo_get_page_size());
}
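/*
 * Sum the hypervisor dispatch (yield) and dispatch dispersion counts from
 * each CPU's lppaca and report the partition-wide totals.
 */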
static void splpar_dispatch_data(struct seq_file *m)
{
int cpu;
unsigned long dispatches = 0;
unsigned long dispatch_dispersions = 0;
for_each_possible_cpu(cpu) {
dispatches += be32_to_cpu(lppaca_of(cpu).yield_count);
dispatch_dispersions +=
be32_to_cpu(lppaca_of(cpu).dispersion_count);
}
seq_printf(m, "dispatches=%lu\n", dispatches);
seq_printf(m, "dispatch_dispersions=%lu\n", dispatch_dispersions);
}
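/* Report the power mode data returned by the H_GET_EM_PARMS hcall. */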
static void parse_em_data(struct seq_file *m)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
if (firmware_has_feature(FW_FEATURE_LPAR) &&
plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
}
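/*
 * Report MaxMem: the total size of all dynamically reconfigurable LMBs
 * plus the memory currently backing huge pages.
 */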
static void maxmem_data(struct seq_file *m)
{
unsigned long maxmem = 0;
maxmem += (unsigned long)drmem_info->n_lmbs * drmem_info->lmb_size;
maxmem += hugetlb_total_pages() * PAGE_SIZE;
seq_printf(m, "MaxMem=%lu\n", maxmem);
}
static int pseries_lparcfg_data(struct seq_file *m, void *v)
{
int partition_potential_processors;
int partition_active_processors;
struct device_node *rtas_node;
const __be32 *lrdrp = NULL;
rtas_node = of_find_node_by_path("/rtas");
if (rtas_node)
lrdrp = of_get_property(rtas_node, "ibm,lrdr-capacity", NULL);
if (lrdrp == NULL) {
partition_potential_processors = vdso_data->processorCount;
} else {
partition_potential_processors = be32_to_cpup(lrdrp + 4);
}
of_node_put(rtas_node);
partition_active_processors = lparcfg_count_active_processors();
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
/* this call handles the ibm,get-system-parameter contents */
read_lpar_name(m);
parse_system_parameter_string(m);
parse_ppp_data(m);
parse_mpp_data(m);
parse_mpp_x_data(m);
pseries_cmo_data(m);
splpar_dispatch_data(m);
seq_printf(m, "purr=%ld\n", get_purr());
seq_printf(m, "tbr=%ld\n", mftb());
} else { /* non SPLPAR case */
seq_printf(m, "system_active_processors=%d\n",
partition_potential_processors);
seq_printf(m, "system_potential_processors=%d\n",
partition_potential_processors);
seq_printf(m, "partition_max_entitled_capacity=%d\n",
partition_potential_processors * 100);
seq_printf(m, "partition_entitled_capacity=%d\n",
partition_active_processors * 100);
}
show_gpci_data(m);
seq_printf(m, "partition_active_processors=%d\n",
partition_active_processors);
seq_printf(m, "partition_potential_processors=%d\n",
partition_potential_processors);
seq_printf(m, "shared_processor_mode=%d\n",
lppaca_shared_proc());
#ifdef CONFIG_PPC_64S_HASH_MMU
if (!radix_enabled())
seq_printf(m, "slb_size=%d\n", mmu_slb_size);
#endif
parse_em_data(m);
maxmem_data(m);
seq_printf(m, "security_flavor=%u\n", pseries_security_flavor);
return 0;
}
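/*
 * Update the processor entitlement or the variable capacity weight for the
 * partition via H_SET_PPP. Exactly one of @entitlement or @weight must be
 * non-NULL; the other value is kept from the current PPP data.
 */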
static ssize_t update_ppp(u64 *entitlement, u8 *weight)
{
struct hvcall_ppp_data ppp_data;
u8 new_weight;
u64 new_entitled;
ssize_t retval;
/* Get our current parameters */
retval = h_get_ppp(&ppp_data);
if (retval)
return retval;
if (entitlement) {
new_weight = ppp_data.weight;
new_entitled = *entitlement;
} else if (weight) {
new_weight = *weight;
new_entitled = ppp_data.entitlement;
} else
return -EINVAL;
pr_debug("%s: current_entitled = %llu, current_weight = %u\n",
__func__, ppp_data.entitlement, ppp_data.weight);
pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
__func__, new_entitled, new_weight);
retval = plpar_hcall_norets(H_SET_PPP, new_entitled, new_weight);
return retval;
}
/**
* update_mpp
*
* Update the memory entitlement and weight for the partition. Caller must
* specify either a new entitlement or weight, not both, to be updated
* since the h_set_mpp call takes both entitlement and weight as parameters.
*/
static ssize_t update_mpp(u64 *entitlement, u8 *weight)
{
struct hvcall_mpp_data mpp_data;
u64 new_entitled;
u8 new_weight;
ssize_t rc;
if (entitlement) {
/* Check with vio to ensure the new memory entitlement
* can be handled.
*/
rc = vio_cmo_entitlement_update(*entitlement);
if (rc)
return rc;
}
rc = h_get_mpp(&mpp_data);
if (rc)
return rc;
if (entitlement) {
new_weight = mpp_data.mem_weight;
new_entitled = *entitlement;
} else if (weight) {
new_weight = *weight;
new_entitled = mpp_data.entitled_mem;
} else
return -EINVAL;
pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
__func__, mpp_data.entitled_mem, mpp_data.mem_weight);
pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
__func__, new_entitled, new_weight);
rc = plpar_hcall_norets(H_SET_MPP, new_entitled, new_weight);
return rc;
}
/*
* Interface for changing system parameters (variable capacity weight
* and entitled capacity). Format of input is "param_name=value";
* anything after value is ignored. Valid parameters at this time are
* "partition_entitled_capacity" and "capacity_weight". We use
* H_SET_PPP to alter parameters.
*
* This function should be invoked only on systems with
* FW_FEATURE_SPLPAR.
*/
static ssize_t lparcfg_write(struct file *file, const char __user * buf,
size_t count, loff_t * off)
{
char kbuf[64];
char *tmp;
u64 new_entitled, *new_entitled_ptr = &new_entitled;
u8 new_weight, *new_weight_ptr = &new_weight;
ssize_t retval;
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
return -EINVAL;
if (count > sizeof(kbuf))
return -EINVAL;
if (copy_from_user(kbuf, buf, count))
return -EFAULT;
kbuf[count - 1] = '\0';
tmp = strchr(kbuf, '=');
if (!tmp)
return -EINVAL;
*tmp++ = '\0';
if (!strcmp(kbuf, "partition_entitled_capacity")) {
char *endp;
*new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);
if (endp == tmp)
return -EINVAL;
retval = update_ppp(new_entitled_ptr, NULL);
if (retval == H_SUCCESS || retval == H_CONSTRAINED) {
/*
* The hypervisor assigns VAS resources based
* on entitled capacity for shared mode.
* Reconfig VAS windows based on DLPAR CPU events.
*/
if (pseries_vas_dlpar_cpu() != 0)
retval = H_HARDWARE;
}
} else if (!strcmp(kbuf, "capacity_weight")) {
char *endp;
*new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
if (endp == tmp)
return -EINVAL;
retval = update_ppp(NULL, new_weight_ptr);
} else if (!strcmp(kbuf, "entitled_memory")) {
char *endp;
*new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);
if (endp == tmp)
return -EINVAL;
retval = update_mpp(new_entitled_ptr, NULL);
} else if (!strcmp(kbuf, "entitled_memory_weight")) {
char *endp;
*new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
if (endp == tmp)
return -EINVAL;
retval = update_mpp(NULL, new_weight_ptr);
} else
return -EINVAL;
if (retval == H_SUCCESS || retval == H_CONSTRAINED) {
retval = count;
} else if (retval == H_BUSY) {
retval = -EBUSY;
} else if (retval == H_HARDWARE) {
retval = -EIO;
} else if (retval == H_PARAMETER) {
retval = -EINVAL;
}
return retval;
}
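/*
 * Top-level seq_file show routine: print the model, system-id and partition
 * number from the device tree root, then the platform specific data.
 */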
static int lparcfg_data(struct seq_file *m, void *v)
{
struct device_node *rootdn;
const char *model = "";
const char *system_id = "";
const char *tmp;
const __be32 *lp_index_ptr;
unsigned int lp_index = 0;
seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS);
rootdn = of_find_node_by_path("/");
if (rootdn) {
tmp = of_get_property(rootdn, "model", NULL);
if (tmp)
model = tmp;
tmp = of_get_property(rootdn, "system-id", NULL);
if (tmp)
system_id = tmp;
lp_index_ptr = of_get_property(rootdn, "ibm,partition-no",
NULL);
if (lp_index_ptr)
lp_index = be32_to_cpup(lp_index_ptr);
of_node_put(rootdn);
}
seq_printf(m, "serial_number=%s\n", system_id);
seq_printf(m, "system_type=%s\n", model);
seq_printf(m, "partition_id=%d\n", (int)lp_index);
return pseries_lparcfg_data(m, v);
}
static int lparcfg_open(struct inode *inode, struct file *file)
{
return single_open(file, lparcfg_data, NULL);
}
static const struct proc_ops lparcfg_proc_ops = {
.proc_read = seq_read,
.proc_write = lparcfg_write,
.proc_open = lparcfg_open,
.proc_release = single_release,
.proc_lseek = seq_lseek,
};
static int __init lparcfg_init(void)
{
umode_t mode = 0444;
/* Allow writing if we have FW_FEATURE_SPLPAR */
if (firmware_has_feature(FW_FEATURE_SPLPAR))
mode |= 0200;
if (!proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_proc_ops)) {
printk(KERN_ERR "Failed to create powerpc/lparcfg\n");
return -EIO;
}
return 0;
}
machine_device_initcall(pseries, lparcfg_init);
| linux-master | arch/powerpc/platforms/pseries/lparcfg.c |
// SPDX-License-Identifier: GPL-2.0
#include <asm/byteorder.h>
#include <asm/vphn.h>
/*
* The associativity domain numbers are returned from the hypervisor as a
* stream of mixed 16-bit and 32-bit fields. The stream is terminated by the
* special value of "all ones" (aka. 0xffff) and its size may not exceed 48
* bytes.
*
* --- 16-bit fields -->
* _________________________
* | 0 | 1 | 2 | 3 | be_packed[0]
* ------+-----+-----+------
* _________________________
* | 4 | 5 | 6 | 7 | be_packed[1]
* -------------------------
* ...
* _________________________
* | 20 | 21 | 22 | 23 | be_packed[5]
* -------------------------
*
* Convert to the sequence they would appear in the ibm,associativity property.
*/
static int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
{
__be64 be_packed[VPHN_REGISTER_COUNT];
int i, nr_assoc_doms = 0;
const __be16 *field = (const __be16 *) be_packed;
u16 last = 0;
bool is_32bit = false;
#define VPHN_FIELD_UNUSED (0xffff)
#define VPHN_FIELD_MSB (0x8000)
#define VPHN_FIELD_MASK (~VPHN_FIELD_MSB)
/* Let's fix the values returned by plpar_hcall9() */
for (i = 0; i < VPHN_REGISTER_COUNT; i++)
be_packed[i] = cpu_to_be64(packed[i]);
for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
u16 new = be16_to_cpup(field++);
if (is_32bit) {
/*
* Let's concatenate the 16 bits of this field to the
* 15 lower bits of the previous field
*/
unpacked[++nr_assoc_doms] =
cpu_to_be32(last << 16 | new);
is_32bit = false;
} else if (new == VPHN_FIELD_UNUSED)
/* This is the list terminator */
break;
else if (new & VPHN_FIELD_MSB) {
/* Data is in the lower 15 bits of this field */
unpacked[++nr_assoc_doms] =
cpu_to_be32(new & VPHN_FIELD_MASK);
} else {
/*
* Data is in the lower 15 bits of this field
* concatenated with the next 16 bit field
*/
last = new;
is_32bit = true;
}
}
/* The first cell contains the length of the property */
unpacked[0] = cpu_to_be32(nr_assoc_doms);
return nr_assoc_doms;
}
/* NOTE: This file is included by a selftest and built in userspace. */
#ifdef __KERNEL__
#include <asm/hvcall.h>
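/*
 * Query the home node associativity for @cpu (interpreted according to
 * @flags) via H_HOME_NODE_ASSOCIATIVITY and unpack the result into
 * @associativity.
 */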
long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity)
{
long rc;
long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, cpu);
if (rc == H_SUCCESS)
vphn_unpack_associativity(retbuf, associativity);
return rc;
}
#endif
| linux-master | arch/powerpc/platforms/pseries/vphn.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2006 Mike Kravetz IBM Corporation
*
* Hypervisor Call Instrumentation
*/
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <asm/hvcall.h>
#include <asm/firmware.h>
#include <asm/cputable.h>
#include <asm/trace.h>
#include <asm/machdep.h>
/* For hcall instrumentation. One structure per-hcall, per-CPU */
struct hcall_stats {
unsigned long num_calls; /* number of calls (on this CPU) */
unsigned long tb_total; /* total wall time (mftb) of calls. */
unsigned long purr_total; /* total cpu time (PURR) of calls. */
unsigned long tb_start;
unsigned long purr_start;
};
#define HCALL_STAT_ARRAY_SIZE ((MAX_HCALL_OPCODE >> 2) + 1)
static DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats);
/*
* Routines for displaying the statistics in debugfs
*/
static void *hc_start(struct seq_file *m, loff_t *pos)
{
if ((int)*pos < (HCALL_STAT_ARRAY_SIZE-1))
return (void *)(unsigned long)(*pos + 1);
return NULL;
}
static void *hc_next(struct seq_file *m, void *p, loff_t * pos)
{
++*pos;
return hc_start(m, pos);
}
static void hc_stop(struct seq_file *m, void *p)
{
}
static int hc_show(struct seq_file *m, void *p)
{
unsigned long h_num = (unsigned long)p;
struct hcall_stats *hs = m->private;
if (hs[h_num].num_calls) {
if (cpu_has_feature(CPU_FTR_PURR))
seq_printf(m, "%lu %lu %lu %lu\n", h_num<<2,
hs[h_num].num_calls,
hs[h_num].tb_total,
hs[h_num].purr_total);
else
seq_printf(m, "%lu %lu %lu\n", h_num<<2,
hs[h_num].num_calls,
hs[h_num].tb_total);
}
return 0;
}
static const struct seq_operations hcall_inst_sops = {
.start = hc_start,
.next = hc_next,
.stop = hc_stop,
.show = hc_show
};
DEFINE_SEQ_ATTRIBUTE(hcall_inst);
#define HCALL_ROOT_DIR "hcall_inst"
#define CPU_NAME_BUF_SIZE 32
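/*
 * Tracepoint probes: snapshot the timebase and PURR on hcall entry and
 * accumulate the deltas per opcode, per CPU, on hcall exit.
 */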
static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long *args)
{
struct hcall_stats *h;
if (opcode > MAX_HCALL_OPCODE)
return;
h = this_cpu_ptr(&hcall_stats[opcode / 4]);
h->tb_start = mftb();
h->purr_start = mfspr(SPRN_PURR);
}
static void probe_hcall_exit(void *ignored, unsigned long opcode, long retval,
unsigned long *retbuf)
{
struct hcall_stats *h;
if (opcode > MAX_HCALL_OPCODE)
return;
h = this_cpu_ptr(&hcall_stats[opcode / 4]);
h->num_calls++;
h->tb_total += mftb() - h->tb_start;
h->purr_total += mfspr(SPRN_PURR) - h->purr_start;
}
static int __init hcall_inst_init(void)
{
struct dentry *hcall_root;
char cpu_name_buf[CPU_NAME_BUF_SIZE];
int cpu;
if (!firmware_has_feature(FW_FEATURE_LPAR))
return 0;
if (register_trace_hcall_entry(probe_hcall_entry, NULL))
return -EINVAL;
if (register_trace_hcall_exit(probe_hcall_exit, NULL)) {
unregister_trace_hcall_entry(probe_hcall_entry, NULL);
return -EINVAL;
}
hcall_root = debugfs_create_dir(HCALL_ROOT_DIR, NULL);
for_each_possible_cpu(cpu) {
snprintf(cpu_name_buf, CPU_NAME_BUF_SIZE, "cpu%d", cpu);
debugfs_create_file(cpu_name_buf, 0444, hcall_root,
per_cpu(hcall_stats, cpu),
&hcall_inst_fops);
}
return 0;
}
machine_device_initcall(pseries, hcall_inst_init);
| linux-master | arch/powerpc/platforms/pseries/hvCall_inst.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER platform energy management driver
* Copyright (C) 2010 IBM Corporation
*
* This pseries platform device driver provides access to
* platform energy management capabilities.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/firmware.h>
#include <asm/prom.h>
#define MODULE_VERS "1.0"
#define MODULE_NAME "pseries_energy"
/* Driver flags */
static int sysfs_entries;
/* Helper routines */
/* Helper routines to convert between drc_index and cpu numbers */
static u32 cpu_to_drc_index(int cpu)
{
struct device_node *dn = NULL;
struct property *info;
int thread_index;
int rc = 1;
u32 ret = 0;
dn = of_find_node_by_path("/cpus");
if (dn == NULL)
goto err;
/* Convert logical cpu number to core number */
thread_index = cpu_core_index_of_thread(cpu);
info = of_find_property(dn, "ibm,drc-info", NULL);
if (info) {
struct of_drc_info drc;
int j;
u32 num_set_entries;
const __be32 *value;
value = of_prop_next_u32(info, NULL, &num_set_entries);
if (!value)
goto err_of_node_put;
else
value++;
for (j = 0; j < num_set_entries; j++) {
of_read_drc_info_cell(&info, &value, &drc);
if (strncmp(drc.drc_type, "CPU", 3))
goto err;
if (thread_index < drc.last_drc_index)
break;
}
ret = drc.drc_index_start + (thread_index * drc.sequential_inc);
} else {
u32 nr_drc_indexes, thread_drc_index;
/*
* The first element of ibm,drc-indexes array is the
* number of drc_indexes returned in the list. Hence
* thread_index+1 will get the drc_index corresponding
* to core number thread_index.
*/
rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
0, &nr_drc_indexes);
if (rc)
goto err_of_node_put;
WARN_ON_ONCE(thread_index > nr_drc_indexes);
rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
thread_index + 1,
&thread_drc_index);
if (rc)
goto err_of_node_put;
ret = thread_drc_index;
}
rc = 0;
err_of_node_put:
of_node_put(dn);
err:
if (rc)
printk(KERN_WARNING "cpu_to_drc_index(%d) failed", cpu);
return ret;
}
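/*
 * Reverse mapping: convert a drc_index back to the first logical cpu
 * (thread) of the corresponding core, using either the ibm,drc-info or
 * the legacy ibm,drc-indexes property.
 */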
static int drc_index_to_cpu(u32 drc_index)
{
struct device_node *dn = NULL;
struct property *info;
const int *indexes;
int thread_index = 0, cpu = 0;
int rc = 1;
dn = of_find_node_by_path("/cpus");
if (dn == NULL)
goto err;
info = of_find_property(dn, "ibm,drc-info", NULL);
if (info) {
struct of_drc_info drc;
int j;
u32 num_set_entries;
const __be32 *value;
value = of_prop_next_u32(info, NULL, &num_set_entries);
if (!value)
goto err_of_node_put;
else
value++;
for (j = 0; j < num_set_entries; j++) {
of_read_drc_info_cell(&info, &value, &drc);
if (strncmp(drc.drc_type, "CPU", 3))
goto err;
if (drc_index > drc.last_drc_index) {
cpu += drc.num_sequential_elems;
continue;
}
cpu += ((drc_index - drc.drc_index_start) /
drc.sequential_inc);
thread_index = cpu_first_thread_of_core(cpu);
rc = 0;
break;
}
} else {
unsigned long int i;
indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
if (indexes == NULL)
goto err_of_node_put;
/*
* First element in the array is the number of drc_indexes
* returned. Search through the list to find the matching
* drc_index and get the core number
*/
for (i = 0; i < indexes[0]; i++) {
if (indexes[i + 1] == drc_index)
break;
}
/* Convert core number to logical cpu number */
thread_index = cpu_first_thread_of_core(i);
rc = 0;
}
err_of_node_put:
of_node_put(dn);
err:
if (rc)
printk(KERN_WARNING "drc_index_to_cpu(%d) failed", drc_index);
return thread_index;
}
/*
* pseries hypervisor call H_BEST_ENERGY provides hints to OS on
* preferred logical cpus to activate or deactivate for optimized
* energy consumption.
*/
#define FLAGS_MODE1 0x004E200000080E01UL
#define FLAGS_MODE2 0x004E200000080401UL
#define FLAGS_ACTIVATE 0x100
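/*
 * Build a comma separated list of logical cpus that the hypervisor hints
 * should be activated (or deactivated) for better energy efficiency.
 */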
static ssize_t get_best_energy_list(char *page, int activate)
{
int rc, cnt, i, cpu;
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
unsigned long flags = 0;
u32 *buf_page;
char *s = page;
buf_page = (u32 *) get_zeroed_page(GFP_KERNEL);
if (!buf_page)
return -ENOMEM;
flags = FLAGS_MODE1;
if (activate)
flags |= FLAGS_ACTIVATE;
rc = plpar_hcall9(H_BEST_ENERGY, retbuf, flags, 0, __pa(buf_page),
0, 0, 0, 0, 0, 0);
if (rc != H_SUCCESS) {
free_page((unsigned long) buf_page);
return -EINVAL;
}
cnt = retbuf[0];
for (i = 0; i < cnt; i++) {
cpu = drc_index_to_cpu(buf_page[2*i+1]);
if ((cpu_online(cpu) && !activate) ||
(!cpu_online(cpu) && activate))
s += sprintf(s, "%d,", cpu);
}
if (s > page) { /* Something to show */
s--; /* Suppress last comma */
s += sprintf(s, "\n");
}
free_page((unsigned long) buf_page);
return s-page;
}
static ssize_t get_best_energy_data(struct device *dev,
char *page, int activate)
{
int rc;
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
unsigned long flags = 0;
flags = FLAGS_MODE2;
if (activate)
flags |= FLAGS_ACTIVATE;
rc = plpar_hcall9(H_BEST_ENERGY, retbuf, flags,
cpu_to_drc_index(dev->id),
0, 0, 0, 0, 0, 0, 0);
if (rc != H_SUCCESS)
return -EINVAL;
return sprintf(page, "%lu\n", retbuf[1] >> 32);
}
/* Wrapper functions */
static ssize_t cpu_activate_hint_list_show(struct device *dev,
struct device_attribute *attr, char *page)
{
return get_best_energy_list(page, 1);
}
static ssize_t cpu_deactivate_hint_list_show(struct device *dev,
struct device_attribute *attr, char *page)
{
return get_best_energy_list(page, 0);
}
static ssize_t percpu_activate_hint_show(struct device *dev,
struct device_attribute *attr, char *page)
{
return get_best_energy_data(dev, page, 1);
}
static ssize_t percpu_deactivate_hint_show(struct device *dev,
struct device_attribute *attr, char *page)
{
return get_best_energy_data(dev, page, 0);
}
/*
* Create sysfs interface:
* /sys/devices/system/cpu/pseries_activate_hint_list
* /sys/devices/system/cpu/pseries_deactivate_hint_list
* Comma separated list of cpus to activate or deactivate
* /sys/devices/system/cpu/cpuN/pseries_activate_hint
* /sys/devices/system/cpu/cpuN/pseries_deactivate_hint
* Per-cpu value of the hint
*/
static struct device_attribute attr_cpu_activate_hint_list =
__ATTR(pseries_activate_hint_list, 0444,
cpu_activate_hint_list_show, NULL);
static struct device_attribute attr_cpu_deactivate_hint_list =
__ATTR(pseries_deactivate_hint_list, 0444,
cpu_deactivate_hint_list_show, NULL);
static struct device_attribute attr_percpu_activate_hint =
__ATTR(pseries_activate_hint, 0444,
percpu_activate_hint_show, NULL);
static struct device_attribute attr_percpu_deactivate_hint =
__ATTR(pseries_deactivate_hint, 0444,
percpu_deactivate_hint_show, NULL);
static int __init pseries_energy_init(void)
{
int cpu, err;
struct device *cpu_dev, *dev_root;
if (!firmware_has_feature(FW_FEATURE_BEST_ENERGY))
return 0; /* H_BEST_ENERGY hcall not supported */
/* Create the sysfs files */
dev_root = bus_get_dev_root(&cpu_subsys);
if (dev_root) {
err = device_create_file(dev_root, &attr_cpu_activate_hint_list);
if (!err)
err = device_create_file(dev_root, &attr_cpu_deactivate_hint_list);
put_device(dev_root);
if (err)
return err;
}
for_each_possible_cpu(cpu) {
cpu_dev = get_cpu_device(cpu);
err = device_create_file(cpu_dev,
&attr_percpu_activate_hint);
if (err)
break;
err = device_create_file(cpu_dev,
&attr_percpu_deactivate_hint);
if (err)
break;
}
if (err)
return err;
sysfs_entries = 1; /* Removed entries on cleanup */
return 0;
}
static void __exit pseries_energy_cleanup(void)
{
int cpu;
struct device *cpu_dev, *dev_root;
if (!sysfs_entries)
return;
/* Remove the sysfs files */
dev_root = bus_get_dev_root(&cpu_subsys);
if (dev_root) {
device_remove_file(dev_root, &attr_cpu_activate_hint_list);
device_remove_file(dev_root, &attr_cpu_deactivate_hint_list);
put_device(dev_root);
}
for_each_possible_cpu(cpu) {
cpu_dev = get_cpu_device(cpu);
sysfs_remove_file(&cpu_dev->kobj,
&attr_percpu_activate_hint.attr);
sysfs_remove_file(&cpu_dev->kobj,
&attr_percpu_deactivate_hint.attr);
}
}
module_init(pseries_energy_init);
module_exit(pseries_energy_cleanup);
MODULE_DESCRIPTION("Driver for pSeries platform energy management");
MODULE_AUTHOR("Vaidyanathan Srinivasan");
MODULE_LICENSE("GPL");
| linux-master | arch/powerpc/platforms/pseries/pseries_energy.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pseries Memory Hotplug infrastructure.
*
* Copyright (C) 2008 Badari Pulavarty, IBM Corporation
*/
#define pr_fmt(fmt) "pseries-hotplug-mem: " fmt
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/sparsemem.h>
#include <asm/fadump.h>
#include <asm/drmem.h>
#include "pseries.h"
static void dlpar_free_property(struct property *prop)
{
kfree(prop->name);
kfree(prop->value);
kfree(prop);
}
static struct property *dlpar_clone_property(struct property *prop,
u32 prop_size)
{
struct property *new_prop;
new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
if (!new_prop)
return NULL;
new_prop->name = kstrdup(prop->name, GFP_KERNEL);
new_prop->value = kzalloc(prop_size, GFP_KERNEL);
if (!new_prop->name || !new_prop->value) {
dlpar_free_property(new_prop);
return NULL;
}
memcpy(new_prop->value, prop->value, prop->length);
new_prop->length = prop_size;
of_property_set_flag(new_prop, OF_DYNAMIC);
return new_prop;
}
static bool find_aa_index(struct device_node *dr_node,
struct property *ala_prop,
const u32 *lmb_assoc, u32 *aa_index)
{
u32 *assoc_arrays, new_prop_size;
struct property *new_prop;
int aa_arrays, aa_array_entries, aa_array_sz;
int i, index;
/*
* The ibm,associativity-lookup-arrays property is defined to be
* a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
* array, followed by the associativity arrays.
*/
assoc_arrays = ala_prop->value;
aa_arrays = be32_to_cpu(assoc_arrays[0]);
aa_array_entries = be32_to_cpu(assoc_arrays[1]);
aa_array_sz = aa_array_entries * sizeof(u32);
for (i = 0; i < aa_arrays; i++) {
index = (i * aa_array_entries) + 2;
if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
continue;
*aa_index = i;
return true;
}
new_prop_size = ala_prop->length + aa_array_sz;
new_prop = dlpar_clone_property(ala_prop, new_prop_size);
if (!new_prop)
return false;
assoc_arrays = new_prop->value;
/* increment the number of entries in the lookup array */
assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);
/* copy the new associativity into the lookup array */
index = aa_arrays * aa_array_entries + 2;
memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);
of_update_property(dr_node, new_prop);
/*
* The associativity lookup array index for this lmb is
* number of entries - 1 since we added its associativity
* to the end of the lookup array.
*/
*aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
return true;
}
static int update_lmb_associativity_index(struct drmem_lmb *lmb)
{
struct device_node *parent, *lmb_node, *dr_node;
struct property *ala_prop;
const u32 *lmb_assoc;
u32 aa_index;
bool found;
parent = of_find_node_by_path("/");
if (!parent)
return -ENODEV;
lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
parent);
of_node_put(parent);
if (!lmb_node)
return -EINVAL;
lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
if (!lmb_assoc) {
dlpar_free_cc_nodes(lmb_node);
return -ENODEV;
}
update_numa_distance(lmb_node);
dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
if (!dr_node) {
dlpar_free_cc_nodes(lmb_node);
return -ENODEV;
}
ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
NULL);
if (!ala_prop) {
of_node_put(dr_node);
dlpar_free_cc_nodes(lmb_node);
return -ENODEV;
}
found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index);
of_node_put(dr_node);
dlpar_free_cc_nodes(lmb_node);
if (!found) {
pr_err("Could not find LMB associativity\n");
return -1;
}
lmb->aa_index = aa_index;
return 0;
}
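/* Look up the memory_block backing the first memory section of this LMB. */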
static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
{
unsigned long section_nr;
struct memory_block *mem_block;
section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
mem_block = find_memory_block(section_nr);
return mem_block;
}
static int get_lmb_range(u32 drc_index, int n_lmbs,
struct drmem_lmb **start_lmb,
struct drmem_lmb **end_lmb)
{
struct drmem_lmb *lmb, *start, *end;
struct drmem_lmb *limit;
start = NULL;
for_each_drmem_lmb(lmb) {
if (lmb->drc_index == drc_index) {
start = lmb;
break;
}
}
if (!start)
return -EINVAL;
end = &start[n_lmbs];
limit = &drmem_info->lmbs[drmem_info->n_lmbs];
if (end > limit)
return -EINVAL;
*start_lmb = start;
*end_lmb = end;
return 0;
}
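/* Online or offline the memory_block backing an LMB via the device core. */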
static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online)
{
struct memory_block *mem_block;
int rc;
mem_block = lmb_to_memblock(lmb);
if (!mem_block)
return -EINVAL;
if (online && mem_block->dev.offline)
rc = device_online(&mem_block->dev);
else if (!online && !mem_block->dev.offline)
rc = device_offline(&mem_block->dev);
else
rc = 0;
put_device(&mem_block->dev);
return rc;
}
static int dlpar_online_lmb(struct drmem_lmb *lmb)
{
return dlpar_change_lmb_state(lmb, true);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static int dlpar_offline_lmb(struct drmem_lmb *lmb)
{
return dlpar_change_lmb_state(lmb, false);
}
static int pseries_remove_memblock(unsigned long base, unsigned long memblock_size)
{
unsigned long start_pfn;
int sections_per_block;
int i;
start_pfn = base >> PAGE_SHIFT;
lock_device_hotplug();
if (!pfn_valid(start_pfn))
goto out;
sections_per_block = memory_block_size / MIN_MEMORY_BLOCK_SIZE;
for (i = 0; i < sections_per_block; i++) {
__remove_memory(base, MIN_MEMORY_BLOCK_SIZE);
base += MIN_MEMORY_BLOCK_SIZE;
}
out:
/* Update memory regions for memory remove */
memblock_remove(base, memblock_size);
unlock_device_hotplug();
return 0;
}
static int pseries_remove_mem_node(struct device_node *np)
{
int ret;
struct resource res;
/*
* Check to see if we are actually removing memory
*/
if (!of_node_is_type(np, "memory"))
return 0;
/*
* Find the base address and size of the memblock
*/
ret = of_address_to_resource(np, 0, &res);
if (ret)
return ret;
pseries_remove_memblock(res.start, resource_size(&res));
return 0;
}
static bool lmb_is_removable(struct drmem_lmb *lmb)
{
if ((lmb->flags & DRCONF_MEM_RESERVED) ||
!(lmb->flags & DRCONF_MEM_ASSIGNED))
return false;
#ifdef CONFIG_FA_DUMP
/*
* Don't hot-remove memory that falls in fadump boot memory area
* and memory that is reserved for capturing old kernel memory.
*/
if (is_fadump_memory_area(lmb->base_addr, memory_block_size_bytes()))
return false;
#endif
/* device_offline() will determine if we can actually remove this lmb */
return true;
}
static int dlpar_add_lmb(struct drmem_lmb *);
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
struct memory_block *mem_block;
int rc;
if (!lmb_is_removable(lmb))
return -EINVAL;
mem_block = lmb_to_memblock(lmb);
if (mem_block == NULL)
return -EINVAL;
rc = dlpar_offline_lmb(lmb);
if (rc) {
put_device(&mem_block->dev);
return rc;
}
__remove_memory(lmb->base_addr, memory_block_size);
put_device(&mem_block->dev);
/* Update memory regions for memory remove */
memblock_remove(lmb->base_addr, memory_block_size);
invalidate_lmb_associativity_index(lmb);
lmb->flags &= ~DRCONF_MEM_ASSIGNED;
return 0;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
struct drmem_lmb *lmb;
int lmbs_reserved = 0;
int lmbs_available = 0;
int rc;
pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);
if (lmbs_to_remove == 0)
return -EINVAL;
/* Validate that there are enough LMBs to satisfy the request */
for_each_drmem_lmb(lmb) {
if (lmb_is_removable(lmb))
lmbs_available++;
if (lmbs_available == lmbs_to_remove)
break;
}
if (lmbs_available < lmbs_to_remove) {
pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
lmbs_available, lmbs_to_remove);
return -EINVAL;
}
for_each_drmem_lmb(lmb) {
rc = dlpar_remove_lmb(lmb);
if (rc)
continue;
/* Mark this lmb so we can add it later if all of the
* requested LMBs cannot be removed.
*/
drmem_mark_lmb_reserved(lmb);
lmbs_reserved++;
if (lmbs_reserved == lmbs_to_remove)
break;
}
if (lmbs_reserved != lmbs_to_remove) {
pr_err("Memory hot-remove failed, adding LMB's back\n");
for_each_drmem_lmb(lmb) {
if (!drmem_lmb_reserved(lmb))
continue;
rc = dlpar_add_lmb(lmb);
if (rc)
pr_err("Failed to add LMB back, drc index %x\n",
lmb->drc_index);
drmem_remove_lmb_reservation(lmb);
lmbs_reserved--;
if (lmbs_reserved == 0)
break;
}
rc = -EINVAL;
} else {
for_each_drmem_lmb(lmb) {
if (!drmem_lmb_reserved(lmb))
continue;
dlpar_release_drc(lmb->drc_index);
pr_info("Memory at %llx was hot-removed\n",
lmb->base_addr);
drmem_remove_lmb_reservation(lmb);
lmbs_reserved--;
if (lmbs_reserved == 0)
break;
}
rc = 0;
}
return rc;
}
static int dlpar_memory_remove_by_index(u32 drc_index)
{
struct drmem_lmb *lmb;
int lmb_found;
int rc;
pr_debug("Attempting to hot-remove LMB, drc index %x\n", drc_index);
lmb_found = 0;
for_each_drmem_lmb(lmb) {
if (lmb->drc_index == drc_index) {
lmb_found = 1;
rc = dlpar_remove_lmb(lmb);
if (!rc)
dlpar_release_drc(lmb->drc_index);
break;
}
}
if (!lmb_found)
rc = -EINVAL;
if (rc)
pr_debug("Failed to hot-remove memory at %llx\n",
lmb->base_addr);
else
pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr);
return rc;
}
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
struct drmem_lmb *lmb, *start_lmb, *end_lmb;
int rc;
pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
lmbs_to_remove, drc_index);
if (lmbs_to_remove == 0)
return -EINVAL;
rc = get_lmb_range(drc_index, lmbs_to_remove, &start_lmb, &end_lmb);
if (rc)
return -EINVAL;
/*
* Validate that all LMBs in range are not reserved. Note that it
* is ok if they are !ASSIGNED since our goal here is to remove the
* LMB range, regardless of whether some LMBs were already removed
* by any other reason.
*
* This is a contrast to what is done in remove_by_count() where we
* check for both RESERVED and !ASSIGNED (via lmb_is_removable()),
* because we want to remove a fixed amount of LMBs in that function.
*/
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
if (lmb->flags & DRCONF_MEM_RESERVED) {
pr_err("Memory at %llx (drc index %x) is reserved\n",
lmb->base_addr, lmb->drc_index);
return -EINVAL;
}
}
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
/*
* dlpar_remove_lmb() will error out if the LMB is already
* !ASSIGNED, but this case is a no-op for us.
*/
if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
continue;
rc = dlpar_remove_lmb(lmb);
if (rc)
break;
drmem_mark_lmb_reserved(lmb);
}
if (rc) {
pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
if (!drmem_lmb_reserved(lmb))
continue;
/*
* Setting the isolation state of an UNISOLATED/CONFIGURED
* device to UNISOLATE is a no-op, but the hypervisor can
* use it as a hint that the LMB removal failed.
*/
dlpar_unisolate_drc(lmb->drc_index);
rc = dlpar_add_lmb(lmb);
if (rc)
pr_err("Failed to add LMB, drc index %x\n",
lmb->drc_index);
drmem_remove_lmb_reservation(lmb);
}
rc = -EINVAL;
} else {
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
if (!drmem_lmb_reserved(lmb))
continue;
dlpar_release_drc(lmb->drc_index);
pr_info("Memory at %llx (drc index %x) was hot-removed\n",
lmb->base_addr, lmb->drc_index);
drmem_remove_lmb_reservation(lmb);
}
}
return rc;
}
#else
static inline int pseries_remove_memblock(unsigned long base,
unsigned long memblock_size)
{
return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
return 0;
}
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index)
{
return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
return -EOPNOTSUPP;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
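/*
 * Hot-add a single LMB: refresh its associativity index, add and online
 * the memory, and mark the LMB as assigned on success.
 */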
static int dlpar_add_lmb(struct drmem_lmb *lmb)
{
unsigned long block_sz;
int nid, rc;
if (lmb->flags & DRCONF_MEM_ASSIGNED)
return -EINVAL;
rc = update_lmb_associativity_index(lmb);
if (rc) {
dlpar_release_drc(lmb->drc_index);
return rc;
}
block_sz = memory_block_size_bytes();
/* Find the node id for this LMB. Fake one if necessary. */
nid = of_drconf_to_nid_single(lmb);
if (nid < 0 || !node_possible(nid))
nid = first_online_node;
/* Add the memory */
rc = __add_memory(nid, lmb->base_addr, block_sz, MHP_MEMMAP_ON_MEMORY);
if (rc) {
invalidate_lmb_associativity_index(lmb);
return rc;
}
rc = dlpar_online_lmb(lmb);
if (rc) {
__remove_memory(lmb->base_addr, block_sz);
invalidate_lmb_associativity_index(lmb);
} else {
lmb->flags |= DRCONF_MEM_ASSIGNED;
}
return rc;
}
static int dlpar_memory_add_by_count(u32 lmbs_to_add)
{
struct drmem_lmb *lmb;
int lmbs_available = 0;
int lmbs_reserved = 0;
int rc;
pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);
if (lmbs_to_add == 0)
return -EINVAL;
/* Validate that there are enough LMBs to satisfy the request */
for_each_drmem_lmb(lmb) {
if (lmb->flags & DRCONF_MEM_RESERVED)
continue;
if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
lmbs_available++;
if (lmbs_available == lmbs_to_add)
break;
}
if (lmbs_available < lmbs_to_add)
return -EINVAL;
for_each_drmem_lmb(lmb) {
if (lmb->flags & DRCONF_MEM_ASSIGNED)
continue;
rc = dlpar_acquire_drc(lmb->drc_index);
if (rc)
continue;
rc = dlpar_add_lmb(lmb);
if (rc) {
dlpar_release_drc(lmb->drc_index);
continue;
}
/* Mark this lmb so we can remove it later if all of the
* requested LMBs cannot be added.
*/
drmem_mark_lmb_reserved(lmb);
lmbs_reserved++;
if (lmbs_reserved == lmbs_to_add)
break;
}
if (lmbs_reserved != lmbs_to_add) {
pr_err("Memory hot-add failed, removing any added LMBs\n");
for_each_drmem_lmb(lmb) {
if (!drmem_lmb_reserved(lmb))
continue;
rc = dlpar_remove_lmb(lmb);
if (rc)
pr_err("Failed to remove LMB, drc index %x\n",
lmb->drc_index);
else
dlpar_release_drc(lmb->drc_index);
drmem_remove_lmb_reservation(lmb);
lmbs_reserved--;
if (lmbs_reserved == 0)
break;
}
rc = -EINVAL;
} else {
for_each_drmem_lmb(lmb) {
if (!drmem_lmb_reserved(lmb))
continue;
pr_debug("Memory at %llx (drc index %x) was hot-added\n",
lmb->base_addr, lmb->drc_index);
drmem_remove_lmb_reservation(lmb);
lmbs_reserved--;
if (lmbs_reserved == 0)
break;
}
rc = 0;
}
return rc;
}
static int dlpar_memory_add_by_index(u32 drc_index)
{
struct drmem_lmb *lmb;
int rc, lmb_found;
pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);
lmb_found = 0;
for_each_drmem_lmb(lmb) {
if (lmb->drc_index == drc_index) {
lmb_found = 1;
rc = dlpar_acquire_drc(lmb->drc_index);
if (!rc) {
rc = dlpar_add_lmb(lmb);
if (rc)
dlpar_release_drc(lmb->drc_index);
}
break;
}
}
if (!lmb_found)
rc = -EINVAL;
if (rc)
pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
else
pr_info("Memory at %llx (drc index %x) was hot-added\n",
lmb->base_addr, drc_index);
return rc;
}
static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
{
struct drmem_lmb *lmb, *start_lmb, *end_lmb;
int rc;
pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
lmbs_to_add, drc_index);
if (lmbs_to_add == 0)
return -EINVAL;
rc = get_lmb_range(drc_index, lmbs_to_add, &start_lmb, &end_lmb);
if (rc)
return -EINVAL;
/* Validate that the LMBs in this range are not reserved */
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
/* Fail immediately if the whole range can't be hot-added */
if (lmb->flags & DRCONF_MEM_RESERVED) {
pr_err("Memory at %llx (drc index %x) is reserved\n",
lmb->base_addr, lmb->drc_index);
return -EINVAL;
}
}
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
if (lmb->flags & DRCONF_MEM_ASSIGNED)
continue;
rc = dlpar_acquire_drc(lmb->drc_index);
if (rc)
break;
rc = dlpar_add_lmb(lmb);
if (rc) {
dlpar_release_drc(lmb->drc_index);
break;
}
drmem_mark_lmb_reserved(lmb);
}
if (rc) {
pr_err("Memory indexed-count-add failed, removing any added LMBs\n");
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
if (!drmem_lmb_reserved(lmb))
continue;
rc = dlpar_remove_lmb(lmb);
if (rc)
pr_err("Failed to remove LMB, drc index %x\n",
lmb->drc_index);
else
dlpar_release_drc(lmb->drc_index);
drmem_remove_lmb_reservation(lmb);
}
rc = -EINVAL;
} else {
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
if (!drmem_lmb_reserved(lmb))
continue;
pr_info("Memory at %llx (drc index %x) was hot-added\n",
lmb->base_addr, lmb->drc_index);
drmem_remove_lmb_reservation(lmb);
}
}
return rc;
}
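/*
 * Entry point for memory DLPAR requests: dispatch add/remove operations by
 * count, by drc index or by indexed count, then update the device tree.
 */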
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
u32 count, drc_index;
int rc;
lock_device_hotplug();
switch (hp_elog->action) {
case PSERIES_HP_ELOG_ACTION_ADD:
switch (hp_elog->id_type) {
case PSERIES_HP_ELOG_ID_DRC_COUNT:
count = hp_elog->_drc_u.drc_count;
rc = dlpar_memory_add_by_count(count);
break;
case PSERIES_HP_ELOG_ID_DRC_INDEX:
drc_index = hp_elog->_drc_u.drc_index;
rc = dlpar_memory_add_by_index(drc_index);
break;
case PSERIES_HP_ELOG_ID_DRC_IC:
count = hp_elog->_drc_u.ic.count;
drc_index = hp_elog->_drc_u.ic.index;
rc = dlpar_memory_add_by_ic(count, drc_index);
break;
default:
rc = -EINVAL;
break;
}
break;
case PSERIES_HP_ELOG_ACTION_REMOVE:
switch (hp_elog->id_type) {
case PSERIES_HP_ELOG_ID_DRC_COUNT:
count = hp_elog->_drc_u.drc_count;
rc = dlpar_memory_remove_by_count(count);
break;
case PSERIES_HP_ELOG_ID_DRC_INDEX:
drc_index = hp_elog->_drc_u.drc_index;
rc = dlpar_memory_remove_by_index(drc_index);
break;
case PSERIES_HP_ELOG_ID_DRC_IC:
count = hp_elog->_drc_u.ic.count;
drc_index = hp_elog->_drc_u.ic.index;
rc = dlpar_memory_remove_by_ic(count, drc_index);
break;
default:
rc = -EINVAL;
break;
}
break;
default:
pr_err("Invalid action (%d) specified\n", hp_elog->action);
rc = -EINVAL;
break;
}
if (!rc)
rc = drmem_update_dt();
unlock_device_hotplug();
return rc;
}
static int pseries_add_mem_node(struct device_node *np)
{
int ret;
struct resource res;
/*
* Check to see if we are actually adding memory
*/
if (!of_node_is_type(np, "memory"))
return 0;
/*
* Find the base and size of the memblock
*/
ret = of_address_to_resource(np, 0, &res);
if (ret)
return ret;
/*
* Update memory region to represent the memory add
*/
ret = memblock_add(res.start, resource_size(&res));
return (ret < 0) ? -EINVAL : 0;
}
static int pseries_memory_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct of_reconfig_data *rd = data;
int err = 0;
switch (action) {
case OF_RECONFIG_ATTACH_NODE:
err = pseries_add_mem_node(rd->dn);
break;
case OF_RECONFIG_DETACH_NODE:
err = pseries_remove_mem_node(rd->dn);
break;
case OF_RECONFIG_UPDATE_PROPERTY:
if (!strcmp(rd->dn->name,
"ibm,dynamic-reconfiguration-memory"))
drmem_update_lmbs(rd->prop);
}
return notifier_from_errno(err);
}
static struct notifier_block pseries_mem_nb = {
.notifier_call = pseries_memory_notifier,
};
static int __init pseries_memory_hotplug_init(void)
{
if (firmware_has_feature(FW_FEATURE_LPAR))
of_reconfig_notifier_register(&pseries_mem_nb);
return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);
| linux-master | arch/powerpc/platforms/pseries/hotplug-memory.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <asm/prom.h>
#include "of_helpers.h"
/**
* pseries_of_derive_parent - basically like dirname(1)
* @path: the full_name of a node to be added to the tree
*
* Returns the node which should be the parent of the node
* described by path. E.g., for path = "/foo/bar", returns
* the node with full_name = "/foo".
*/
struct device_node *pseries_of_derive_parent(const char *path)
{
struct device_node *parent;
char *parent_path = "/";
const char *tail;
/* We do not want the trailing '/' character */
tail = kbasename(path) - 1;
/* reject if path is "/" */
if (!strcmp(path, "/"))
return ERR_PTR(-EINVAL);
if (tail > path) {
parent_path = kstrndup(path, tail - path, GFP_KERNEL);
if (!parent_path)
return ERR_PTR(-ENOMEM);
}
parent = of_find_node_by_path(parent_path);
if (strcmp(parent_path, "/"))
kfree(parent_path);
return parent ? parent : ERR_PTR(-EINVAL);
}
/* Helper routines to convert between drc_index and cpu numbers */
int of_read_drc_info_cell(struct property **prop, const __be32 **curval,
struct of_drc_info *data)
{
const char *p = (char *)(*curval);
const __be32 *p2;
if (!data)
return -EINVAL;
/* Get drc-type:encode-string */
data->drc_type = (char *)p;
p = of_prop_next_string(*prop, p);
if (!p)
return -EINVAL;
/* Get drc-name-prefix:encode-string */
data->drc_name_prefix = (char *)p;
p = of_prop_next_string(*prop, p);
if (!p)
return -EINVAL;
/* Get drc-index-start:encode-int */
p2 = (const __be32 *)p;
data->drc_index_start = be32_to_cpu(*p2);
/* Get drc-name-suffix-start:encode-int */
p2 = of_prop_next_u32(*prop, p2, &data->drc_name_suffix_start);
if (!p2)
return -EINVAL;
/* Get number-sequential-elements:encode-int */
p2 = of_prop_next_u32(*prop, p2, &data->num_sequential_elems);
if (!p2)
return -EINVAL;
/* Get sequential-increment:encode-int */
p2 = of_prop_next_u32(*prop, p2, &data->sequential_inc);
if (!p2)
return -EINVAL;
/* Get drc-power-domain:encode-int */
p2 = of_prop_next_u32(*prop, p2, &data->drc_power_domain);
if (!p2)
return -EINVAL;
/* Should now know end of current entry */
(*curval) = (void *)(++p2);
data->last_drc_index = data->drc_index_start +
((data->num_sequential_elems - 1) * data->sequential_inc);
return 0;
}
EXPORT_SYMBOL(of_read_drc_info_cell);
| linux-master | arch/powerpc/platforms/pseries/of_helpers.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* 64-bit pSeries and RS/6000 setup code.
*
* Copyright (C) 1995 Linus Torvalds
* Adapted from 'alpha' version by Gary Thomas
* Modified by Cort Dougan ([email protected])
* Modified by PPC64 Team, IBM Corp
*/
/*
* bootup setup stuff..
*/
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/major.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/adb.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/memblock.h>
#include <linux/swiotlb.h>
#include <linux/seq_buf.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/time.h>
#include <asm/nvram.h>
#include <asm/pmc.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/papr-sysparm.h>
#include <asm/ppc-pci.h>
#include <asm/i8259.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <asm/eeh.h>
#include <asm/reg.h>
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/isa-bridge.h>
#include <asm/security_features.h>
#include <asm/asm-const.h>
#include <asm/idle.h>
#include <asm/swiotlb.h>
#include <asm/svm.h>
#include <asm/dtl.h>
#include <asm/hvconsole.h>
#include <asm/setup.h>
#include "pseries.h"
DEFINE_STATIC_KEY_FALSE(shared_processor);
EXPORT_SYMBOL(shared_processor);
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
static bool steal_acc = true;
static int __init parse_no_stealacc(char *arg)
{
steal_acc = false;
return 0;
}
early_param("no-steal-acc", parse_no_stealacc);
#endif
int CMO_PrPSP = -1;
int CMO_SecPSP = -1;
unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
EXPORT_SYMBOL(CMO_PageSize);
int fwnmi_active; /* TRUE if an FWNMI handler is present */
int ibm_nmi_interlock_token;
u32 pseries_security_flavor;
static void pSeries_show_cpuinfo(struct seq_file *m)
{
struct device_node *root;
const char *model = "";
root = of_find_node_by_path("/");
if (root)
model = of_get_property(root, "model", NULL);
seq_printf(m, "machine\t\t: CHRP %s\n", model);
of_node_put(root);
if (radix_enabled())
seq_printf(m, "MMU\t\t: Radix\n");
else
seq_printf(m, "MMU\t\t: Hash\n");
}
/* Initialize firmware assisted non-maskable interrupts if
* the firmware supports this feature.
*/
static void __init fwnmi_init(void)
{
unsigned long system_reset_addr, machine_check_addr;
u8 *mce_data_buf;
unsigned int i;
int nr_cpus = num_possible_cpus();
#ifdef CONFIG_PPC_64S_HASH_MMU
struct slb_entry *slb_ptr;
size_t size;
#endif
int ibm_nmi_register_token;
ibm_nmi_register_token = rtas_function_token(RTAS_FN_IBM_NMI_REGISTER);
if (ibm_nmi_register_token == RTAS_UNKNOWN_SERVICE)
return;
ibm_nmi_interlock_token = rtas_function_token(RTAS_FN_IBM_NMI_INTERLOCK);
if (WARN_ON(ibm_nmi_interlock_token == RTAS_UNKNOWN_SERVICE))
return;
/* If the kernel's not linked at zero we point the firmware at low
* addresses anyway, and use a trampoline to get to the real code. */
system_reset_addr = __pa(system_reset_fwnmi) - PHYSICAL_START;
machine_check_addr = __pa(machine_check_fwnmi) - PHYSICAL_START;
if (0 == rtas_call(ibm_nmi_register_token, 2, 1, NULL,
system_reset_addr, machine_check_addr))
fwnmi_active = 1;
/*
* Allocate a chunk for per cpu buffer to hold rtas errorlog.
* It will be used in real mode mce handler, hence it needs to be
* below RMA.
*/
mce_data_buf = memblock_alloc_try_nid_raw(RTAS_ERROR_LOG_MAX * nr_cpus,
RTAS_ERROR_LOG_MAX, MEMBLOCK_LOW_LIMIT,
ppc64_rma_size, NUMA_NO_NODE);
if (!mce_data_buf)
panic("Failed to allocate %d bytes below %pa for MCE buffer\n",
RTAS_ERROR_LOG_MAX * nr_cpus, &ppc64_rma_size);
for_each_possible_cpu(i) {
paca_ptrs[i]->mce_data_buf = mce_data_buf +
(RTAS_ERROR_LOG_MAX * i);
}
#ifdef CONFIG_PPC_64S_HASH_MMU
if (!radix_enabled()) {
/* Allocate per cpu area to save old slb contents during MCE */
size = sizeof(struct slb_entry) * mmu_slb_size * nr_cpus;
slb_ptr = memblock_alloc_try_nid_raw(size,
sizeof(struct slb_entry), MEMBLOCK_LOW_LIMIT,
ppc64_rma_size, NUMA_NO_NODE);
if (!slb_ptr)
panic("Failed to allocate %zu bytes below %pa for slb area\n",
size, &ppc64_rma_size);
for_each_possible_cpu(i)
paca_ptrs[i]->mce_faulty_slbs = slb_ptr + (mmu_slb_size * i);
}
#endif
}
/*
* Affix a device for the first timer to the platform bus if
* we have firmware support for the H_WATCHDOG hypercall.
*/
static __init int pseries_wdt_init(void)
{
if (firmware_has_feature(FW_FEATURE_WATCHDOG))
platform_device_register_simple("pseries-wdt", 0, NULL, 0);
return 0;
}
machine_subsys_initcall(pseries, pseries_wdt_init);
static void pseries_8259_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
if (cascade_irq)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
}
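/*
 * Find the legacy i8259 interrupt controller in the device tree, work out
 * its PCI interrupt acknowledge address and install the cascade handler.
 */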
static void __init pseries_setup_i8259_cascade(void)
{
struct device_node *np, *old, *found = NULL;
unsigned int cascade;
const u32 *addrp;
unsigned long intack = 0;
int naddr;
for_each_node_by_type(np, "interrupt-controller") {
if (of_device_is_compatible(np, "chrp,iic")) {
found = np;
break;
}
}
if (found == NULL) {
printk(KERN_DEBUG "pic: no ISA interrupt controller\n");
return;
}
cascade = irq_of_parse_and_map(found, 0);
if (!cascade) {
printk(KERN_ERR "pic: failed to map cascade interrupt");
return;
}
pr_debug("pic: cascade mapped to irq %d\n", cascade);
for (old = of_node_get(found); old != NULL ; old = np) {
np = of_get_parent(old);
of_node_put(old);
if (np == NULL)
break;
if (!of_node_name_eq(np, "pci"))
continue;
addrp = of_get_property(np, "8259-interrupt-acknowledge", NULL);
if (addrp == NULL)
continue;
naddr = of_n_addr_cells(np);
intack = addrp[naddr-1];
if (naddr > 1)
intack |= ((unsigned long)addrp[naddr-2]) << 32;
}
if (intack)
printk(KERN_DEBUG "pic: PCI 8259 intack at 0x%016lx\n", intack);
i8259_init(found, intack);
of_node_put(found);
irq_set_chained_handler(cascade, pseries_8259_cascade);
}
static void __init pseries_init_irq(void)
{
/* Try using a XIVE if available, otherwise use a XICS */
if (!xive_spapr_init()) {
xics_init();
pseries_setup_i8259_cascade();
}
}
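/* Enable the performance monitor facility for this partition via H_PERFMON. */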
static void pseries_lpar_enable_pmcs(void)
{
unsigned long set, reset;
set = 1UL << 63;
reset = 0;
plpar_hcall_norets(H_PERFMON, set, reset);
}
static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
struct of_reconfig_data *rd = data;
struct device_node *parent, *np = rd->dn;
struct pci_dn *pdn;
int err = NOTIFY_OK;
switch (action) {
case OF_RECONFIG_ATTACH_NODE:
parent = of_get_parent(np);
pdn = parent ? PCI_DN(parent) : NULL;
if (pdn)
pci_add_device_node_info(pdn->phb, np);
of_node_put(parent);
break;
case OF_RECONFIG_DETACH_NODE:
pdn = PCI_DN(np);
if (pdn)
list_del(&pdn->list);
break;
default:
err = NOTIFY_DONE;
break;
}
return err;
}
static struct notifier_block pci_dn_reconfig_nb = {
.notifier_call = pci_dn_reconfig_notifier,
};
struct kmem_cache *dtl_cache;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
* Allocate space for the dispatch trace log for all possible cpus
* and register the buffers with the hypervisor. This is used for
* computing time stolen by the hypervisor.
*/
static int alloc_dispatch_logs(void)
{
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
return 0;
if (!dtl_cache)
return 0;
alloc_dtl_buffers(0);
/* Register the DTL for the current (boot) cpu */
register_dtl_buffer(smp_processor_id());
return 0;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static inline int alloc_dispatch_logs(void)
{
return 0;
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static int alloc_dispatch_log_kmem_cache(void)
{
void (*ctor)(void *) = get_dtl_cache_ctor();
dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES,
DISPATCH_LOG_BYTES, 0, ctor);
if (!dtl_cache) {
pr_warn("Failed to create dispatch trace log buffer cache\n");
pr_warn("Stolen time statistics will be unreliable\n");
return 0;
}
return alloc_dispatch_logs();
}
machine_early_initcall(pseries, alloc_dispatch_log_kmem_cache);
DEFINE_PER_CPU(u64, idle_spurr_cycles);
DEFINE_PER_CPU(u64, idle_entry_purr_snap);
DEFINE_PER_CPU(u64, idle_entry_spurr_snap);
static void pseries_lpar_idle(void)
{
/*
* Default handler to go into low thread priority and possibly
* low power mode by ceding processor to hypervisor
*/
if (!prep_irq_for_idle())
return;
/* Indicate to hypervisor that we are idle. */
pseries_idle_prolog();
/*
* Yield the processor to the hypervisor. We return if
* an external interrupt occurs (which are driven prior
* to returning here) or if a prod occurs from another
* processor. When returning here, external interrupts
* are enabled.
*/
cede_processor();
pseries_idle_epilog();
}
static bool pseries_reloc_on_exception_enabled;
bool pseries_reloc_on_exception(void)
{
return pseries_reloc_on_exception_enabled;
}
EXPORT_SYMBOL_GPL(pseries_reloc_on_exception);
/*
* Enable relocation on during exceptions. This has partition wide scope and
* may take a while to complete, if it takes longer than one second we will
* just give up rather than wasting any more time on this - if that turns out
* to ever be a problem in practice we can move this into a kernel thread to
* finish off the process later in boot.
*/
bool pseries_enable_reloc_on_exc(void)
{
long rc;
unsigned int delay, total_delay = 0;
while (1) {
rc = enable_reloc_on_exceptions();
if (!H_IS_LONG_BUSY(rc)) {
if (rc == H_P2) {
pr_info("Relocation on exceptions not"
" supported\n");
return false;
} else if (rc != H_SUCCESS) {
pr_warn("Unable to enable relocation"
" on exceptions: %ld\n", rc);
return false;
}
pseries_reloc_on_exception_enabled = true;
return true;
}
delay = get_longbusy_msecs(rc);
total_delay += delay;
if (total_delay > 1000) {
pr_warn("Warning: Giving up waiting to enable "
"relocation on exceptions (%u msec)!\n",
total_delay);
return false;
}
mdelay(delay);
}
}
EXPORT_SYMBOL(pseries_enable_reloc_on_exc);
void pseries_disable_reloc_on_exc(void)
{
long rc;
while (1) {
rc = disable_reloc_on_exceptions();
if (!H_IS_LONG_BUSY(rc))
break;
mdelay(get_longbusy_msecs(rc));
}
if (rc == H_SUCCESS)
pseries_reloc_on_exception_enabled = false;
else
pr_warn("Warning: Failed to disable relocation on exceptions: %ld\n",
rc);
}
EXPORT_SYMBOL(pseries_disable_reloc_on_exc);
#ifdef __LITTLE_ENDIAN__
void pseries_big_endian_exceptions(void)
{
long rc;
while (1) {
rc = enable_big_endian_exceptions();
if (!H_IS_LONG_BUSY(rc))
break;
mdelay(get_longbusy_msecs(rc));
}
/*
* At this point it is unlikely panic() will get anything
* out to the user, since this is called very late in kexec
* but at least this will stop us from continuing on further
* and creating an even more difficult to debug situation.
*
* There is a known problem when kdump'ing, if cpus are offline
* the above call will fail. Rather than panicking again, keep
* going and hope the kdump kernel is also little endian, which
* it usually is.
*/
if (rc && !kdump_in_progress())
panic("Could not enable big endian exceptions");
}
void __init pseries_little_endian_exceptions(void)
{
long rc;
while (1) {
rc = enable_little_endian_exceptions();
if (!H_IS_LONG_BUSY(rc))
break;
mdelay(get_longbusy_msecs(rc));
}
if (rc) {
ppc_md.progress("H_SET_MODE LE exception fail", 0);
panic("Could not enable little endian exceptions");
}
}
#endif
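/*
 * Scan the children of the device tree root for PCI host bridges and set up
 * a pci_controller, the pci_dn hierarchy and MSI domains for each one.
 */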
static void __init pSeries_discover_phbs(void)
{
struct device_node *node;
struct pci_controller *phb;
struct device_node *root = of_find_node_by_path("/");
for_each_child_of_node(root, node) {
if (!of_node_is_type(node, "pci") &&
!of_node_is_type(node, "pciex"))
continue;
phb = pcibios_alloc_controller(node);
if (!phb)
continue;
rtas_setup_phb(phb);
pci_process_bridge_OF_ranges(phb, node, 0);
isa_bridge_find_early(phb);
phb->controller_ops = pseries_pci_controller_ops;
/* create pci_dn's for DT nodes under this PHB */
pci_devs_phb_init_dynamic(phb);
pseries_msi_allocate_domains(phb);
}
of_node_put(root);
/*
* PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
* in chosen.
*/
of_pci_check_probe_only();
}
static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
{
/*
* The features below are disabled by default, so we instead look to see
* if firmware has *enabled* them, and set them if so.
*/
if (result->character & H_CPU_CHAR_SPEC_BAR_ORI31)
security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);
if (result->character & H_CPU_CHAR_BCCTRL_SERIALISED)
security_ftr_set(SEC_FTR_BCCTRL_SERIALISED);
if (result->character & H_CPU_CHAR_L1D_FLUSH_ORI30)
security_ftr_set(SEC_FTR_L1D_FLUSH_ORI30);
if (result->character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
security_ftr_set(SEC_FTR_L1D_FLUSH_TRIG2);
if (result->character & H_CPU_CHAR_L1D_THREAD_PRIV)
security_ftr_set(SEC_FTR_L1D_THREAD_PRIV);
if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED)
security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
if (result->character & H_CPU_CHAR_BCCTR_FLUSH_ASSIST)
security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST);
if (result->character & H_CPU_CHAR_BCCTR_LINK_FLUSH_ASSIST)
security_ftr_set(SEC_FTR_BCCTR_LINK_FLUSH_ASSIST);
if (result->behaviour & H_CPU_BEHAV_FLUSH_COUNT_CACHE)
security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);
if (result->behaviour & H_CPU_BEHAV_FLUSH_LINK_STACK)
security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);
/*
* The features below are enabled by default, so we instead look to see
* if firmware has *disabled* them, and clear them if so.
* H_CPU_BEHAV_FAVOUR_SECURITY_H could be set only if
* H_CPU_BEHAV_FAVOUR_SECURITY is.
*/
if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) {
security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
pseries_security_flavor = 0;
} else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
pseries_security_flavor = 1;
else
pseries_security_flavor = 2;
if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
if (result->behaviour & H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY)
security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
if (result->behaviour & H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS)
security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);
if (result->behaviour & H_CPU_BEHAV_NO_STF_BARRIER)
security_ftr_clear(SEC_FTR_STF_BARRIER);
if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
}
void pseries_setup_security_mitigations(void)
{
struct h_cpu_char_result result;
enum l1d_flush_type types;
bool enable;
long rc;
/*
* Set features to the defaults assumed by init_cpu_char_feature_flags()
* so it can set/clear again any features that might have changed after
* migration, and in case the hypercall fails and it is not even called.
*/
powerpc_security_features = SEC_FTR_DEFAULT;
rc = plpar_get_cpu_characteristics(&result);
if (rc == H_SUCCESS)
init_cpu_char_feature_flags(&result);
/*
* We're the guest so this doesn't apply to us, clear it to simplify
* handling of it elsewhere.
*/
security_ftr_clear(SEC_FTR_L1D_FLUSH_HV);
types = L1D_FLUSH_FALLBACK;
if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_TRIG2))
types |= L1D_FLUSH_MTTRIG;
if (security_ftr_enabled(SEC_FTR_L1D_FLUSH_ORI30))
types |= L1D_FLUSH_ORI;
enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
setup_rfi_flush(types, enable);
setup_count_cache_flush();
enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
setup_entry_flush(enable);
enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
setup_uaccess_flush(enable);
setup_stf_barrier();
}
#ifdef CONFIG_PCI_IOV
enum rtas_iov_fw_value_map {
NUM_RES_PROPERTY = 0, /* Number of Resources */
LOW_INT = 1, /* Lowest 32 bits of Address */
START_OF_ENTRIES = 2, /* Always start of entry */
APERTURE_PROPERTY = 2, /* Start of entry+ to Aperture Size */
WDW_SIZE_PROPERTY = 4, /* Start of entry+ to Window Size */
NEXT_ENTRY = 7 /* Go to next entry on array */
};
enum get_iov_fw_value_index {
BAR_ADDRS = 1, /* Get Bar Address */
APERTURE_SIZE = 2, /* Get Aperture Size */
WDW_SIZE = 3 /* Get Window Size */
};
static resource_size_t pseries_get_iov_fw_value(struct pci_dev *dev, int resno,
enum get_iov_fw_value_index value)
{
const int *indexes;
struct device_node *dn = pci_device_to_OF_node(dev);
int i, num_res, ret = 0;
indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
if (!indexes)
return 0;
/*
* First element in the array is the number of Bars
* returned. Search through the list to find the matching
* bar
*/
num_res = of_read_number(&indexes[NUM_RES_PROPERTY], 1);
if (resno >= num_res)
return 0; /* or an error */
i = START_OF_ENTRIES + NEXT_ENTRY * resno;
switch (value) {
case BAR_ADDRS:
ret = of_read_number(&indexes[i], 2);
break;
case APERTURE_SIZE:
ret = of_read_number(&indexes[i + APERTURE_PROPERTY], 2);
break;
case WDW_SIZE:
ret = of_read_number(&indexes[i + WDW_SIZE_PROPERTY], 2);
break;
}
return ret;
}
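/*
 * Worked example of the indexing above: for resno = 1 the entry starts at
 * cell i = START_OF_ENTRIES + NEXT_ENTRY * 1 = 9, so BAR_ADDRS reads the
 * 64-bit value in cells 9-10 of "ibm,open-sriov-vf-bar-info", APERTURE_SIZE
 * reads cells 11-12 and WDW_SIZE reads cells 13-14.
 */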
static void of_pci_set_vf_bar_size(struct pci_dev *dev, const int *indexes)
{
struct resource *res;
resource_size_t base, size;
int i, r, num_res;
num_res = of_read_number(&indexes[NUM_RES_PROPERTY], 1);
num_res = min_t(int, num_res, PCI_SRIOV_NUM_BARS);
for (i = START_OF_ENTRIES, r = 0; r < num_res && r < PCI_SRIOV_NUM_BARS;
i += NEXT_ENTRY, r++) {
res = &dev->resource[r + PCI_IOV_RESOURCES];
base = of_read_number(&indexes[i], 2);
size = of_read_number(&indexes[i + APERTURE_PROPERTY], 2);
res->flags = pci_parse_of_flags(of_read_number
(&indexes[i + LOW_INT], 1), 0);
res->flags |= (IORESOURCE_MEM_64 | IORESOURCE_PCI_FIXED);
res->name = pci_name(dev);
res->start = base;
res->end = base + size - 1;
}
}
static void of_pci_parse_iov_addrs(struct pci_dev *dev, const int *indexes)
{
struct resource *res, *root, *conflict;
resource_size_t base, size;
int i, r, num_res;
/*
	 * First element in the array is the number of BARs
	 * returned. Walk the list and assign the firmware-provided
	 * values into the corresponding resource structures.
*/
num_res = of_read_number(&indexes[NUM_RES_PROPERTY], 1);
for (i = START_OF_ENTRIES, r = 0; r < num_res && r < PCI_SRIOV_NUM_BARS;
i += NEXT_ENTRY, r++) {
res = &dev->resource[r + PCI_IOV_RESOURCES];
base = of_read_number(&indexes[i], 2);
size = of_read_number(&indexes[i + WDW_SIZE_PROPERTY], 2);
res->name = pci_name(dev);
res->start = base;
res->end = base + size - 1;
root = &iomem_resource;
dev_dbg(&dev->dev,
"pSeries IOV BAR %d: trying firmware assignment %pR\n",
r + PCI_IOV_RESOURCES, res);
conflict = request_resource_conflict(root, res);
if (conflict) {
dev_info(&dev->dev,
"BAR %d: %pR conflicts with %s %pR\n",
r + PCI_IOV_RESOURCES, res,
conflict->name, conflict);
res->flags |= IORESOURCE_UNSET;
}
}
}
static void pseries_disable_sriov_resources(struct pci_dev *pdev)
{
int i;
pci_warn(pdev, "No hypervisor support for SR-IOV on this device, IOV BARs disabled.\n");
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
pdev->resource[i + PCI_IOV_RESOURCES].flags = 0;
}
static void pseries_pci_fixup_resources(struct pci_dev *pdev)
{
const int *indexes;
struct device_node *dn = pci_device_to_OF_node(pdev);
	/* Firmware must support open SR-IOV, otherwise don't configure */
indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
if (indexes)
of_pci_set_vf_bar_size(pdev, indexes);
else
pseries_disable_sriov_resources(pdev);
}
static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
{
const int *indexes;
struct device_node *dn = pci_device_to_OF_node(pdev);
if (!pdev->is_physfn)
return;
	/* Firmware must support open SR-IOV, otherwise don't configure */
indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
if (indexes)
of_pci_parse_iov_addrs(pdev, indexes);
else
pseries_disable_sriov_resources(pdev);
}
static resource_size_t pseries_pci_iov_resource_alignment(struct pci_dev *pdev,
int resno)
{
const __be32 *reg;
struct device_node *dn = pci_device_to_OF_node(pdev);
	/* Firmware must support open SR-IOV, otherwise report regular alignment */
reg = of_get_property(dn, "ibm,is-open-sriov-pf", NULL);
if (!reg)
return pci_iov_resource_size(pdev, resno);
if (!pdev->is_physfn)
return 0;
return pseries_get_iov_fw_value(pdev,
resno - PCI_IOV_RESOURCES,
APERTURE_SIZE);
}
#endif
static void __init pSeries_setup_arch(void)
{
set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
/* Discover PIC type and setup ppc_md accordingly */
smp_init_pseries();
// Setup CPU hotplug callbacks
pseries_cpu_hotplug_init();
if (radix_enabled() && !mmu_has_feature(MMU_FTR_GTSE))
if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
panic("BUG: Radix support requires either GTSE or RPT_INVALIDATE\n");
/* openpic global configuration register (64-bit format). */
/* openpic Interrupt Source Unit pointer (64-bit format). */
/* python0 facility area (mmio) (64-bit format) REAL address. */
/* init to some ~sane value until calibrate_delay() runs */
loops_per_jiffy = 50000000;
fwnmi_init();
pseries_setup_security_mitigations();
if (!radix_enabled())
pseries_lpar_read_hblkrm_characteristics();
/* By default, only probe PCI (can be overridden by rtas_pci) */
pci_add_flags(PCI_PROBE_ONLY);
/* Find and initialize PCI host bridges */
init_pci_config_tokens();
of_reconfig_notifier_register(&pci_dn_reconfig_nb);
pSeries_nvram_init();
if (firmware_has_feature(FW_FEATURE_LPAR)) {
vpa_init(boot_cpuid);
if (lppaca_shared_proc()) {
static_branch_enable(&shared_processor);
pv_spinlocks_init();
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
			static_key_slow_inc(&paravirt_steal_enabled);
			if (steal_acc)
				static_key_slow_inc(&paravirt_steal_rq_enabled);
#endif
}
ppc_md.power_save = pseries_lpar_idle;
ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
#ifdef CONFIG_PCI_IOV
ppc_md.pcibios_fixup_resources =
pseries_pci_fixup_resources;
ppc_md.pcibios_fixup_sriov =
pseries_pci_fixup_iov_resources;
ppc_md.pcibios_iov_resource_alignment =
pseries_pci_iov_resource_alignment;
#endif
} else {
/* No special idle routine */
ppc_md.enable_pmcs = power4_enable_pmcs;
}
ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
pseries_rng_init();
}
static void pseries_panic(char *str)
{
panic_flush_kmsg_end();
rtas_os_term(str);
}
static int __init pSeries_init_panel(void)
{
/* Manually leave the kernel version on the panel. */
#ifdef __BIG_ENDIAN__
ppc_md.progress("Linux ppc64\n", 0);
#else
ppc_md.progress("Linux ppc64le\n", 0);
#endif
ppc_md.progress(init_utsname()->version, 0);
return 0;
}
machine_arch_initcall(pseries, pSeries_init_panel);
static int pseries_set_dabr(unsigned long dabr, unsigned long dabrx)
{
return plpar_hcall_norets(H_SET_DABR, dabr);
}
static int pseries_set_xdabr(unsigned long dabr, unsigned long dabrx)
{
/* Have to set at least one bit in the DABRX according to PAPR */
if (dabrx == 0 && dabr == 0)
dabrx = DABRX_USER;
/* PAPR says we can only set kernel and user bits */
dabrx &= DABRX_KERNEL | DABRX_USER;
return plpar_hcall_norets(H_SET_XDABR, dabr, dabrx);
}
static int pseries_set_dawr(int nr, unsigned long dawr, unsigned long dawrx)
{
/* PAPR says we can't set HYP */
dawrx &= ~DAWRX_HYP;
if (nr == 0)
return plpar_set_watchpoint0(dawr, dawrx);
else
return plpar_set_watchpoint1(dawr, dawrx);
}
#define CMO_CHARACTERISTICS_TOKEN 44
#define CMO_MAXLENGTH 1026
void pSeries_coalesce_init(void)
{
struct hvcall_mpp_x_data mpp_x_data;
if (firmware_has_feature(FW_FEATURE_CMO) && !h_get_mpp_x(&mpp_x_data))
powerpc_firmware_features |= FW_FEATURE_XCMO;
else
powerpc_firmware_features &= ~FW_FEATURE_XCMO;
}
/**
 * pSeries_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions,
* handle that here. (Stolen from parse_system_parameter_string)
*/
static void __init pSeries_cmo_feature_init(void)
{
static struct papr_sysparm_buf buf __initdata;
static_assert(sizeof(buf.val) >= CMO_MAXLENGTH);
char *ptr, *key, *value, *end;
int page_order = IOMMU_PAGE_SHIFT_4K;
pr_debug(" -> fw_cmo_feature_init()\n");
if (papr_sysparm_get(PAPR_SYSPARM_COOP_MEM_OVERCOMMIT_ATTRS, &buf)) {
pr_debug("CMO not available\n");
pr_debug(" <- fw_cmo_feature_init()\n");
return;
}
end = &buf.val[CMO_MAXLENGTH];
ptr = &buf.val[0];
key = value = ptr;
while (*ptr && (ptr <= end)) {
/* Separate the key and value by replacing '=' with '\0' and
* point the value at the string after the '='
*/
if (ptr[0] == '=') {
ptr[0] = '\0';
value = ptr + 1;
} else if (ptr[0] == '\0' || ptr[0] == ',') {
/* Terminate the string containing the key/value pair */
ptr[0] = '\0';
if (key == value) {
pr_debug("Malformed key/value pair\n");
/* Never found a '=', end processing */
break;
}
if (0 == strcmp(key, "CMOPageSize"))
page_order = simple_strtol(value, NULL, 10);
else if (0 == strcmp(key, "PrPSP"))
CMO_PrPSP = simple_strtol(value, NULL, 10);
else if (0 == strcmp(key, "SecPSP"))
CMO_SecPSP = simple_strtol(value, NULL, 10);
value = key = ptr + 1;
}
ptr++;
}
/* Page size is returned as the power of 2 of the page size,
* convert to the page size in bytes before returning
*/
CMO_PageSize = 1 << page_order;
pr_debug("CMO_PageSize = %lu\n", CMO_PageSize);
if (CMO_PrPSP != -1 || CMO_SecPSP != -1) {
pr_info("CMO enabled\n");
pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
CMO_SecPSP);
powerpc_firmware_features |= FW_FEATURE_CMO;
pSeries_coalesce_init();
} else
pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP,
CMO_SecPSP);
pr_debug(" <- fw_cmo_feature_init()\n");
}
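/*
 * Worked example (hypothetical parameter data): a comma-terminated pair such
 * as "CMOPageSize=12," sets page_order to 12 above, so CMO_PageSize ends up
 * as 1 << 12 = 4096 bytes; likewise "PrPSP=42," and "SecPSP=77," set CMO_PrPSP
 * and CMO_SecPSP, and any PSP value other than -1 turns on FW_FEATURE_CMO.
 */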
static void __init pseries_add_hw_description(void)
{
struct device_node *dn;
const char *s;
dn = of_find_node_by_path("/openprom");
if (dn) {
if (of_property_read_string(dn, "model", &s) == 0)
seq_buf_printf(&ppc_hw_desc, "of:%s ", s);
of_node_put(dn);
}
dn = of_find_node_by_path("/hypervisor");
if (dn) {
if (of_property_read_string(dn, "compatible", &s) == 0)
seq_buf_printf(&ppc_hw_desc, "hv:%s ", s);
of_node_put(dn);
return;
}
if (of_property_read_bool(of_root, "ibm,powervm-partition") ||
of_property_read_bool(of_root, "ibm,fw-net-version"))
seq_buf_printf(&ppc_hw_desc, "hv:phyp ");
}
/*
* Early initialization. Relocation is on but do not reference unbolted pages
*/
static void __init pseries_init(void)
{
pr_debug(" -> pseries_init()\n");
pseries_add_hw_description();
#ifdef CONFIG_HVC_CONSOLE
if (firmware_has_feature(FW_FEATURE_LPAR))
hvc_vio_init_early();
#endif
if (firmware_has_feature(FW_FEATURE_XDABR))
ppc_md.set_dabr = pseries_set_xdabr;
else if (firmware_has_feature(FW_FEATURE_DABR))
ppc_md.set_dabr = pseries_set_dabr;
if (firmware_has_feature(FW_FEATURE_SET_MODE))
ppc_md.set_dawr = pseries_set_dawr;
pSeries_cmo_feature_init();
iommu_init_early_pSeries();
pr_debug(" <- pseries_init()\n");
}
/**
* pseries_power_off - tell firmware about how to power off the system.
*
* This function calls either the power-off rtas token in normal cases
* or the ibm,power-off-ups token (if present & requested) in case of
 * a power failure. If the power-off token is used, power-on will only be
 * possible with a power button press. If the ibm,power-off-ups token is used
 * it will allow automatic power-on after power is restored.
*/
static void pseries_power_off(void)
{
int rc;
int rtas_poweroff_ups_token = rtas_function_token(RTAS_FN_IBM_POWER_OFF_UPS);
if (rtas_flash_term_hook)
rtas_flash_term_hook(SYS_POWER_OFF);
if (rtas_poweron_auto == 0 ||
rtas_poweroff_ups_token == RTAS_UNKNOWN_SERVICE) {
rc = rtas_call(rtas_function_token(RTAS_FN_POWER_OFF), 2, 1, NULL, -1, -1);
printk(KERN_INFO "RTAS power-off returned %d\n", rc);
} else {
rc = rtas_call(rtas_poweroff_ups_token, 0, 1, NULL);
printk(KERN_INFO "RTAS ibm,power-off-ups returned %d\n", rc);
}
for (;;);
}
static int __init pSeries_probe(void)
{
if (!of_node_is_type(of_root, "chrp"))
return 0;
/* Cell blades firmware claims to be chrp while it's not. Until this
* is fixed, we need to avoid those here.
*/
if (of_machine_is_compatible("IBM,CPBW-1.0") ||
of_machine_is_compatible("IBM,CBEA"))
return 0;
pm_power_off = pseries_power_off;
pr_debug("Machine is%s LPAR !\n",
(powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");
pseries_init();
return 1;
}
static int pSeries_pci_probe_mode(struct pci_bus *bus)
{
if (firmware_has_feature(FW_FEATURE_LPAR))
return PCI_PROBE_DEVTREE;
return PCI_PROBE_NORMAL;
}
#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long pseries_memory_block_size(void)
{
return memory_block_size;
}
#endif
struct pci_controller_ops pseries_pci_controller_ops = {
.probe_mode = pSeries_pci_probe_mode,
#ifdef CONFIG_SPAPR_TCE_IOMMU
.device_group = pSeries_pci_device_group,
#endif
};
define_machine(pseries) {
.name = "pSeries",
.probe = pSeries_probe,
.setup_arch = pSeries_setup_arch,
.init_IRQ = pseries_init_irq,
.show_cpuinfo = pSeries_show_cpuinfo,
.log_error = pSeries_log_error,
.discover_phbs = pSeries_discover_phbs,
.pcibios_fixup = pSeries_final_fixup,
.restart = rtas_restart,
.halt = rtas_halt,
.panic = pseries_panic,
.get_boot_time = rtas_get_boot_time,
.get_rtc_time = rtas_get_rtc_time,
.set_rtc_time = rtas_set_rtc_time,
.progress = rtas_progress,
.system_reset_exception = pSeries_system_reset_exception,
.machine_check_early = pseries_machine_check_realmode,
.machine_check_exception = pSeries_machine_check_exception,
.machine_check_log_err = pSeries_machine_check_log_err,
#ifdef CONFIG_KEXEC_CORE
.machine_kexec = pseries_machine_kexec,
.kexec_cpu_down = pseries_kexec_cpu_down,
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
.memory_block_size = pseries_memory_block_size,
#endif
};
| linux-master | arch/powerpc/platforms/pseries/setup.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Interface for power-management for ppc64 compliant platform
*
* Manish Ahuja <[email protected]>
*
* Feb 2007
*
* Copyright (C) 2007 IBM Corporation.
*/
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/machdep.h>
#include "pseries.h"
unsigned long rtas_poweron_auto; /* default and normal state is 0 */
static ssize_t auto_poweron_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%lu\n", rtas_poweron_auto);
}
static ssize_t auto_poweron_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
int ret;
unsigned long ups_restart;
ret = sscanf(buf, "%lu", &ups_restart);
if ((ret == 1) && ((ups_restart == 1) || (ups_restart == 0))){
rtas_poweron_auto = ups_restart;
return n;
}
return -EINVAL;
}
static struct kobj_attribute auto_poweron_attr =
__ATTR(auto_poweron, 0644, auto_poweron_show, auto_poweron_store);
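/*
 * Usage note: this attribute shows up as /sys/power/auto_poweron. Writing
 * "1" makes pseries_power_off() use the ibm,power-off-ups RTAS call (which
 * allows automatic power-on when power is restored); writing "0" restores
 * the default power-off behaviour.
 */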
#ifndef CONFIG_PM
struct kobject *power_kobj;
static struct attribute *g[] = {
&auto_poweron_attr.attr,
NULL,
};
static const struct attribute_group attr_group = {
.attrs = g,
};
static int __init pm_init(void)
{
power_kobj = kobject_create_and_add("power", NULL);
if (!power_kobj)
return -ENOMEM;
return sysfs_create_group(power_kobj, &attr_group);
}
machine_core_initcall(pseries, pm_init);
#else
static int __init apo_pm_init(void)
{
return (sysfs_create_file(power_kobj, &auto_poweron_attr.attr));
}
machine_device_initcall(pseries, apo_pm_init);
#endif
| linux-master | arch/powerpc/platforms/pseries/power.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* c 2001 PPC 64 Team, IBM Corp
*
* /dev/nvram driver for PPC64
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/of.h>
#include <asm/nvram.h>
#include <asm/rtas.h>
#include <asm/machdep.h>
/* Max bytes to read/write in one go */
#define NVRW_CNT 0x20
static unsigned int nvram_size;
static int nvram_fetch, nvram_store;
static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */
static DEFINE_SPINLOCK(nvram_lock);
/* See clobbering_unread_rtas_event() */
#define NVRAM_RTAS_READ_TIMEOUT 5 /* seconds */
static time64_t last_unread_rtas_event; /* timestamp */
#ifdef CONFIG_PSTORE
time64_t last_rtas_event;
#endif
static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index)
{
unsigned int i;
unsigned long len;
int done;
unsigned long flags;
char *p = buf;
if (nvram_size == 0 || nvram_fetch == RTAS_UNKNOWN_SERVICE)
return -ENODEV;
if (*index >= nvram_size)
return 0;
i = *index;
if (i + count > nvram_size)
count = nvram_size - i;
spin_lock_irqsave(&nvram_lock, flags);
for (; count != 0; count -= len) {
len = count;
if (len > NVRW_CNT)
len = NVRW_CNT;
if ((rtas_call(nvram_fetch, 3, 2, &done, i, __pa(nvram_buf),
len) != 0) || len != done) {
spin_unlock_irqrestore(&nvram_lock, flags);
return -EIO;
}
memcpy(p, nvram_buf, len);
p += len;
i += len;
}
spin_unlock_irqrestore(&nvram_lock, flags);
*index = i;
return p - buf;
}
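/*
 * Worked example: with NVRW_CNT = 0x20, a 100-byte read starting at
 * *index = 0 is split into four nvram-fetch RTAS calls of 32, 32, 32 and
 * 4 bytes, each bounced through nvram_buf; the function then returns 100
 * with *index advanced to 100 (clamped earlier if it would pass nvram_size).
 */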
static ssize_t pSeries_nvram_write(char *buf, size_t count, loff_t *index)
{
unsigned int i;
unsigned long len;
int done;
unsigned long flags;
const char *p = buf;
if (nvram_size == 0 || nvram_store == RTAS_UNKNOWN_SERVICE)
return -ENODEV;
if (*index >= nvram_size)
return 0;
i = *index;
if (i + count > nvram_size)
count = nvram_size - i;
spin_lock_irqsave(&nvram_lock, flags);
for (; count != 0; count -= len) {
len = count;
if (len > NVRW_CNT)
len = NVRW_CNT;
memcpy(nvram_buf, p, len);
if ((rtas_call(nvram_store, 3, 2, &done, i, __pa(nvram_buf),
len) != 0) || len != done) {
spin_unlock_irqrestore(&nvram_lock, flags);
return -EIO;
}
p += len;
i += len;
}
spin_unlock_irqrestore(&nvram_lock, flags);
*index = i;
return p - buf;
}
static ssize_t pSeries_nvram_get_size(void)
{
return nvram_size ? nvram_size : -ENODEV;
}
/* nvram_write_error_log
*
* We need to buffer the error logs into nvram to ensure that we have
* the failure information to decode.
*/
int nvram_write_error_log(char * buff, int length,
unsigned int err_type, unsigned int error_log_cnt)
{
int rc = nvram_write_os_partition(&rtas_log_partition, buff, length,
err_type, error_log_cnt);
if (!rc) {
last_unread_rtas_event = ktime_get_real_seconds();
#ifdef CONFIG_PSTORE
last_rtas_event = ktime_get_real_seconds();
#endif
}
return rc;
}
/* nvram_read_error_log
*
* Reads nvram for error log for at most 'length'
*/
int nvram_read_error_log(char *buff, int length,
unsigned int *err_type, unsigned int *error_log_cnt)
{
return nvram_read_partition(&rtas_log_partition, buff, length,
err_type, error_log_cnt);
}
/* This doesn't actually zero anything, but it sets the event_logged
* word to tell that this event is safely in syslog.
*/
int nvram_clear_error_log(void)
{
loff_t tmp_index;
int clear_word = ERR_FLAG_ALREADY_LOGGED;
int rc;
if (rtas_log_partition.index == -1)
return -1;
tmp_index = rtas_log_partition.index;
rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index);
if (rc <= 0) {
printk(KERN_ERR "nvram_clear_error_log: Failed nvram_write (%d)\n", rc);
return rc;
}
last_unread_rtas_event = 0;
return 0;
}
/*
* Are we using the ibm,rtas-log for oops/panic reports? And if so,
* would logging this oops/panic overwrite an RTAS event that rtas_errd
* hasn't had a chance to read and process? Return 1 if so, else 0.
*
* We assume that if rtas_errd hasn't read the RTAS event in
* NVRAM_RTAS_READ_TIMEOUT seconds, it's probably not going to.
*/
int clobbering_unread_rtas_event(void)
{
return (oops_log_partition.index == rtas_log_partition.index
&& last_unread_rtas_event
&& ktime_get_real_seconds() - last_unread_rtas_event <=
NVRAM_RTAS_READ_TIMEOUT);
}
static int __init pseries_nvram_init_log_partitions(void)
{
int rc;
/* Scan nvram for partitions */
nvram_scan_partitions();
rc = nvram_init_os_partition(&rtas_log_partition);
nvram_init_oops_partition(rc == 0);
return 0;
}
machine_arch_initcall(pseries, pseries_nvram_init_log_partitions);
int __init pSeries_nvram_init(void)
{
struct device_node *nvram;
const __be32 *nbytes_p;
unsigned int proplen;
nvram = of_find_node_by_type(NULL, "nvram");
if (nvram == NULL)
return -ENODEV;
nbytes_p = of_get_property(nvram, "#bytes", &proplen);
if (nbytes_p == NULL || proplen != sizeof(unsigned int)) {
of_node_put(nvram);
return -EIO;
}
nvram_size = be32_to_cpup(nbytes_p);
nvram_fetch = rtas_function_token(RTAS_FN_NVRAM_FETCH);
nvram_store = rtas_function_token(RTAS_FN_NVRAM_STORE);
printk(KERN_INFO "PPC64 nvram contains %d bytes\n", nvram_size);
of_node_put(nvram);
ppc_md.nvram_read = pSeries_nvram_read;
ppc_md.nvram_write = pSeries_nvram_write;
ppc_md.nvram_size = pSeries_nvram_get_size;
return 0;
}
| linux-master | arch/powerpc/platforms/pseries/nvram.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* hvcserver.c
* Copyright (C) 2004 Ryan S Arnold, IBM Corporation
*
* PPC64 virtual I/O console server support.
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/hvcall.h>
#include <asm/hvcserver.h>
#include <asm/io.h>
#define HVCS_ARCH_VERSION "1.0.0"
MODULE_AUTHOR("Ryan S. Arnold <[email protected]>");
MODULE_DESCRIPTION("IBM hvcs ppc64 API");
MODULE_LICENSE("GPL");
MODULE_VERSION(HVCS_ARCH_VERSION);
/*
* Convert arch specific return codes into relevant errnos. The hvcs
* functions aren't performance sensitive, so this conversion isn't an
* issue.
*/
static int hvcs_convert(long to_convert)
{
switch (to_convert) {
case H_SUCCESS:
return 0;
case H_PARAMETER:
return -EINVAL;
case H_HARDWARE:
return -EIO;
case H_BUSY:
case H_LONG_BUSY_ORDER_1_MSEC:
case H_LONG_BUSY_ORDER_10_MSEC:
case H_LONG_BUSY_ORDER_100_MSEC:
case H_LONG_BUSY_ORDER_1_SEC:
case H_LONG_BUSY_ORDER_10_SEC:
case H_LONG_BUSY_ORDER_100_SEC:
return -EBUSY;
case H_FUNCTION:
default:
return -EPERM;
}
}
/**
* hvcs_free_partner_info - free pi allocated by hvcs_get_partner_info
* @head: list_head pointer for an allocated list of partner info structs to
* free.
*
* This function is used to free the partner info list that was returned by
* calling hvcs_get_partner_info().
*/
int hvcs_free_partner_info(struct list_head *head)
{
struct hvcs_partner_info *pi;
struct list_head *element;
if (!head)
return -EINVAL;
while (!list_empty(head)) {
element = head->next;
pi = list_entry(element, struct hvcs_partner_info, node);
list_del(element);
kfree(pi);
}
return 0;
}
EXPORT_SYMBOL(hvcs_free_partner_info);
/* Helper function for hvcs_get_partner_info */
static int hvcs_next_partner(uint32_t unit_address,
unsigned long last_p_partition_ID,
unsigned long last_p_unit_address, unsigned long *pi_buff)
{
long retval;
retval = plpar_hcall_norets(H_VTERM_PARTNER_INFO, unit_address,
last_p_partition_ID,
last_p_unit_address, virt_to_phys(pi_buff));
return hvcs_convert(retval);
}
/**
* hvcs_get_partner_info - Get all of the partner info for a vty-server adapter
* @unit_address: The unit_address of the vty-server adapter for which this
* function is fetching partner info.
* @head: An initialized list_head pointer to an empty list to use to return the
* list of partner info fetched from the hypervisor to the caller.
 * @pi_buff: A page-sized buffer pre-allocated prior to calling this function
 *	that is to be used by firmware as an iterator to keep track
 *	of the partner info retrieval.
*
 * This function returns zero on success, including when there is no partner info.
*
* The pi_buff is pre-allocated prior to calling this function because this
* function may be called with a spin_lock held and kmalloc of a page is not
* recommended as GFP_ATOMIC.
*
* The first long of this buffer is used to store a partner unit address. The
* second long is used to store a partner partition ID and starting at
* pi_buff[2] is the 79 character Converged Location Code (diff size than the
* unsigned longs, hence the casting mumbo jumbo you see later).
*
* Invocation of this function should always be followed by an invocation of
* hvcs_free_partner_info() using a pointer to the SAME list head instance
* that was passed as a parameter to this function.
*/
int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head,
unsigned long *pi_buff)
{
/*
* Dealt with as longs because of the hcall interface even though the
* values are uint32_t.
*/
unsigned long last_p_partition_ID;
unsigned long last_p_unit_address;
struct hvcs_partner_info *next_partner_info = NULL;
int more = 1;
int retval;
/* invalid parameters */
if (!head || !pi_buff)
return -EINVAL;
memset(pi_buff, 0x00, PAGE_SIZE);
last_p_partition_ID = last_p_unit_address = ~0UL;
INIT_LIST_HEAD(head);
do {
retval = hvcs_next_partner(unit_address, last_p_partition_ID,
last_p_unit_address, pi_buff);
if (retval) {
/*
* Don't indicate that we've failed if we have
* any list elements.
*/
if (!list_empty(head))
return 0;
return retval;
}
last_p_partition_ID = be64_to_cpu(pi_buff[0]);
last_p_unit_address = be64_to_cpu(pi_buff[1]);
/* This indicates that there are no further partners */
if (last_p_partition_ID == ~0UL
&& last_p_unit_address == ~0UL)
break;
/* This is a very small struct and will be freed soon in
* hvcs_free_partner_info(). */
next_partner_info = kmalloc(sizeof(struct hvcs_partner_info),
GFP_ATOMIC);
if (!next_partner_info) {
printk(KERN_WARNING "HVCONSOLE: kmalloc() failed to"
" allocate partner info struct.\n");
hvcs_free_partner_info(head);
return -ENOMEM;
}
next_partner_info->unit_address
= (unsigned int)last_p_unit_address;
next_partner_info->partition_ID
= (unsigned int)last_p_partition_ID;
/* copy the Null-term char too */
strscpy(&next_partner_info->location_code[0],
(char *)&pi_buff[2],
sizeof(next_partner_info->location_code));
list_add_tail(&(next_partner_info->node), head);
next_partner_info = NULL;
} while (more);
return 0;
}
EXPORT_SYMBOL(hvcs_get_partner_info);
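/*
 * Illustrative sketch only (not used by the kernel): how a hypothetical
 * caller could pair hvcs_get_partner_info() with hvcs_free_partner_info()
 * as described above. The name example_list_partners() and the pr_info()
 * output are invented for this example.
 */
#if 0
static int example_list_partners(uint32_t unit_address)
{
	struct hvcs_partner_info *pi;
	unsigned long *pi_buff;
	LIST_HEAD(head);
	int retval;

	/* The iterator buffer must be a full page, allocated by the caller. */
	pi_buff = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!pi_buff)
		return -ENOMEM;

	retval = hvcs_get_partner_info(unit_address, &head, pi_buff);
	if (!retval)
		list_for_each_entry(pi, &head, node)
			pr_info("partner %x:%x at %s\n", pi->partition_ID,
				pi->unit_address, pi->location_code);

	/* Always free with the SAME list head that was passed in above. */
	hvcs_free_partner_info(&head);
	free_page((unsigned long)pi_buff);
	return retval;
}
#endif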
/**
* hvcs_register_connection - establish a connection between this vty-server and
* a vty.
 * @unit_address: The unit address of the vty-server adapter that is to
* establish a connection.
* @p_partition_ID: The partition ID of the vty adapter that is to be connected.
* @p_unit_address: The unit address of the vty adapter to which the vty-server
* is to be connected.
*
* If this function is called once and -EINVAL is returned it may
* indicate that the partner info needs to be refreshed for the
* target unit address at which point the caller must invoke
* hvcs_get_partner_info() and then call this function again. If,
* for a second time, -EINVAL is returned then it indicates that
* there is probably already a partner connection registered to a
* different vty-server adapter. It is also possible that a second
* -EINVAL may indicate that one of the parms is not valid, for
* instance if the link was removed between the vty-server adapter
* and the vty adapter that you are trying to open. Don't shoot the
* messenger. Firmware implemented it this way.
*/
int hvcs_register_connection( uint32_t unit_address,
uint32_t p_partition_ID, uint32_t p_unit_address)
{
long retval;
retval = plpar_hcall_norets(H_REGISTER_VTERM, unit_address,
p_partition_ID, p_unit_address);
return hvcs_convert(retval);
}
EXPORT_SYMBOL(hvcs_register_connection);
/**
* hvcs_free_connection - free the connection between a vty-server and vty
* @unit_address: The unit address of the vty-server that is to have its
* connection severed.
*
* This function is used to free the partner connection between a vty-server
* adapter and a vty adapter.
*
* If -EBUSY is returned continue to call this function until 0 is returned.
*/
int hvcs_free_connection(uint32_t unit_address)
{
long retval;
retval = plpar_hcall_norets(H_FREE_VTERM, unit_address);
return hvcs_convert(retval);
}
EXPORT_SYMBOL(hvcs_free_connection);
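/*
 * Illustrative sketch only: the retry rules documented above, combined. A
 * hypothetical caller retries hvcs_register_connection() once after
 * refreshing partner info on a first -EINVAL, and keeps calling
 * hvcs_free_connection() while it returns -EBUSY. All names other than the
 * exported functions are invented (msleep() would need <linux/delay.h>).
 */
#if 0
static int example_open_vterm(uint32_t unit_address, uint32_t p_partition_ID,
			      uint32_t p_unit_address)
{
	int retval;

	retval = hvcs_register_connection(unit_address, p_partition_ID,
					  p_unit_address);
	if (retval == -EINVAL) {
		/* Refresh partner info (see hvcs_get_partner_info()) here,
		 * then retry exactly once; a second -EINVAL means give up. */
		retval = hvcs_register_connection(unit_address, p_partition_ID,
						  p_unit_address);
	}
	return retval;
}

static void example_close_vterm(uint32_t unit_address)
{
	/* Keep calling until the hypervisor stops returning -EBUSY. */
	while (hvcs_free_connection(unit_address) == -EBUSY)
		msleep(10);
}
#endif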
| linux-master | arch/powerpc/platforms/pseries/hvcserver.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
*
* Rewrite, cleanup:
*
* Copyright (C) 2004 Olof Johansson <[email protected]>, IBM Corporation
* Copyright (C) 2006 Olof Johansson <[email protected]>
*
* Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/ppc-pci.h>
#include <asm/udbg.h>
#include <asm/mmzone.h>
#include <asm/plpar_wrappers.h>
#include "pseries.h"
enum {
DDW_QUERY_PE_DMA_WIN = 0,
DDW_CREATE_PE_DMA_WIN = 1,
DDW_REMOVE_PE_DMA_WIN = 2,
DDW_APPLICABLE_SIZE
};
enum {
DDW_EXT_SIZE = 0,
DDW_EXT_RESET_DMA_WIN = 1,
DDW_EXT_QUERY_OUT_SIZE = 2
};
static struct iommu_table *iommu_pseries_alloc_table(int node)
{
struct iommu_table *tbl;
tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, node);
if (!tbl)
return NULL;
INIT_LIST_HEAD_RCU(&tbl->it_group_list);
kref_init(&tbl->it_kref);
return tbl;
}
static struct iommu_table_group *iommu_pseries_alloc_group(int node)
{
struct iommu_table_group *table_group;
table_group = kzalloc_node(sizeof(*table_group), GFP_KERNEL, node);
if (!table_group)
return NULL;
#ifdef CONFIG_IOMMU_API
table_group->ops = &spapr_tce_table_group_ops;
table_group->pgsizes = SZ_4K;
#endif
table_group->tables[0] = iommu_pseries_alloc_table(node);
if (table_group->tables[0])
return table_group;
kfree(table_group);
return NULL;
}
static void iommu_pseries_free_group(struct iommu_table_group *table_group,
const char *node_name)
{
if (!table_group)
return;
#ifdef CONFIG_IOMMU_API
if (table_group->group) {
iommu_group_put(table_group->group);
BUG_ON(table_group->group);
}
#endif
/* Default DMA window table is at index 0, while DDW at 1. SR-IOV
	 * adapters only have a table at index 1.
*/
if (table_group->tables[0])
iommu_tce_table_put(table_group->tables[0]);
if (table_group->tables[1])
iommu_tce_table_put(table_group->tables[1]);
kfree(table_group);
}
static int tce_build_pSeries(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
unsigned long attrs)
{
u64 proto_tce;
__be64 *tcep;
u64 rpn;
const unsigned long tceshift = tbl->it_page_shift;
const unsigned long pagesize = IOMMU_PAGE_SIZE(tbl);
proto_tce = TCE_PCI_READ; // Read allowed
if (direction != DMA_TO_DEVICE)
proto_tce |= TCE_PCI_WRITE;
tcep = ((__be64 *)tbl->it_base) + index;
while (npages--) {
/* can't move this out since we might cross MEMBLOCK boundary */
rpn = __pa(uaddr) >> tceshift;
*tcep = cpu_to_be64(proto_tce | rpn << tceshift);
uaddr += pagesize;
tcep++;
}
return 0;
}
static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
__be64 *tcep;
tcep = ((__be64 *)tbl->it_base) + index;
while (npages--)
*(tcep++) = 0;
}
static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
{
__be64 *tcep;
tcep = ((__be64 *)tbl->it_base) + index;
return be64_to_cpu(*tcep);
}
static void tce_free_pSeriesLP(unsigned long liobn, long, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
unsigned long attrs)
{
u64 rc = 0;
u64 proto_tce, tce;
u64 rpn;
int ret = 0;
long tcenum_start = tcenum, npages_start = npages;
rpn = __pa(uaddr) >> tceshift;
proto_tce = TCE_PCI_READ;
if (direction != DMA_TO_DEVICE)
proto_tce |= TCE_PCI_WRITE;
while (npages--) {
tce = proto_tce | rpn << tceshift;
rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce);
if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
ret = (int)rc;
tce_free_pSeriesLP(liobn, tcenum_start, tceshift,
(npages_start - (npages + 1)));
break;
}
if (rc && printk_ratelimit()) {
printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
printk("\tindex = 0x%llx\n", (u64)liobn);
printk("\ttcenum = 0x%llx\n", (u64)tcenum);
printk("\ttce val = 0x%llx\n", tce );
dump_stack();
}
tcenum++;
rpn++;
}
return ret;
}
static DEFINE_PER_CPU(__be64 *, tce_page);
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
unsigned long attrs)
{
u64 rc = 0;
u64 proto_tce;
__be64 *tcep;
u64 rpn;
long l, limit;
long tcenum_start = tcenum, npages_start = npages;
int ret = 0;
unsigned long flags;
const unsigned long tceshift = tbl->it_page_shift;
if ((npages == 1) || !firmware_has_feature(FW_FEATURE_PUT_TCE_IND)) {
return tce_build_pSeriesLP(tbl->it_index, tcenum,
tceshift, npages, uaddr,
direction, attrs);
}
local_irq_save(flags); /* to protect tcep and the page behind it */
tcep = __this_cpu_read(tce_page);
/* This is safe to do since interrupts are off when we're called
* from iommu_alloc{,_sg}()
*/
if (!tcep) {
tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
/* If allocation fails, fall back to the loop implementation */
if (!tcep) {
local_irq_restore(flags);
return tce_build_pSeriesLP(tbl->it_index, tcenum,
tceshift,
npages, uaddr, direction, attrs);
}
__this_cpu_write(tce_page, tcep);
}
rpn = __pa(uaddr) >> tceshift;
proto_tce = TCE_PCI_READ;
if (direction != DMA_TO_DEVICE)
proto_tce |= TCE_PCI_WRITE;
/* We can map max one pageful of TCEs at a time */
do {
/*
* Set up the page with TCE data, looping through and setting
* the values.
*/
limit = min_t(long, npages, 4096 / TCE_ENTRY_SIZE);
for (l = 0; l < limit; l++) {
tcep[l] = cpu_to_be64(proto_tce | rpn << tceshift);
rpn++;
}
rc = plpar_tce_put_indirect((u64)tbl->it_index,
(u64)tcenum << tceshift,
(u64)__pa(tcep),
limit);
npages -= limit;
tcenum += limit;
} while (npages > 0 && !rc);
local_irq_restore(flags);
if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
ret = (int)rc;
tce_freemulti_pSeriesLP(tbl, tcenum_start,
(npages_start - (npages + limit)));
return ret;
}
if (rc && printk_ratelimit()) {
printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
printk("\tnpages = 0x%llx\n", (u64)npages);
printk("\ttce[0] val = 0x%llx\n", tcep[0]);
dump_stack();
}
return ret;
}
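/*
 * Sizing note for the loop above: the per-cpu scratch page holds
 * 4096 / TCE_ENTRY_SIZE entries (512 assuming the usual 8-byte TCEs), so
 * with 4K IOMMU pages a single H_PUT_TCE_INDIRECT call maps at most
 * 512 * 4K = 2MB of DMA space; larger requests simply take more iterations.
 */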
static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
long npages)
{
u64 rc;
while (npages--) {
rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, 0);
if (rc && printk_ratelimit()) {
printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
printk("\tindex = 0x%llx\n", (u64)liobn);
printk("\ttcenum = 0x%llx\n", (u64)tcenum);
dump_stack();
}
tcenum++;
}
}
static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
u64 rc;
long rpages = npages;
unsigned long limit;
if (!firmware_has_feature(FW_FEATURE_STUFF_TCE))
return tce_free_pSeriesLP(tbl->it_index, tcenum,
tbl->it_page_shift, npages);
do {
limit = min_t(unsigned long, rpages, 512);
rc = plpar_tce_stuff((u64)tbl->it_index,
(u64)tcenum << tbl->it_page_shift, 0, limit);
rpages -= limit;
tcenum += limit;
} while (rpages > 0 && !rc);
if (rc && printk_ratelimit()) {
printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
printk("\trc = %lld\n", rc);
printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
printk("\tnpages = 0x%llx\n", (u64)npages);
dump_stack();
}
}
static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
{
u64 rc;
unsigned long tce_ret;
rc = plpar_tce_get((u64)tbl->it_index,
(u64)tcenum << tbl->it_page_shift, &tce_ret);
if (rc && printk_ratelimit()) {
printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
printk("\ttcenum = 0x%llx\n", (u64)tcenum);
dump_stack();
}
return tce_ret;
}
/* this is compatible with cells for the device tree property */
struct dynamic_dma_window_prop {
__be32 liobn; /* tce table number */
__be64 dma_base; /* address hi,lo */
__be32 tce_shift; /* ilog2(tce_page_size) */
__be32 window_shift; /* ilog2(tce_window_size) */
};
struct dma_win {
struct device_node *device;
const struct dynamic_dma_window_prop *prop;
bool direct;
struct list_head list;
};
/* Dynamic DMA Window support */
struct ddw_query_response {
u32 windows_available;
u64 largest_available_block;
u32 page_size;
u32 migration_capable;
};
struct ddw_create_response {
u32 liobn;
u32 addr_hi;
u32 addr_lo;
};
static LIST_HEAD(dma_win_list);
/* prevents races between memory on/offline and window creation */
static DEFINE_SPINLOCK(dma_win_list_lock);
/* protects initializing window twice for same device */
static DEFINE_MUTEX(dma_win_init_mutex);
static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
unsigned long num_pfn, const void *arg)
{
const struct dynamic_dma_window_prop *maprange = arg;
int rc;
u64 tce_size, num_tce, dma_offset, next;
u32 tce_shift;
long limit;
tce_shift = be32_to_cpu(maprange->tce_shift);
tce_size = 1ULL << tce_shift;
next = start_pfn << PAGE_SHIFT;
num_tce = num_pfn << PAGE_SHIFT;
/* round back to the beginning of the tce page size */
num_tce += next & (tce_size - 1);
next &= ~(tce_size - 1);
	/* convert to number of TCEs */
num_tce |= tce_size - 1;
num_tce >>= tce_shift;
do {
/*
* Set up the page with TCE data, looping through and setting
* the values.
*/
limit = min_t(long, num_tce, 512);
dma_offset = next + be64_to_cpu(maprange->dma_base);
rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),
dma_offset,
0, limit);
next += limit * tce_size;
num_tce -= limit;
} while (num_tce > 0 && !rc);
return rc;
}
static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
unsigned long num_pfn, const void *arg)
{
const struct dynamic_dma_window_prop *maprange = arg;
u64 tce_size, num_tce, dma_offset, next, proto_tce, liobn;
__be64 *tcep;
u32 tce_shift;
u64 rc = 0;
long l, limit;
if (!firmware_has_feature(FW_FEATURE_PUT_TCE_IND)) {
unsigned long tceshift = be32_to_cpu(maprange->tce_shift);
unsigned long dmastart = (start_pfn << PAGE_SHIFT) +
be64_to_cpu(maprange->dma_base);
unsigned long tcenum = dmastart >> tceshift;
unsigned long npages = num_pfn << PAGE_SHIFT >> tceshift;
void *uaddr = __va(start_pfn << PAGE_SHIFT);
return tce_build_pSeriesLP(be32_to_cpu(maprange->liobn),
tcenum, tceshift, npages, (unsigned long) uaddr,
DMA_BIDIRECTIONAL, 0);
}
local_irq_disable(); /* to protect tcep and the page behind it */
tcep = __this_cpu_read(tce_page);
if (!tcep) {
tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
if (!tcep) {
local_irq_enable();
return -ENOMEM;
}
__this_cpu_write(tce_page, tcep);
}
proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;
liobn = (u64)be32_to_cpu(maprange->liobn);
tce_shift = be32_to_cpu(maprange->tce_shift);
tce_size = 1ULL << tce_shift;
next = start_pfn << PAGE_SHIFT;
num_tce = num_pfn << PAGE_SHIFT;
/* round back to the beginning of the tce page size */
num_tce += next & (tce_size - 1);
next &= ~(tce_size - 1);
	/* convert to number of TCEs */
num_tce |= tce_size - 1;
num_tce >>= tce_shift;
/* We can map max one pageful of TCEs at a time */
do {
/*
* Set up the page with TCE data, looping through and setting
* the values.
*/
limit = min_t(long, num_tce, 4096 / TCE_ENTRY_SIZE);
dma_offset = next + be64_to_cpu(maprange->dma_base);
for (l = 0; l < limit; l++) {
tcep[l] = cpu_to_be64(proto_tce | next);
next += tce_size;
}
rc = plpar_tce_put_indirect(liobn,
dma_offset,
(u64)__pa(tcep),
limit);
num_tce -= limit;
} while (num_tce > 0 && !rc);
/* error cleanup: caller will clear whole range */
local_irq_enable();
return rc;
}
static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
unsigned long num_pfn, void *arg)
{
return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
}
static void iommu_table_setparms_common(struct iommu_table *tbl, unsigned long busno,
unsigned long liobn, unsigned long win_addr,
unsigned long window_size, unsigned long page_shift,
void *base, struct iommu_table_ops *table_ops)
{
tbl->it_busno = busno;
tbl->it_index = liobn;
tbl->it_offset = win_addr >> page_shift;
tbl->it_size = window_size >> page_shift;
tbl->it_page_shift = page_shift;
tbl->it_base = (unsigned long)base;
tbl->it_blocksize = 16;
tbl->it_type = TCE_PCI;
tbl->it_ops = table_ops;
}
struct iommu_table_ops iommu_table_pseries_ops;
static void iommu_table_setparms(struct pci_controller *phb,
struct device_node *dn,
struct iommu_table *tbl)
{
struct device_node *node;
const unsigned long *basep;
const u32 *sizep;
/* Test if we are going over 2GB of DMA space */
if (phb->dma_window_base_cur + phb->dma_window_size > SZ_2G) {
udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
}
node = phb->dn;
basep = of_get_property(node, "linux,tce-base", NULL);
sizep = of_get_property(node, "linux,tce-size", NULL);
if (basep == NULL || sizep == NULL) {
printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %pOF has "
"missing tce entries !\n", dn);
return;
}
iommu_table_setparms_common(tbl, phb->bus->number, 0, phb->dma_window_base_cur,
phb->dma_window_size, IOMMU_PAGE_SHIFT_4K,
__va(*basep), &iommu_table_pseries_ops);
if (!is_kdump_kernel())
memset((void *)tbl->it_base, 0, *sizep);
phb->dma_window_base_cur += phb->dma_window_size;
}
struct iommu_table_ops iommu_table_lpar_multi_ops;
/*
* iommu_table_setparms_lpar
*
* Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
*/
static void iommu_table_setparms_lpar(struct pci_controller *phb,
struct device_node *dn,
struct iommu_table *tbl,
struct iommu_table_group *table_group,
const __be32 *dma_window)
{
unsigned long offset, size, liobn;
of_parse_dma_window(dn, dma_window, &liobn, &offset, &size);
iommu_table_setparms_common(tbl, phb->bus->number, liobn, offset, size, IOMMU_PAGE_SHIFT_4K, NULL,
&iommu_table_lpar_multi_ops);
table_group->tce32_start = offset;
table_group->tce32_size = size;
}
struct iommu_table_ops iommu_table_pseries_ops = {
.set = tce_build_pSeries,
.clear = tce_free_pSeries,
.get = tce_get_pseries
};
static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
{
struct device_node *dn;
struct iommu_table *tbl;
struct device_node *isa_dn, *isa_dn_orig;
struct device_node *tmp;
struct pci_dn *pci;
int children;
dn = pci_bus_to_OF_node(bus);
pr_debug("pci_dma_bus_setup_pSeries: setting up bus %pOF\n", dn);
if (bus->self) {
/* This is not a root bus, any setup will be done for the
		 * device-side of the bridge in pci_dma_dev_setup_pSeries().
*/
return;
}
pci = PCI_DN(dn);
/* Check if the ISA bus on the system is under
* this PHB.
*/
isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");
while (isa_dn && isa_dn != dn)
isa_dn = isa_dn->parent;
of_node_put(isa_dn_orig);
/* Count number of direct PCI children of the PHB. */
for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
children++;
pr_debug("Children: %d\n", children);
/* Calculate amount of DMA window per slot. Each window must be
* a power of two (due to pci_alloc_consistent requirements).
*
* Keep 256MB aside for PHBs with ISA.
*/
if (!isa_dn) {
/* No ISA/IDE - just set window size and return */
pci->phb->dma_window_size = 0x80000000ul; /* To be divided */
while (pci->phb->dma_window_size * children > 0x80000000ul)
pci->phb->dma_window_size >>= 1;
pr_debug("No ISA/IDE, window size is 0x%llx\n",
pci->phb->dma_window_size);
pci->phb->dma_window_base_cur = 0;
return;
}
/* If we have ISA, then we probably have an IDE
* controller too. Allocate a 128MB table but
* skip the first 128MB to avoid stepping on ISA
* space.
*/
pci->phb->dma_window_size = 0x8000000ul;
pci->phb->dma_window_base_cur = 0x8000000ul;
pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
tbl = pci->table_group->tables[0];
iommu_table_setparms(pci->phb, dn, tbl);
if (!iommu_init_table(tbl, pci->phb->node, 0, 0))
panic("Failed to initialize iommu table");
/* Divide the rest (1.75GB) among the children */
pci->phb->dma_window_size = 0x80000000ul;
while (pci->phb->dma_window_size * children > 0x70000000ul)
pci->phb->dma_window_size >>= 1;
pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
}
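/*
 * Worked example for the division above: with no ISA bridge and two children
 * under the PHB, dma_window_size starts at 0x80000000 and is halved once
 * (2 * 2GB > 2GB), so each slot gets a 1GB window, handed out sequentially
 * from dma_window_base_cur = 0 by iommu_table_setparms().
 */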
#ifdef CONFIG_IOMMU_API
static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned
long *tce, enum dma_data_direction *direction)
{
long rc;
unsigned long ioba = (unsigned long) index << tbl->it_page_shift;
unsigned long flags, oldtce = 0;
u64 proto_tce = iommu_direction_to_tce_perm(*direction);
unsigned long newtce = *tce | proto_tce;
spin_lock_irqsave(&tbl->large_pool.lock, flags);
rc = plpar_tce_get((u64)tbl->it_index, ioba, &oldtce);
if (!rc)
rc = plpar_tce_put((u64)tbl->it_index, ioba, newtce);
if (!rc) {
*direction = iommu_tce_direction(oldtce);
*tce = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
}
spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
return rc;
}
#endif
struct iommu_table_ops iommu_table_lpar_multi_ops = {
.set = tce_buildmulti_pSeriesLP,
#ifdef CONFIG_IOMMU_API
.xchg_no_kill = tce_exchange_pseries,
#endif
.clear = tce_freemulti_pSeriesLP,
.get = tce_get_pSeriesLP
};
/*
* Find nearest ibm,dma-window (default DMA window) or direct DMA window or
* dynamic 64bit DMA window, walking up the device tree.
*/
static struct device_node *pci_dma_find(struct device_node *dn,
const __be32 **dma_window)
{
const __be32 *dw = NULL;
for ( ; dn && PCI_DN(dn); dn = dn->parent) {
dw = of_get_property(dn, "ibm,dma-window", NULL);
if (dw) {
if (dma_window)
*dma_window = dw;
return dn;
}
dw = of_get_property(dn, DIRECT64_PROPNAME, NULL);
if (dw)
return dn;
dw = of_get_property(dn, DMA64_PROPNAME, NULL);
if (dw)
return dn;
}
return NULL;
}
static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
{
struct iommu_table *tbl;
struct device_node *dn, *pdn;
struct pci_dn *ppci;
const __be32 *dma_window = NULL;
dn = pci_bus_to_OF_node(bus);
pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %pOF\n",
dn);
pdn = pci_dma_find(dn, &dma_window);
if (dma_window == NULL)
pr_debug(" no ibm,dma-window property !\n");
ppci = PCI_DN(pdn);
pr_debug(" parent is %pOF, iommu_table: 0x%p\n",
pdn, ppci->table_group);
if (!ppci->table_group) {
ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node);
tbl = ppci->table_group->tables[0];
if (dma_window) {
iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
ppci->table_group, dma_window);
if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
panic("Failed to initialize iommu table");
}
iommu_register_group(ppci->table_group,
pci_domain_nr(bus), 0);
pr_debug(" created table: %p\n", ppci->table_group);
}
}
static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
{
struct device_node *dn;
struct iommu_table *tbl;
pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));
dn = dev->dev.of_node;
/* If we're the direct child of a root bus, then we need to allocate
* an iommu table ourselves. The bus setup code should have setup
* the window sizes already.
*/
if (!dev->bus->self) {
struct pci_controller *phb = PCI_DN(dn)->phb;
pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
PCI_DN(dn)->table_group = iommu_pseries_alloc_group(phb->node);
tbl = PCI_DN(dn)->table_group->tables[0];
iommu_table_setparms(phb, dn, tbl);
if (!iommu_init_table(tbl, phb->node, 0, 0))
panic("Failed to initialize iommu table");
set_iommu_table_base(&dev->dev, tbl);
return;
}
/* If this device is further down the bus tree, search upwards until
* an already allocated iommu table is found and use that.
*/
while (dn && PCI_DN(dn) && PCI_DN(dn)->table_group == NULL)
dn = dn->parent;
if (dn && PCI_DN(dn))
set_iommu_table_base(&dev->dev,
PCI_DN(dn)->table_group->tables[0]);
else
printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
pci_name(dev));
}
static int __read_mostly disable_ddw;
static int __init disable_ddw_setup(char *str)
{
disable_ddw = 1;
printk(KERN_INFO "ppc iommu: disabling ddw.\n");
return 0;
}
early_param("disable_ddw", disable_ddw_setup);
static void clean_dma_window(struct device_node *np, struct dynamic_dma_window_prop *dwp)
{
int ret;
ret = tce_clearrange_multi_pSeriesLP(0,
1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp);
if (ret)
pr_warn("%pOF failed to clear tces in window.\n",
np);
else
pr_debug("%pOF successfully cleared tces in window.\n",
np);
}
/*
* Call only if DMA window is clean.
*/
static void __remove_dma_window(struct device_node *np, u32 *ddw_avail, u64 liobn)
{
int ret;
ret = rtas_call(ddw_avail[DDW_REMOVE_PE_DMA_WIN], 1, 1, NULL, liobn);
if (ret)
pr_warn("%pOF: failed to remove DMA window: rtas returned "
"%d to ibm,remove-pe-dma-window(%x) %llx\n",
np, ret, ddw_avail[DDW_REMOVE_PE_DMA_WIN], liobn);
else
pr_debug("%pOF: successfully removed DMA window: rtas returned "
"%d to ibm,remove-pe-dma-window(%x) %llx\n",
np, ret, ddw_avail[DDW_REMOVE_PE_DMA_WIN], liobn);
}
static void remove_dma_window(struct device_node *np, u32 *ddw_avail,
struct property *win)
{
struct dynamic_dma_window_prop *dwp;
u64 liobn;
dwp = win->value;
liobn = (u64)be32_to_cpu(dwp->liobn);
clean_dma_window(np, dwp);
__remove_dma_window(np, ddw_avail, liobn);
}
static int remove_ddw(struct device_node *np, bool remove_prop, const char *win_name)
{
struct property *win;
u32 ddw_avail[DDW_APPLICABLE_SIZE];
int ret = 0;
win = of_find_property(np, win_name, NULL);
if (!win)
return -EINVAL;
ret = of_property_read_u32_array(np, "ibm,ddw-applicable",
&ddw_avail[0], DDW_APPLICABLE_SIZE);
if (ret)
return 0;
if (win->length >= sizeof(struct dynamic_dma_window_prop))
remove_dma_window(np, ddw_avail, win);
if (!remove_prop)
return 0;
ret = of_remove_property(np, win);
if (ret)
pr_warn("%pOF: failed to remove DMA window property: %d\n",
np, ret);
return 0;
}
static bool find_existing_ddw(struct device_node *pdn, u64 *dma_addr, int *window_shift)
{
struct dma_win *window;
const struct dynamic_dma_window_prop *dma64;
bool found = false;
spin_lock(&dma_win_list_lock);
/* check if we already created a window and dupe that config if so */
list_for_each_entry(window, &dma_win_list, list) {
if (window->device == pdn) {
dma64 = window->prop;
*dma_addr = be64_to_cpu(dma64->dma_base);
*window_shift = be32_to_cpu(dma64->window_shift);
found = true;
break;
}
}
spin_unlock(&dma_win_list_lock);
return found;
}
static struct dma_win *ddw_list_new_entry(struct device_node *pdn,
const struct dynamic_dma_window_prop *dma64)
{
struct dma_win *window;
window = kzalloc(sizeof(*window), GFP_KERNEL);
if (!window)
return NULL;
window->device = pdn;
window->prop = dma64;
window->direct = false;
return window;
}
static void find_existing_ddw_windows_named(const char *name)
{
int len;
struct device_node *pdn;
struct dma_win *window;
const struct dynamic_dma_window_prop *dma64;
for_each_node_with_property(pdn, name) {
dma64 = of_get_property(pdn, name, &len);
if (!dma64 || len < sizeof(*dma64)) {
remove_ddw(pdn, true, name);
continue;
}
window = ddw_list_new_entry(pdn, dma64);
if (!window) {
of_node_put(pdn);
break;
}
spin_lock(&dma_win_list_lock);
list_add(&window->list, &dma_win_list);
spin_unlock(&dma_win_list_lock);
}
}
static int find_existing_ddw_windows(void)
{
if (!firmware_has_feature(FW_FEATURE_LPAR))
return 0;
find_existing_ddw_windows_named(DIRECT64_PROPNAME);
find_existing_ddw_windows_named(DMA64_PROPNAME);
return 0;
}
machine_arch_initcall(pseries, find_existing_ddw_windows);
/**
 * ddw_read_ext - Get the value of a DDW extension
* @np: device node from which the extension value is to be read.
* @extnum: index number of the extension.
* @value: pointer to return value, modified when extension is available.
*
* Checks if "ibm,ddw-extensions" exists for this node, and get the value
* on index 'extnum'.
* It can be used only to check if a property exists, passing value == NULL.
*
* Returns:
* 0 if extension successfully read
* -EINVAL if the "ibm,ddw-extensions" does not exist,
* -ENODATA if "ibm,ddw-extensions" does not have a value, and
* -EOVERFLOW if "ibm,ddw-extensions" does not contain this extension.
*/
static inline int ddw_read_ext(const struct device_node *np, int extnum,
u32 *value)
{
static const char propname[] = "ibm,ddw-extensions";
u32 count;
int ret;
ret = of_property_read_u32_index(np, propname, DDW_EXT_SIZE, &count);
if (ret)
return ret;
if (count < extnum)
return -EOVERFLOW;
if (!value)
value = &count;
return of_property_read_u32_index(np, propname, extnum, value);
}
static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
struct ddw_query_response *query,
struct device_node *parent)
{
struct device_node *dn;
struct pci_dn *pdn;
u32 cfg_addr, ext_query, query_out[5];
u64 buid;
int ret, out_sz;
/*
* From LoPAR level 2.8, "ibm,ddw-extensions" index 3 can rule how many
* output parameters ibm,query-pe-dma-windows will have, ranging from
* 5 to 6.
*/
ret = ddw_read_ext(parent, DDW_EXT_QUERY_OUT_SIZE, &ext_query);
if (!ret && ext_query == 1)
out_sz = 6;
else
out_sz = 5;
/*
* Get the config address and phb buid of the PE window.
* Rely on eeh to retrieve this for us.
* Retrieve them from the pci device, not the node with the
* dma-window property
*/
dn = pci_device_to_OF_node(dev);
pdn = PCI_DN(dn);
buid = pdn->phb->buid;
cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
ret = rtas_call(ddw_avail[DDW_QUERY_PE_DMA_WIN], 3, out_sz, query_out,
cfg_addr, BUID_HI(buid), BUID_LO(buid));
switch (out_sz) {
case 5:
query->windows_available = query_out[0];
query->largest_available_block = query_out[1];
query->page_size = query_out[2];
query->migration_capable = query_out[3];
break;
case 6:
query->windows_available = query_out[0];
query->largest_available_block = ((u64)query_out[1] << 32) |
query_out[2];
query->page_size = query_out[3];
query->migration_capable = query_out[4];
break;
}
dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x returned %d, lb=%llx ps=%x wn=%d\n",
ddw_avail[DDW_QUERY_PE_DMA_WIN], cfg_addr, BUID_HI(buid),
BUID_LO(buid), ret, query->largest_available_block,
query->page_size, query->windows_available);
return ret;
}
static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
struct ddw_create_response *create, int page_shift,
int window_shift)
{
struct device_node *dn;
struct pci_dn *pdn;
u32 cfg_addr;
u64 buid;
int ret;
/*
* Get the config address and phb buid of the PE window.
* Rely on eeh to retrieve this for us.
* Retrieve them from the pci device, not the node with the
* dma-window property
*/
dn = pci_device_to_OF_node(dev);
pdn = PCI_DN(dn);
buid = pdn->phb->buid;
cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
do {
/* extra outputs are LIOBN and dma-addr (hi, lo) */
ret = rtas_call(ddw_avail[DDW_CREATE_PE_DMA_WIN], 5, 4,
(u32 *)create, cfg_addr, BUID_HI(buid),
BUID_LO(buid), page_shift, window_shift);
} while (rtas_busy_delay(ret));
dev_info(&dev->dev,
"ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
"(liobn = 0x%x starting addr = %x %x)\n",
ddw_avail[DDW_CREATE_PE_DMA_WIN], cfg_addr, BUID_HI(buid),
BUID_LO(buid), page_shift, window_shift, ret, create->liobn,
create->addr_hi, create->addr_lo);
return ret;
}
struct failed_ddw_pdn {
struct device_node *pdn;
struct list_head list;
};
static LIST_HEAD(failed_ddw_pdn_list);
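/*
* Highest address a direct-mapped window needs to cover: the memory
* hotplug ceiling or the end of the last "memory" node, whichever is
* larger.
*/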
static phys_addr_t ddw_memory_hotplug_max(void)
{
resource_size_t max_addr = memory_hotplug_max();
struct device_node *memory;
for_each_node_by_type(memory, "memory") {
struct resource res;
if (of_address_to_resource(memory, 0, &res))
continue;
max_addr = max_t(resource_size_t, max_addr, res.end + 1);
}
return max_addr;
}
/*
* Platforms supporting the DDW option starting with LoPAR level 2.7 implement
* ibm,ddw-extensions, which carries the rtas token for
* ibm,reset-pe-dma-windows.
* That rtas-call can be used to restore the default DMA window for the device.
*/
static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn)
{
int ret;
u32 cfg_addr, reset_dma_win;
u64 buid;
struct device_node *dn;
struct pci_dn *pdn;
ret = ddw_read_ext(par_dn, DDW_EXT_RESET_DMA_WIN, &reset_dma_win);
if (ret)
return;
dn = pci_device_to_OF_node(dev);
pdn = PCI_DN(dn);
buid = pdn->phb->buid;
cfg_addr = (pdn->busno << 16) | (pdn->devfn << 8);
ret = rtas_call(reset_dma_win, 3, 1, NULL, cfg_addr, BUID_HI(buid),
BUID_LO(buid));
if (ret)
dev_info(&dev->dev,
"ibm,reset-pe-dma-windows(%x) %x %x %x returned %d ",
reset_dma_win, cfg_addr, BUID_HI(buid), BUID_LO(buid),
ret);
}
/* Return largest page shift based on "IO Page Sizes" output of ibm,query-pe-dma-window. */
static int iommu_get_page_shift(u32 query_page_size)
{
/* Supported IO page-sizes according to LoPAR, note that 2M is out of order */
const int shift[] = {
__builtin_ctzll(SZ_4K), __builtin_ctzll(SZ_64K), __builtin_ctzll(SZ_16M),
__builtin_ctzll(SZ_32M), __builtin_ctzll(SZ_64M), __builtin_ctzll(SZ_128M),
__builtin_ctzll(SZ_256M), __builtin_ctzll(SZ_16G), __builtin_ctzll(SZ_2M)
};
int i = ARRAY_SIZE(shift) - 1;
int ret = 0;
/*
* On LoPAR, ibm,query-pe-dma-window outputs "IO Page Sizes" using a bit field:
* - bit 31 means 4k pages are supported,
* - bit 30 means 64k pages are supported, and so on.
* Larger page sizes map more memory with the same number of TCEs, so probe from the largest.
*/
for (; i >= 0 ; i--) {
if (query_page_size & (1 << i))
ret = max(ret, shift[i]);
}
return ret;
}
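/*
* Allocate a struct property describing a dynamic DMA window (LIOBN,
* DMA base, TCE shift, window shift) ready to be attached to the
* device node with of_add_property().
*/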
static struct property *ddw_property_create(const char *propname, u32 liobn, u64 dma_addr,
u32 page_shift, u32 window_shift)
{
struct dynamic_dma_window_prop *ddwprop;
struct property *win64;
win64 = kzalloc(sizeof(*win64), GFP_KERNEL);
if (!win64)
return NULL;
win64->name = kstrdup(propname, GFP_KERNEL);
ddwprop = kzalloc(sizeof(*ddwprop), GFP_KERNEL);
win64->value = ddwprop;
win64->length = sizeof(*ddwprop);
if (!win64->name || !win64->value) {
kfree(win64->name);
kfree(win64->value);
kfree(win64);
return NULL;
}
ddwprop->liobn = cpu_to_be32(liobn);
ddwprop->dma_base = cpu_to_be64(dma_addr);
ddwprop->tce_shift = cpu_to_be32(page_shift);
ddwprop->window_shift = cpu_to_be32(window_shift);
return win64;
}
/*
* If the PE supports dynamic dma windows, and there is space for a table
* that can map all pages in a linear offset, then set up such a table,
* and record the dma-offset in the struct device.
*
* dev: the pci device we are checking
* pdn: the parent pe node with the ibm,dma-window property
* Future: also check if we can remap the base window for our base page size
*
* returns true if it can map all pages (direct mapping), false otherwise.
*/
static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
{
int len = 0, ret;
int max_ram_len = order_base_2(ddw_memory_hotplug_max());
struct ddw_query_response query;
struct ddw_create_response create;
int page_shift;
u64 win_addr;
const char *win_name;
struct device_node *dn;
u32 ddw_avail[DDW_APPLICABLE_SIZE];
struct dma_win *window;
struct property *win64;
struct failed_ddw_pdn *fpdn;
bool default_win_removed = false, direct_mapping = false;
bool pmem_present;
struct pci_dn *pci = PCI_DN(pdn);
struct property *default_win = NULL;
dn = of_find_node_by_type(NULL, "ibm,pmemory");
pmem_present = dn != NULL;
of_node_put(dn);
mutex_lock(&dma_win_init_mutex);
if (find_existing_ddw(pdn, &dev->dev.archdata.dma_offset, &len)) {
direct_mapping = (len >= max_ram_len);
goto out_unlock;
}
/*
* If we already went through this for a previous function of
* the same device and failed, we don't want to muck with the
* DMA window again, as it will race with in-flight operations
* and can lead to EEHs. The above mutex protects access to the
* list.
*/
list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
if (fpdn->pdn == pdn)
goto out_unlock;
}
/*
* the ibm,ddw-applicable property holds the tokens for:
* ibm,query-pe-dma-window
* ibm,create-pe-dma-window
* ibm,remove-pe-dma-window
* for the given node in that order.
* the property is actually in the parent, not the PE
*/
ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable",
&ddw_avail[0], DDW_APPLICABLE_SIZE);
if (ret)
goto out_failed;
/*
* Query whether a second window large enough to map the whole
* partition is available. The query returns the number of windows,
* the largest block assigned to the PE (partition endpoint), and two
* bitmasks of page sizes: supported and supported for migrate-dma.
*/
dn = pci_device_to_OF_node(dev);
ret = query_ddw(dev, ddw_avail, &query, pdn);
if (ret != 0)
goto out_failed;
/*
* If there is no window available, remove the default DMA window,
* if it's present. This will make all the resources available to the
* new DDW window.
* If anything fails after this, we need to restore it, so also check
* for extensions presence.
*/
if (query.windows_available == 0) {
int reset_win_ext;
/* DDW + IOMMU on single window may fail if there is any allocation */
if (iommu_table_in_use(pci->table_group->tables[0])) {
dev_warn(&dev->dev, "current IOMMU table in use, can't be replaced.\n");
goto out_failed;
}
default_win = of_find_property(pdn, "ibm,dma-window", NULL);
if (!default_win)
goto out_failed;
reset_win_ext = ddw_read_ext(pdn, DDW_EXT_RESET_DMA_WIN, NULL);
if (reset_win_ext)
goto out_failed;
remove_dma_window(pdn, ddw_avail, default_win);
default_win_removed = true;
/* Query again, to check if the window is available */
ret = query_ddw(dev, ddw_avail, &query, pdn);
if (ret != 0)
goto out_failed;
if (query.windows_available == 0) {
/* no windows are available for this device. */
dev_dbg(&dev->dev, "no free dynamic windows");
goto out_failed;
}
}
page_shift = iommu_get_page_shift(query.page_size);
if (!page_shift) {
dev_dbg(&dev->dev, "no supported page size in mask %x",
query.page_size);
goto out_failed;
}
/*
* The "ibm,pmemory" can appear anywhere in the address space.
* Assuming it is still backed by page structs, try MAX_PHYSMEM_BITS
* for the upper limit and fall back to max RAM otherwise, but this
* disables device::dma_ops_bypass.
*/
len = max_ram_len;
if (pmem_present) {
if (query.largest_available_block >=
(1ULL << (MAX_PHYSMEM_BITS - page_shift)))
len = MAX_PHYSMEM_BITS;
else
dev_info(&dev->dev, "Skipping ibm,pmemory");
}
/* check if the available block * number of ptes will map everything */
if (query.largest_available_block < (1ULL << (len - page_shift))) {
dev_dbg(&dev->dev,
"can't map partition max 0x%llx with %llu %llu-sized pages\n",
1ULL << len,
query.largest_available_block,
1ULL << page_shift);
len = order_base_2(query.largest_available_block << page_shift);
win_name = DMA64_PROPNAME;
} else {
direct_mapping = !default_win_removed ||
(len == MAX_PHYSMEM_BITS) ||
(!pmem_present && (len == max_ram_len));
win_name = direct_mapping ? DIRECT64_PROPNAME : DMA64_PROPNAME;
}
ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
if (ret != 0)
goto out_failed;
dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %pOF\n",
create.liobn, dn);
win_addr = ((u64)create.addr_hi << 32) | create.addr_lo;
win64 = ddw_property_create(win_name, create.liobn, win_addr, page_shift, len);
if (!win64) {
dev_info(&dev->dev,
"couldn't allocate property, property name, or value\n");
goto out_remove_win;
}
ret = of_add_property(pdn, win64);
if (ret) {
dev_err(&dev->dev, "unable to add DMA window property for %pOF: %d",
pdn, ret);
goto out_free_prop;
}
window = ddw_list_new_entry(pdn, win64->value);
if (!window)
goto out_del_prop;
if (direct_mapping) {
window->direct = true;
/* DDW maps the whole partition, so enable direct DMA mapping */
ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
win64->value, tce_setrange_multi_pSeriesLP_walk);
if (ret) {
dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n",
dn, ret);
/* Make sure to clean DDW if any TCE was set */
clean_dma_window(pdn, win64->value);
goto out_del_list;
}
} else {
struct iommu_table *newtbl;
int i;
unsigned long start = 0, end = 0;
window->direct = false;
for (i = 0; i < ARRAY_SIZE(pci->phb->mem_resources); i++) {
const unsigned long mask = IORESOURCE_MEM_64 | IORESOURCE_MEM;
/* Look for MMIO32 */
if ((pci->phb->mem_resources[i].flags & mask) == IORESOURCE_MEM) {
start = pci->phb->mem_resources[i].start;
end = pci->phb->mem_resources[i].end;
break;
}
}
/* New table for using DDW instead of the default DMA window */
newtbl = iommu_pseries_alloc_table(pci->phb->node);
if (!newtbl) {
dev_dbg(&dev->dev, "couldn't create new IOMMU table\n");
goto out_del_list;
}
iommu_table_setparms_common(newtbl, pci->phb->bus->number, create.liobn, win_addr,
1UL << len, page_shift, NULL, &iommu_table_lpar_multi_ops);
iommu_init_table(newtbl, pci->phb->node, start, end);
pci->table_group->tables[1] = newtbl;
set_iommu_table_base(&dev->dev, newtbl);
}
if (default_win_removed) {
iommu_tce_table_put(pci->table_group->tables[0]);
pci->table_group->tables[0] = NULL;
/* default_win is valid here because default_win_removed == true */
of_remove_property(pdn, default_win);
dev_info(&dev->dev, "Removed default DMA window for %pOF\n", pdn);
}
spin_lock(&dma_win_list_lock);
list_add(&window->list, &dma_win_list);
spin_unlock(&dma_win_list_lock);
dev->dev.archdata.dma_offset = win_addr;
goto out_unlock;
out_del_list:
kfree(window);
out_del_prop:
of_remove_property(pdn, win64);
out_free_prop:
kfree(win64->name);
kfree(win64->value);
kfree(win64);
out_remove_win:
/* DDW is clean, so it's ok to call this directly. */
__remove_dma_window(pdn, ddw_avail, create.liobn);
out_failed:
if (default_win_removed)
reset_dma_window(dev, pdn);
fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
if (!fpdn)
goto out_unlock;
fpdn->pdn = pdn;
list_add(&fpdn->list, &failed_ddw_pdn_list);
out_unlock:
mutex_unlock(&dma_win_init_mutex);
/*
* If we have persistent memory and the window size is only as big
* as RAM, then we failed to create a window to cover persistent
* memory and need to set the DMA limit.
*/
if (pmem_present && direct_mapping && len == max_ram_len)
dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset + (1ULL << len);
return direct_mapping;
}
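/*
* Per-device DMA setup for LPAR: find the node carrying the DMA window
* property, create (or reuse) the TCE table for that node and attach
* the device to it.
*/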
static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
struct device_node *pdn, *dn;
struct iommu_table *tbl;
const __be32 *dma_window = NULL;
struct pci_dn *pci;
pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));
/* dev setup for LPAR is a little tricky, since the device tree might
* contain the dma-window properties per-device and not necessarily
* for the bus. So we need to search upwards in the tree until we
* either hit a dma-window property, OR find a parent with a table
* already allocated.
*/
dn = pci_device_to_OF_node(dev);
pr_debug(" node is %pOF\n", dn);
pdn = pci_dma_find(dn, &dma_window);
if (!pdn || !PCI_DN(pdn)) {
printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
"no DMA window found for pci dev=%s dn=%pOF\n",
pci_name(dev), dn);
return;
}
pr_debug(" parent is %pOF\n", pdn);
pci = PCI_DN(pdn);
if (!pci->table_group) {
pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
tbl = pci->table_group->tables[0];
iommu_table_setparms_lpar(pci->phb, pdn, tbl,
pci->table_group, dma_window);
iommu_init_table(tbl, pci->phb->node, 0, 0);
iommu_register_group(pci->table_group,
pci_domain_nr(pci->phb->bus), 0);
pr_debug(" created table: %p\n", pci->table_group);
} else {
pr_debug(" found DMA window, table: %p\n", pci->table_group);
}
set_iommu_table_base(&dev->dev, pci->table_group->tables[0]);
iommu_add_device(pci->table_group, &dev->dev);
}
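/*
* Decide whether this device can bypass the default window: only 64-bit
* capable devices are considered, by attempting to enable a dynamic DMA
* window on the node that owns the DMA window property.
*/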
static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
{
struct device_node *dn = pci_device_to_OF_node(pdev), *pdn;
/* only attempt to use a new window if 64-bit DMA is requested */
if (dma_mask < DMA_BIT_MASK(64))
return false;
dev_dbg(&pdev->dev, "node is %pOF\n", dn);
/*
* the device tree might contain the dma-window properties
* per-device and not necessarily for the bus. So we need to
* search upwards in the tree until we either hit a dma-window
* property, OR find a parent with a table already allocated.
*/
pdn = pci_dma_find(dn, NULL);
if (pdn && PCI_DN(pdn))
return enable_ddw(pdev, pdn);
return false;
}
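/*
* Memory hotplug notifier: keep direct-mapped windows in sync by adding
* TCEs when memory goes online and clearing them when it goes offline.
*/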
static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
void *data)
{
struct dma_win *window;
struct memory_notify *arg = data;
int ret = 0;
switch (action) {
case MEM_GOING_ONLINE:
spin_lock(&dma_win_list_lock);
list_for_each_entry(window, &dma_win_list, list) {
if (window->direct) {
ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
arg->nr_pages, window->prop);
}
/* XXX log error */
}
spin_unlock(&dma_win_list_lock);
break;
case MEM_CANCEL_ONLINE:
case MEM_OFFLINE:
spin_lock(&dma_win_list_lock);
list_for_each_entry(window, &dma_win_list, list) {
if (window->direct) {
ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
arg->nr_pages, window->prop);
}
/* XXX log error */
}
spin_unlock(&dma_win_list_lock);
break;
default:
break;
}
if (ret && action != MEM_CANCEL_ONLINE)
return NOTIFY_BAD;
return NOTIFY_OK;
}
static struct notifier_block iommu_mem_nb = {
.notifier_call = iommu_mem_notifier,
};
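/*
* Device tree reconfig notifier: when a node is detached, drop its DDW
* properties, free its IOMMU group and remove it from dma_win_list.
*/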
static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
int err = NOTIFY_OK;
struct of_reconfig_data *rd = data;
struct device_node *np = rd->dn;
struct pci_dn *pci = PCI_DN(np);
struct dma_win *window;
switch (action) {
case OF_RECONFIG_DETACH_NODE:
/*
* Removing the property will invoke the reconfig
* notifier again, which causes dead-lock on the
* read-write semaphore of the notifier chain. So
* we have to remove the property when releasing
* the device node.
*/
if (remove_ddw(np, false, DIRECT64_PROPNAME))
remove_ddw(np, false, DMA64_PROPNAME);
if (pci && pci->table_group)
iommu_pseries_free_group(pci->table_group,
np->full_name);
spin_lock(&dma_win_list_lock);
list_for_each_entry(window, &dma_win_list, list) {
if (window->device == np) {
list_del(&window->list);
kfree(window);
break;
}
}
spin_unlock(&dma_win_list_lock);
break;
default:
err = NOTIFY_DONE;
break;
}
return err;
}
static struct notifier_block iommu_reconfig_nb = {
.notifier_call = iommu_reconfig_notifier,
};
/* These are called very early. */
void __init iommu_init_early_pSeries(void)
{
if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
return;
if (firmware_has_feature(FW_FEATURE_LPAR)) {
pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
if (!disable_ddw)
pseries_pci_controller_ops.iommu_bypass_supported =
iommu_bypass_supported_pSeriesLP;
} else {
pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeries;
pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeries;
}
of_reconfig_notifier_register(&iommu_reconfig_nb);
register_memory_notifier(&iommu_mem_nb);
set_pci_dma_ops(&dma_iommu_ops);
}
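/* "multitce=off" clears FW_FEATURE_PUT_TCE_IND and FW_FEATURE_STUFF_TCE. */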
static int __init disable_multitce(char *str)
{
if (strcmp(str, "off") == 0 &&
firmware_has_feature(FW_FEATURE_LPAR) &&
(firmware_has_feature(FW_FEATURE_PUT_TCE_IND) ||
firmware_has_feature(FW_FEATURE_STUFF_TCE))) {
printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
powerpc_firmware_features &=
~(FW_FEATURE_PUT_TCE_IND | FW_FEATURE_STUFF_TCE);
}
return 1;
}
__setup("multitce=", disable_multitce);
#ifdef CONFIG_SPAPR_TCE_IOMMU
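/* Return the iommu_group of the table group backing @pdev's DMA window. */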
struct iommu_group *pSeries_pci_device_group(struct pci_controller *hose,
struct pci_dev *pdev)
{
struct device_node *pdn, *dn = pdev->dev.of_node;
struct iommu_group *grp;
struct pci_dn *pci;
pdn = pci_dma_find(dn, NULL);
if (!pdn || !PCI_DN(pdn))
return ERR_PTR(-ENODEV);
pci = PCI_DN(pdn);
if (!pci->table_group)
return ERR_PTR(-ENODEV);
grp = pci->table_group->group;
if (!grp)
return ERR_PTR(-ENODEV);
return iommu_group_ref_get(grp);
}
#endif
| linux-master | arch/powerpc/platforms/pseries/iommu.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IBM PowerPC Virtual I/O Infrastructure Support.
*
* Copyright (c) 2003,2008 IBM Corp.
* Dave Engebretsen [email protected]
* Santiago Leon [email protected]
* Hollis Blanchard <[email protected]>
* Stephen Rothwell
* Robert Jennings <[email protected]>
*/
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/kobject.h>
#include <linux/kexec.h>
#include <linux/of_irq.h>
#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
static struct vio_dev vio_bus_device = { /* fake "parent" device */
.name = "vio",
.type = "",
.dev.init_name = "vio",
.dev.bus = &vio_bus_type,
};
#ifdef CONFIG_PPC_SMLPAR
/**
* vio_cmo_pool - A pool of IO memory for CMO use
*
* @size: The size of the pool in bytes
* @free: The amount of free memory in the pool
*/
struct vio_cmo_pool {
size_t size;
size_t free;
};
/* How many ms to delay queued balance work */
#define VIO_CMO_BALANCE_DELAY 100
/* Portion out IO memory to CMO devices by this chunk size */
#define VIO_CMO_BALANCE_CHUNK 131072
/**
* vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
*
* @vio_dev: struct vio_dev pointer
* @list: pointer to other devices on bus that are being tracked
*/
struct vio_cmo_dev_entry {
struct vio_dev *viodev;
struct list_head list;
};
/**
* vio_cmo - VIO bus accounting structure for CMO entitlement
*
* @lock: spinlock for entire structure
* @balance_q: work queue for balancing system entitlement
* @device_list: list of CMO-enabled devices requiring entitlement
* @entitled: total system entitlement in bytes
* @reserve: pool of memory from which devices reserve entitlement, incl. spare
* @excess: pool of excess entitlement not needed for device reserves or spare
* @spare: IO memory for device hotplug functionality
* @min: minimum necessary for system operation
* @desired: desired memory for system operation
* @curr: bytes currently allocated
* @high: high water mark for IO data usage
*/
static struct vio_cmo {
spinlock_t lock;
struct delayed_work balance_q;
struct list_head device_list;
size_t entitled;
struct vio_cmo_pool reserve;
struct vio_cmo_pool excess;
size_t spare;
size_t min;
size_t desired;
size_t curr;
size_t high;
} vio_cmo;
/**
* vio_cmo_num_OF_devs - Count the number of OF devices that have DMA windows
*/
static int vio_cmo_num_OF_devs(void)
{
struct device_node *node_vroot;
int count = 0;
/*
* Count the number of vdevice entries with an
* ibm,my-dma-window OF property
*/
node_vroot = of_find_node_by_name(NULL, "vdevice");
if (node_vroot) {
struct device_node *of_node;
struct property *prop;
for_each_child_of_node(node_vroot, of_node) {
prop = of_find_property(of_node, "ibm,my-dma-window",
NULL);
if (prop)
count++;
}
}
of_node_put(node_vroot);
return count;
}
/**
* vio_cmo_alloc - allocate IO memory for CMO-enabled devices
*
* @viodev: VIO device requesting IO memory
* @size: size of allocation requested
*
* Allocations come from memory reserved for the devices and any excess
* IO memory available to all devices. The spare pool used to service
* hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
* made available.
*
* Return codes:
* 0 for successful allocation and -ENOMEM for a failure
*/
static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
unsigned long flags;
size_t reserve_free = 0;
size_t excess_free = 0;
int ret = -ENOMEM;
spin_lock_irqsave(&vio_cmo.lock, flags);
/* Determine the amount of free entitlement available in reserve */
if (viodev->cmo.entitled > viodev->cmo.allocated)
reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;
/* If spare is not fulfilled, the excess pool can not be used. */
if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
excess_free = vio_cmo.excess.free;
/* The request can be satisfied */
if ((reserve_free + excess_free) >= size) {
vio_cmo.curr += size;
if (vio_cmo.curr > vio_cmo.high)
vio_cmo.high = vio_cmo.curr;
viodev->cmo.allocated += size;
size -= min(reserve_free, size);
vio_cmo.excess.free -= size;
ret = 0;
}
spin_unlock_irqrestore(&vio_cmo.lock, flags);
return ret;
}
/**
* vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
* @viodev: VIO device freeing IO memory
* @size: size of deallocation
*
* IO memory is freed by the device back to the correct memory pools.
* The spare pool is replenished first from either memory pool, then
* the reserve pool is used to reduce device entitlement, the excess
* pool is used to increase the reserve pool toward the desired entitlement
* target, and then the remaining memory is returned to the pools.
*
*/
static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
{
unsigned long flags;
size_t spare_needed = 0;
size_t excess_freed = 0;
size_t reserve_freed = size;
size_t tmp;
int balance = 0;
spin_lock_irqsave(&vio_cmo.lock, flags);
vio_cmo.curr -= size;
/* Amount of memory freed from the excess pool */
if (viodev->cmo.allocated > viodev->cmo.entitled) {
excess_freed = min(reserve_freed, (viodev->cmo.allocated -
viodev->cmo.entitled));
reserve_freed -= excess_freed;
}
/* Remove allocation from device */
viodev->cmo.allocated -= (reserve_freed + excess_freed);
/* Spare is a subset of the reserve pool, replenish it first. */
spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;
/*
* Replenish the spare in the reserve pool from the excess pool.
* This moves entitlement into the reserve pool.
*/
if (spare_needed && excess_freed) {
tmp = min(excess_freed, spare_needed);
vio_cmo.excess.size -= tmp;
vio_cmo.reserve.size += tmp;
vio_cmo.spare += tmp;
excess_freed -= tmp;
spare_needed -= tmp;
balance = 1;
}
/*
* Replenish the spare in the reserve pool from the reserve pool.
* This removes entitlement from the device down to VIO_CMO_MIN_ENT,
* if needed, and gives it to the spare pool. The amount of used
* memory in this pool does not change.
*/
if (spare_needed && reserve_freed) {
tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT));
vio_cmo.spare += tmp;
viodev->cmo.entitled -= tmp;
reserve_freed -= tmp;
spare_needed -= tmp;
balance = 1;
}
/*
* Increase the reserve pool until the desired allocation is met.
* Move an allocation freed from the excess pool into the reserve
* pool and schedule a balance operation.
*/
if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));
vio_cmo.excess.size -= tmp;
vio_cmo.reserve.size += tmp;
excess_freed -= tmp;
balance = 1;
}
/* Return memory from the excess pool to that pool */
if (excess_freed)
vio_cmo.excess.free += excess_freed;
if (balance)
schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
/**
* vio_cmo_entitlement_update - Manage system entitlement changes
*
* @new_entitlement: new system entitlement to attempt to accommodate
*
* Increases in entitlement will be used to fulfill the spare entitlement
* and the rest is given to the excess pool. Decreases, if they are
* possible, come from the excess pool and from unused device entitlement
*
* Returns: 0 on success, -ENOMEM when change can not be made
*/
int vio_cmo_entitlement_update(size_t new_entitlement)
{
struct vio_dev *viodev;
struct vio_cmo_dev_entry *dev_ent;
unsigned long flags;
size_t avail, delta, tmp;
spin_lock_irqsave(&vio_cmo.lock, flags);
/* Entitlement increases */
if (new_entitlement > vio_cmo.entitled) {
delta = new_entitlement - vio_cmo.entitled;
/* Fulfill spare allocation */
if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
vio_cmo.spare += tmp;
vio_cmo.reserve.size += tmp;
delta -= tmp;
}
/* Remaining new allocation goes to the excess pool */
vio_cmo.entitled += delta;
vio_cmo.excess.size += delta;
vio_cmo.excess.free += delta;
goto out;
}
/* Entitlement decreases */
delta = vio_cmo.entitled - new_entitlement;
avail = vio_cmo.excess.free;
/*
* Need to check how much unused entitlement each device can
* sacrifice to fulfill entitlement change.
*/
list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
if (avail >= delta)
break;
viodev = dev_ent->viodev;
if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
(viodev->cmo.entitled > VIO_CMO_MIN_ENT))
avail += viodev->cmo.entitled -
max_t(size_t, viodev->cmo.allocated,
VIO_CMO_MIN_ENT);
}
if (delta <= avail) {
vio_cmo.entitled -= delta;
/* Take entitlement from the excess pool first */
tmp = min(vio_cmo.excess.free, delta);
vio_cmo.excess.size -= tmp;
vio_cmo.excess.free -= tmp;
delta -= tmp;
/*
* Remove all but VIO_CMO_MIN_ENT bytes from devices
* until entitlement change is served
*/
list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
if (!delta)
break;
viodev = dev_ent->viodev;
tmp = 0;
if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
(viodev->cmo.entitled > VIO_CMO_MIN_ENT))
tmp = viodev->cmo.entitled -
max_t(size_t, viodev->cmo.allocated,
VIO_CMO_MIN_ENT);
viodev->cmo.entitled -= min(tmp, delta);
delta -= min(tmp, delta);
}
} else {
spin_unlock_irqrestore(&vio_cmo.lock, flags);
return -ENOMEM;
}
out:
schedule_delayed_work(&vio_cmo.balance_q, 0);
spin_unlock_irqrestore(&vio_cmo.lock, flags);
return 0;
}
/**
* vio_cmo_balance - Balance entitlement among devices
*
* @work: work queue structure for this operation
*
* Any system entitlement above the minimum needed for devices, or
* already allocated to devices, can be distributed to the devices.
* The list of devices is iterated through to recalculate the desired
* entitlement level and to determine how much entitlement above the
* minimum entitlement is allocated to devices.
*
* Small chunks of the available entitlement are given to devices until
* their requirements are fulfilled or there is no entitlement left to give.
* Upon completion sizes of the reserve and excess pools are calculated.
*
* The system minimum entitlement level is also recalculated here.
* Entitlement will be reserved for devices even after vio_bus_remove to
* accommodate reloading the driver. The OF tree is walked to count the
* number of devices present and this will remove entitlement for devices
* that have actually left the system after having vio_bus_remove called.
*/
static void vio_cmo_balance(struct work_struct *work)
{
struct vio_cmo *cmo;
struct vio_dev *viodev;
struct vio_cmo_dev_entry *dev_ent;
unsigned long flags;
size_t avail = 0, level, chunk, need;
int devcount = 0, fulfilled;
cmo = container_of(work, struct vio_cmo, balance_q.work);
spin_lock_irqsave(&vio_cmo.lock, flags);
/* Calculate minimum entitlement and fulfill spare */
cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
BUG_ON(cmo->min > cmo->entitled);
cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
cmo->min += cmo->spare;
cmo->desired = cmo->min;
/*
* Determine how much entitlement is available and reset device
* entitlements
*/
avail = cmo->entitled - cmo->spare;
list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
viodev = dev_ent->viodev;
devcount++;
viodev->cmo.entitled = VIO_CMO_MIN_ENT;
cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
}
/*
* Having provided each device with the minimum entitlement, loop
* over the devices portioning out the remaining entitlement
* until there is nothing left.
*/
level = VIO_CMO_MIN_ENT;
while (avail) {
fulfilled = 0;
list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
viodev = dev_ent->viodev;
if (viodev->cmo.desired <= level) {
fulfilled++;
continue;
}
/*
* Give the device up to VIO_CMO_BALANCE_CHUNK
* bytes of entitlement, but do not exceed the
* desired level of entitlement for the device.
*/
chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
chunk = min(chunk, (viodev->cmo.desired -
viodev->cmo.entitled));
viodev->cmo.entitled += chunk;
/*
* If the memory for this entitlement increase was
* already allocated to the device it does not come
* from the available pool being portioned out.
*/
need = max(viodev->cmo.allocated, viodev->cmo.entitled)-
max(viodev->cmo.allocated, level);
avail -= need;
}
if (fulfilled == devcount)
break;
level += VIO_CMO_BALANCE_CHUNK;
}
/* Calculate new reserve and excess pool sizes */
cmo->reserve.size = cmo->min;
cmo->excess.free = 0;
cmo->excess.size = 0;
need = 0;
list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
viodev = dev_ent->viodev;
/* Calculated reserve size above the minimum entitlement */
if (viodev->cmo.entitled)
cmo->reserve.size += (viodev->cmo.entitled -
VIO_CMO_MIN_ENT);
/* Calculated used excess entitlement */
if (viodev->cmo.allocated > viodev->cmo.entitled)
need += viodev->cmo.allocated - viodev->cmo.entitled;
}
cmo->excess.size = cmo->entitled - cmo->reserve.size;
cmo->excess.free = cmo->excess.size - need;
cancel_delayed_work(to_delayed_work(work));
spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
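/*
* CMO-aware DMA ops: every mapping first charges the device's IO memory
* entitlement via vio_cmo_alloc() and refunds it with vio_cmo_dealloc()
* on unmap or on failure.
*/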
static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
void *ret;
if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
atomic_inc(&viodev->cmo.allocs_failed);
return NULL;
}
ret = iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
dma_handle, dev->coherent_dma_mask, flag,
dev_to_node(dev));
if (unlikely(ret == NULL)) {
vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
atomic_inc(&viodev->cmo.allocs_failed);
}
return ret;
}
static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}
static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct iommu_table *tbl = get_iommu_table_base(dev);
dma_addr_t ret = DMA_MAPPING_ERROR;
if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))
goto out_fail;
ret = iommu_map_page(dev, tbl, page, offset, size, dma_get_mask(dev),
direction, attrs);
if (unlikely(ret == DMA_MAPPING_ERROR))
goto out_deallocate;
return ret;
out_deallocate:
vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
out_fail:
atomic_inc(&viodev->cmo.allocs_failed);
return DMA_MAPPING_ERROR;
}
static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size,
enum dma_data_direction direction,
unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct iommu_table *tbl = get_iommu_table_base(dev);
iommu_unmap_page(tbl, dma_handle, size, direction, attrs);
vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
}
static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct iommu_table *tbl = get_iommu_table_base(dev);
struct scatterlist *sgl;
int ret, count;
size_t alloc_size = 0;
for_each_sg(sglist, sgl, nelems, count)
alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
ret = vio_cmo_alloc(viodev, alloc_size);
if (ret)
goto out_fail;
ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, dma_get_mask(dev),
direction, attrs);
if (unlikely(!ret))
goto out_deallocate;
for_each_sg(sglist, sgl, ret, count)
alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
if (alloc_size)
vio_cmo_dealloc(viodev, alloc_size);
return ret;
out_deallocate:
vio_cmo_dealloc(viodev, alloc_size);
out_fail:
atomic_inc(&viodev->cmo.allocs_failed);
return ret;
}
static void vio_dma_iommu_unmap_sg(struct device *dev,
struct scatterlist *sglist, int nelems,
enum dma_data_direction direction,
unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct iommu_table *tbl = get_iommu_table_base(dev);
struct scatterlist *sgl;
size_t alloc_size = 0;
int count;
for_each_sg(sglist, sgl, nelems, count)
alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
ppc_iommu_unmap_sg(tbl, sglist, nelems, direction, attrs);
vio_cmo_dealloc(viodev, alloc_size);
}
static const struct dma_map_ops vio_dma_mapping_ops = {
.alloc = vio_dma_iommu_alloc_coherent,
.free = vio_dma_iommu_free_coherent,
.map_sg = vio_dma_iommu_map_sg,
.unmap_sg = vio_dma_iommu_unmap_sg,
.map_page = vio_dma_iommu_map_page,
.unmap_page = vio_dma_iommu_unmap_page,
.dma_supported = dma_iommu_dma_supported,
.get_required_mask = dma_iommu_get_required_mask,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
};
/**
* vio_cmo_set_dev_desired - Set desired entitlement for a device
*
* @viodev: struct vio_dev for device to alter
* @desired: new desired entitlement level in bytes
*
* For use by devices to request a change to their entitlement at runtime or
* through sysfs. The desired entitlement level is changed and a balancing
* of system resources is scheduled to run in the future.
*/
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
{
unsigned long flags;
struct vio_cmo_dev_entry *dev_ent;
int found = 0;
if (!firmware_has_feature(FW_FEATURE_CMO))
return;
spin_lock_irqsave(&vio_cmo.lock, flags);
if (desired < VIO_CMO_MIN_ENT)
desired = VIO_CMO_MIN_ENT;
/*
* Changes will not be made for devices not in the device list.
* If it is not in the device list, then no driver is loaded
* for the device and it can not receive entitlement.
*/
list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
if (viodev == dev_ent->viodev) {
found = 1;
break;
}
if (!found) {
spin_unlock_irqrestore(&vio_cmo.lock, flags);
return;
}
/* Increase/decrease in desired device entitlement */
if (desired >= viodev->cmo.desired) {
/* Just bump the bus and device values prior to a balance*/
vio_cmo.desired += desired - viodev->cmo.desired;
viodev->cmo.desired = desired;
} else {
/* Decrease bus and device values for desired entitlement */
vio_cmo.desired -= viodev->cmo.desired - desired;
viodev->cmo.desired = desired;
/*
* If less entitlement is desired than current entitlement, move
* any reserve memory in the change region to the excess pool.
*/
if (viodev->cmo.entitled > desired) {
vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
vio_cmo.excess.size += viodev->cmo.entitled - desired;
/*
* If entitlement moving from the reserve pool to the
* excess pool is currently unused, add to the excess
* free counter.
*/
if (viodev->cmo.allocated < viodev->cmo.entitled)
vio_cmo.excess.free += viodev->cmo.entitled -
max(viodev->cmo.allocated, desired);
viodev->cmo.entitled = desired;
}
}
schedule_delayed_work(&vio_cmo.balance_q, 0);
spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
/**
* vio_cmo_bus_probe - Handle CMO specific bus probe activities
*
* @viodev - Pointer to struct vio_dev for device
*
* Determine the device's IO memory entitlement needs, attempting
* to satisfy the system minimum entitlement at first and scheduling
* a balance operation to take care of the rest at a later time.
*
* Returns: 0 on success, -EINVAL when device doesn't support CMO, and
* -ENOMEM when entitlement is not available for device or
* device entry.
*
*/
static int vio_cmo_bus_probe(struct vio_dev *viodev)
{
struct vio_cmo_dev_entry *dev_ent;
struct device *dev = &viodev->dev;
struct iommu_table *tbl;
struct vio_driver *viodrv = to_vio_driver(dev->driver);
unsigned long flags;
size_t size;
bool dma_capable = false;
tbl = get_iommu_table_base(dev);
/* A device requires entitlement if it has a DMA window property */
switch (viodev->family) {
case VDEVICE:
if (of_get_property(viodev->dev.of_node,
"ibm,my-dma-window", NULL))
dma_capable = true;
break;
case PFO:
dma_capable = false;
break;
default:
dev_warn(dev, "unknown device family: %d\n", viodev->family);
BUG();
break;
}
/* Configure entitlement for the device. */
if (dma_capable) {
/* Check that the driver is CMO enabled and get desired DMA */
if (!viodrv->get_desired_dma) {
dev_err(dev, "%s: device driver does not support CMO\n",
__func__);
return -EINVAL;
}
viodev->cmo.desired =
IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
viodev->cmo.desired = VIO_CMO_MIN_ENT;
size = VIO_CMO_MIN_ENT;
dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
GFP_KERNEL);
if (!dev_ent)
return -ENOMEM;
dev_ent->viodev = viodev;
spin_lock_irqsave(&vio_cmo.lock, flags);
list_add(&dev_ent->list, &vio_cmo.device_list);
} else {
viodev->cmo.desired = 0;
size = 0;
spin_lock_irqsave(&vio_cmo.lock, flags);
}
/*
* If the needs for vio_cmo.min have not changed since they
* were last set, the number of devices in the OF tree has
* been constant and the IO memory for this is already in
* the reserve pool.
*/
if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
VIO_CMO_MIN_ENT)) {
/* Updated desired entitlement if device requires it */
if (size)
vio_cmo.desired += (viodev->cmo.desired -
VIO_CMO_MIN_ENT);
} else {
size_t tmp;
tmp = vio_cmo.spare + vio_cmo.excess.free;
if (tmp < size) {
dev_err(dev, "%s: insufficient free "
"entitlement to add device. "
"Need %lu, have %lu\n", __func__,
size, (vio_cmo.spare + tmp));
spin_unlock_irqrestore(&vio_cmo.lock, flags);
return -ENOMEM;
}
/* Use excess pool first to fulfill request */
tmp = min(size, vio_cmo.excess.free);
vio_cmo.excess.free -= tmp;
vio_cmo.excess.size -= tmp;
vio_cmo.reserve.size += tmp;
/* Use spare if excess pool was insufficient */
vio_cmo.spare -= size - tmp;
/* Update bus accounting */
vio_cmo.min += size;
vio_cmo.desired += viodev->cmo.desired;
}
spin_unlock_irqrestore(&vio_cmo.lock, flags);
return 0;
}
/**
* vio_cmo_bus_remove - Handle CMO specific bus removal activities
*
* @viodev - Pointer to struct vio_dev for device
*
* Remove the device from the cmo device list. The minimum entitlement
* will be reserved for the device as long as it is in the system. The
* rest of the entitlement the device had been allocated will be returned
* to the system.
*/
static void vio_cmo_bus_remove(struct vio_dev *viodev)
{
struct vio_cmo_dev_entry *dev_ent;
unsigned long flags;
size_t tmp;
spin_lock_irqsave(&vio_cmo.lock, flags);
if (viodev->cmo.allocated) {
dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
"allocated after remove operation.\n",
__func__, viodev->cmo.allocated);
BUG();
}
/*
* Remove the device from the device list being maintained for
* CMO enabled devices.
*/
list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
if (viodev == dev_ent->viodev) {
list_del(&dev_ent->list);
kfree(dev_ent);
break;
}
/*
* Devices may not require any entitlement and they do not need
* to be processed. Otherwise, return the device's entitlement
* back to the pools.
*/
if (viodev->cmo.entitled) {
/*
* This device has not yet left the OF tree; its
* minimum entitlement remains in vio_cmo.min and
* vio_cmo.desired.
*/
vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);
/*
* Save min allocation for device in reserve as long
* as it exists in OF tree as determined by later
* balance operation
*/
viodev->cmo.entitled -= VIO_CMO_MIN_ENT;
/* Replenish spare from freed reserve pool */
if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
vio_cmo.spare));
vio_cmo.spare += tmp;
viodev->cmo.entitled -= tmp;
}
/* Remaining reserve goes to excess pool */
vio_cmo.excess.size += viodev->cmo.entitled;
vio_cmo.excess.free += viodev->cmo.entitled;
vio_cmo.reserve.size -= viodev->cmo.entitled;
/*
* Until the device is removed it will keep a
* minimum entitlement; this will guarantee that
* a module unload/load will result in a success.
*/
viodev->cmo.entitled = VIO_CMO_MIN_ENT;
viodev->cmo.desired = VIO_CMO_MIN_ENT;
atomic_set(&viodev->cmo.allocs_failed, 0);
}
spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
}
/**
* vio_cmo_bus_init - CMO entitlement initialization at bus init time
*
* Set up the reserve and excess entitlement pools based on available
* system entitlement and the number of devices in the OF tree that
* require entitlement in the reserve pool.
*/
static void vio_cmo_bus_init(void)
{
struct hvcall_mpp_data mpp_data;
int err;
memset(&vio_cmo, 0, sizeof(struct vio_cmo));
spin_lock_init(&vio_cmo.lock);
INIT_LIST_HEAD(&vio_cmo.device_list);
INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);
/* Get current system entitlement */
err = h_get_mpp(&mpp_data);
/*
* On failure, continue with entitlement set to 0, will panic()
* later when spare is reserved.
*/
if (err != H_SUCCESS) {
printk(KERN_ERR "%s: unable to determine system IO "\
"entitlement. (%d)\n", __func__, err);
vio_cmo.entitled = 0;
} else {
vio_cmo.entitled = mpp_data.entitled_mem;
}
/* Set reservation and check against entitlement */
vio_cmo.spare = VIO_CMO_MIN_ENT;
vio_cmo.reserve.size = vio_cmo.spare;
vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
VIO_CMO_MIN_ENT);
if (vio_cmo.reserve.size > vio_cmo.entitled) {
printk(KERN_ERR "%s: insufficient system entitlement\n",
__func__);
panic("%s: Insufficient system entitlement", __func__);
}
/* Set the remaining accounting variables */
vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
vio_cmo.excess.free = vio_cmo.excess.size;
vio_cmo.min = vio_cmo.reserve.size;
vio_cmo.desired = vio_cmo.reserve.size;
}
/* sysfs device functions and data structures for CMO */
#define viodev_cmo_rd_attr(name) \
static ssize_t cmo_##name##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name); \
}
static ssize_t cmo_allocs_failed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct vio_dev *viodev = to_vio_dev(dev);
return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
}
static ssize_t cmo_allocs_failed_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct vio_dev *viodev = to_vio_dev(dev);
atomic_set(&viodev->cmo.allocs_failed, 0);
return count;
}
static ssize_t cmo_desired_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct vio_dev *viodev = to_vio_dev(dev);
size_t new_desired;
int ret;
ret = kstrtoul(buf, 10, &new_desired);
if (ret)
return ret;
vio_cmo_set_dev_desired(viodev, new_desired);
return count;
}
viodev_cmo_rd_attr(desired);
viodev_cmo_rd_attr(entitled);
viodev_cmo_rd_attr(allocated);
static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf);
static struct device_attribute dev_attr_name;
static struct device_attribute dev_attr_devspec;
static struct device_attribute dev_attr_modalias;
static DEVICE_ATTR_RO(cmo_entitled);
static DEVICE_ATTR_RO(cmo_allocated);
static DEVICE_ATTR_RW(cmo_desired);
static DEVICE_ATTR_RW(cmo_allocs_failed);
static struct attribute *vio_cmo_dev_attrs[] = {
&dev_attr_name.attr,
&dev_attr_devspec.attr,
&dev_attr_modalias.attr,
&dev_attr_cmo_entitled.attr,
&dev_attr_cmo_allocated.attr,
&dev_attr_cmo_desired.attr,
&dev_attr_cmo_allocs_failed.attr,
NULL,
};
ATTRIBUTE_GROUPS(vio_cmo_dev);
/* sysfs bus functions and data structures for CMO */
#define viobus_cmo_rd_attr(name) \
static ssize_t cmo_bus_##name##_show(const struct bus_type *bt, char *buf) \
{ \
return sprintf(buf, "%lu\n", vio_cmo.name); \
} \
static struct bus_attribute bus_attr_cmo_bus_##name = \
__ATTR(cmo_##name, S_IRUGO, cmo_bus_##name##_show, NULL)
#define viobus_cmo_pool_rd_attr(name, var) \
static ssize_t \
cmo_##name##_##var##_show(const struct bus_type *bt, char *buf) \
{ \
return sprintf(buf, "%lu\n", vio_cmo.name.var); \
} \
static BUS_ATTR_RO(cmo_##name##_##var)
viobus_cmo_rd_attr(entitled);
viobus_cmo_rd_attr(spare);
viobus_cmo_rd_attr(min);
viobus_cmo_rd_attr(desired);
viobus_cmo_rd_attr(curr);
viobus_cmo_pool_rd_attr(reserve, size);
viobus_cmo_pool_rd_attr(excess, size);
viobus_cmo_pool_rd_attr(excess, free);
static ssize_t cmo_high_show(const struct bus_type *bt, char *buf)
{
return sprintf(buf, "%lu\n", vio_cmo.high);
}
static ssize_t cmo_high_store(const struct bus_type *bt, const char *buf,
size_t count)
{
unsigned long flags;
spin_lock_irqsave(&vio_cmo.lock, flags);
vio_cmo.high = vio_cmo.curr;
spin_unlock_irqrestore(&vio_cmo.lock, flags);
return count;
}
static BUS_ATTR_RW(cmo_high);
static struct attribute *vio_bus_attrs[] = {
&bus_attr_cmo_bus_entitled.attr,
&bus_attr_cmo_bus_spare.attr,
&bus_attr_cmo_bus_min.attr,
&bus_attr_cmo_bus_desired.attr,
&bus_attr_cmo_bus_curr.attr,
&bus_attr_cmo_high.attr,
&bus_attr_cmo_reserve_size.attr,
&bus_attr_cmo_excess_size.attr,
&bus_attr_cmo_excess_free.attr,
NULL,
};
ATTRIBUTE_GROUPS(vio_bus);
static void __init vio_cmo_sysfs_init(void)
{
vio_bus_type.dev_groups = vio_cmo_dev_groups;
vio_bus_type.bus_groups = vio_bus_groups;
}
#else /* CONFIG_PPC_SMLPAR */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
static void __init vio_cmo_sysfs_init(void) { }
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);
/*
* Platform Facilities Option (PFO) support
*/
/**
* vio_h_cop_sync - Perform a synchronous PFO co-processor operation
*
* @vdev - Pointer to a struct vio_dev for device
* @op - Pointer to a struct vio_pfo_op for the operation parameters
*
* Calls the hypervisor to synchronously perform the PFO operation
* described in @op. In the case of a busy response from the hypervisor,
* the operation will be re-submitted indefinitely unless a non-zero timeout
* is specified or an error occurs. The timeout places a limit on when to
* stop re-submitting a operation, the total time can be exceeded if an
* operation is in progress.
*
* If op->hcall_ret is not NULL, this will be set to the return from the
* last h_cop_op call or it will be 0 if an error not involving the h_call
* was encountered.
*
* Returns:
* 0 on success,
* -EINVAL if the h_call fails due to an invalid parameter,
* -E2BIG if the h_call can not be performed synchronously,
* -EBUSY if a timeout is specified and has elapsed,
* -EACCES if the memory area for data/status has been rescinded, or
* -EPERM if a hardware fault has been indicated
*/
int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
{
struct device *dev = &vdev->dev;
unsigned long deadline = 0;
long hret = 0;
int ret = 0;
if (op->timeout)
deadline = jiffies + msecs_to_jiffies(op->timeout);
while (true) {
hret = plpar_hcall_norets(H_COP, op->flags,
vdev->resource_id,
op->in, op->inlen, op->out,
op->outlen, op->csbcpb);
if (hret == H_SUCCESS ||
(hret != H_NOT_ENOUGH_RESOURCES &&
hret != H_BUSY && hret != H_RESOURCE) ||
(op->timeout && time_after(deadline, jiffies)))
break;
dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
}
switch (hret) {
case H_SUCCESS:
ret = 0;
break;
case H_OP_MODE:
case H_TOO_BIG:
ret = -E2BIG;
break;
case H_RESCINDED:
ret = -EACCES;
break;
case H_HARDWARE:
ret = -EPERM;
break;
case H_NOT_ENOUGH_RESOURCES:
case H_RESOURCE:
case H_BUSY:
ret = -EBUSY;
break;
default:
ret = -EINVAL;
break;
}
if (ret)
dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
__func__, ret, hret);
op->hcall_err = hret;
return ret;
}
EXPORT_SYMBOL(vio_h_cop_sync);
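/*
* Build an iommu_table from the device's "ibm,my-dma-window" property,
* using 4K TCE pages and the LPAR or native pseries table ops as
* appropriate.
*/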
static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
const __be32 *dma_window;
struct iommu_table *tbl;
unsigned long offset, size;
dma_window = of_get_property(dev->dev.of_node,
"ibm,my-dma-window", NULL);
if (!dma_window)
return NULL;
tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
if (tbl == NULL)
return NULL;
kref_init(&tbl->it_kref);
of_parse_dma_window(dev->dev.of_node, dma_window,
&tbl->it_index, &offset, &size);
/* TCE table size - measured in tce entries */
tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
tbl->it_size = size >> tbl->it_page_shift;
/* offset for VIO should always be 0 */
tbl->it_offset = offset >> tbl->it_page_shift;
tbl->it_busno = 0;
tbl->it_type = TCE_VB;
tbl->it_blocksize = 16;
if (firmware_has_feature(FW_FEATURE_LPAR))
tbl->it_ops = &iommu_table_lpar_multi_ops;
else
tbl->it_ops = &iommu_table_pseries_ops;
return iommu_init_table(tbl, -1, 0, 0);
}
/**
* vio_match_device: - Tell if a VIO device has a matching
* VIO device id structure.
* @ids: array of VIO device id structures to search in
* @dev: the VIO device structure to match against
*
* Used by a driver to check whether a VIO device present in the
* system is in its list of supported devices. Returns the matching
* vio_device_id structure or NULL if there is no match.
*/
static const struct vio_device_id *vio_match_device(
const struct vio_device_id *ids, const struct vio_dev *dev)
{
while (ids->type[0] != '\0') {
if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
of_device_is_compatible(dev->dev.of_node,
ids->compat))
return ids;
ids++;
}
return NULL;
}
/*
* Convert from struct device to struct vio_dev and pass to driver.
* dev->driver has already been set by generic code because vio_bus_match
* succeeded.
*/
static int vio_bus_probe(struct device *dev)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct vio_driver *viodrv = to_vio_driver(dev->driver);
const struct vio_device_id *id;
int error = -ENODEV;
if (!viodrv->probe)
return error;
id = vio_match_device(viodrv->id_table, viodev);
if (id) {
memset(&viodev->cmo, 0, sizeof(viodev->cmo));
if (firmware_has_feature(FW_FEATURE_CMO)) {
error = vio_cmo_bus_probe(viodev);
if (error)
return error;
}
error = viodrv->probe(viodev, id);
if (error && firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_bus_remove(viodev);
}
return error;
}
/* convert from struct device to struct vio_dev and pass to driver. */
static void vio_bus_remove(struct device *dev)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct vio_driver *viodrv = to_vio_driver(dev->driver);
struct device *devptr;
/*
* Hold a reference to the device after the remove function is called
* to allow for CMO accounting cleanup for the device.
*/
devptr = get_device(dev);
if (viodrv->remove)
viodrv->remove(viodev);
if (firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_bus_remove(viodev);
put_device(devptr);
}
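/*
* Invoke the driver's shutdown hook if it has one; during kexec fall
* back to a full remove so CMO accounting is cleaned up.
*/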
static void vio_bus_shutdown(struct device *dev)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct vio_driver *viodrv;
if (dev->driver) {
viodrv = to_vio_driver(dev->driver);
if (viodrv->shutdown)
viodrv->shutdown(viodev);
else if (kexec_in_progress)
vio_bus_remove(dev);
}
}
/**
* vio_register_driver: - Register a new vio driver
* @viodrv: The vio_driver structure to be registered.
*/
int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
const char *mod_name)
{
// vio_bus_type is only initialised for pseries
if (!machine_is(pseries))
return -ENODEV;
pr_debug("%s: driver %s registering\n", __func__, viodrv->name);
/* fill in 'struct driver' fields */
viodrv->driver.name = viodrv->name;
viodrv->driver.pm = viodrv->pm;
viodrv->driver.bus = &vio_bus_type;
viodrv->driver.owner = owner;
viodrv->driver.mod_name = mod_name;
return driver_register(&viodrv->driver);
}
EXPORT_SYMBOL(__vio_register_driver);
/**
* vio_unregister_driver - Remove registration of vio driver.
* @viodrv: The vio_driver struct to be removed form registration
*/
void vio_unregister_driver(struct vio_driver *viodrv)
{
driver_unregister(&viodrv->driver);
}
EXPORT_SYMBOL(vio_unregister_driver);
/* vio_dev refcount hit 0 */
static void vio_dev_release(struct device *dev)
{
struct iommu_table *tbl = get_iommu_table_base(dev);
if (tbl)
iommu_tce_table_put(tbl);
of_node_put(dev->of_node);
kfree(to_vio_dev(dev));
}
/**
* vio_register_device_node: - Register a new vio device.
* @of_node: The OF node for this device.
*
* Creates and initializes a vio_dev structure from the data in
* of_node and adds it to the list of virtual devices.
* Returns a pointer to the created vio_dev or NULL if node has
* NULL device_type or compatible fields.
*/
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
struct vio_dev *viodev;
struct device_node *parent_node;
const __be32 *prop;
enum vio_dev_family family;
/*
* Determine if this node is under the /vdevice node or under the
* /ibm,platform-facilities node. This decides the device's family.
*/
parent_node = of_get_parent(of_node);
if (parent_node) {
if (of_node_is_type(parent_node, "ibm,platform-facilities"))
family = PFO;
else if (of_node_is_type(parent_node, "vdevice"))
family = VDEVICE;
else {
pr_warn("%s: parent(%pOF) of %pOFn not recognized.\n",
__func__,
parent_node,
of_node);
of_node_put(parent_node);
return NULL;
}
of_node_put(parent_node);
} else {
pr_warn("%s: could not determine the parent of node %pOFn.\n",
__func__, of_node);
return NULL;
}
if (family == PFO) {
if (of_property_read_bool(of_node, "interrupt-controller")) {
pr_debug("%s: Skipping the interrupt controller %pOFn.\n",
__func__, of_node);
return NULL;
}
}
/* allocate a vio_dev for this node */
viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
if (viodev == NULL) {
pr_warn("%s: allocation failure for VIO device.\n", __func__);
return NULL;
}
/* we need the 'device_type' property, in order to match with drivers */
viodev->family = family;
if (viodev->family == VDEVICE) {
unsigned int unit_address;
viodev->type = of_node_get_device_type(of_node);
if (!viodev->type) {
pr_warn("%s: node %pOFn is missing the 'device_type' "
"property.\n", __func__, of_node);
goto out;
}
prop = of_get_property(of_node, "reg", NULL);
if (prop == NULL) {
pr_warn("%s: node %pOFn missing 'reg'\n",
__func__, of_node);
goto out;
}
unit_address = of_read_number(prop, 1);
dev_set_name(&viodev->dev, "%x", unit_address);
viodev->irq = irq_of_parse_and_map(of_node, 0);
viodev->unit_address = unit_address;
} else {
/* PFO devices need their resource_id for submitting COP_OPs.
* This is an optional field for devices, but is required when
* performing synchronous ops */
prop = of_get_property(of_node, "ibm,resource-id", NULL);
if (prop != NULL)
viodev->resource_id = of_read_number(prop, 1);
dev_set_name(&viodev->dev, "%pOFn", of_node);
viodev->type = dev_name(&viodev->dev);
viodev->irq = 0;
}
viodev->name = of_node->name;
viodev->dev.of_node = of_node_get(of_node);
set_dev_node(&viodev->dev, of_node_to_nid(of_node));
/* init generic 'struct device' fields: */
viodev->dev.parent = &vio_bus_device.dev;
viodev->dev.bus = &vio_bus_type;
viodev->dev.release = vio_dev_release;
if (of_property_present(viodev->dev.of_node, "ibm,my-dma-window")) {
if (firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_set_dma_ops(viodev);
else
set_dma_ops(&viodev->dev, &dma_iommu_ops);
set_iommu_table_base(&viodev->dev,
vio_build_iommu_table(viodev));
/* needed to ensure proper operation of coherent allocations
* later, in case driver doesn't set it explicitly */
viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask;
}
/* register with generic device framework */
if (device_register(&viodev->dev)) {
printk(KERN_ERR "%s: failed to register device %s\n",
__func__, dev_name(&viodev->dev));
put_device(&viodev->dev);
return NULL;
}
return viodev;
out: /* Use this exit point for any return prior to device_register */
kfree(viodev);
return NULL;
}
EXPORT_SYMBOL(vio_register_device_node);
/*
 * vio_bus_scan_register_devices - Scan OF and register each child device
 * @root_name - OF node name for the root of the subtree to search.
 * This must be non-NULL.
 *
 * Starting from the root node provided, register a vio device for
 * each child beneath the root.
*/
static void __init vio_bus_scan_register_devices(char *root_name)
{
struct device_node *node_root, *node_child;
if (!root_name)
return;
node_root = of_find_node_by_name(NULL, root_name);
if (node_root) {
/*
* Create struct vio_devices for each virtual device in
* the device tree. Drivers will associate with them later.
*/
node_child = of_get_next_child(node_root, NULL);
while (node_child) {
vio_register_device_node(node_child);
node_child = of_get_next_child(node_root, node_child);
}
of_node_put(node_root);
}
}
/**
* vio_bus_init: - Initialize the virtual IO bus
*/
static int __init vio_bus_init(void)
{
int err;
if (firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_sysfs_init();
err = bus_register(&vio_bus_type);
if (err) {
printk(KERN_ERR "failed to register VIO bus\n");
return err;
}
/*
* The fake parent of all vio devices, just to give us
* a nice directory
*/
err = device_register(&vio_bus_device.dev);
if (err) {
printk(KERN_WARNING "%s: device_register returned %i\n",
__func__, err);
return err;
}
if (firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_bus_init();
return 0;
}
machine_postcore_initcall(pseries, vio_bus_init);
static int __init vio_device_init(void)
{
vio_bus_scan_register_devices("vdevice");
vio_bus_scan_register_devices("ibm,platform-facilities");
return 0;
}
machine_device_initcall(pseries, vio_device_init);
static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
}
static DEVICE_ATTR_RO(name);
static ssize_t devspec_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct device_node *of_node = dev->of_node;
return sprintf(buf, "%pOF\n", of_node);
}
static DEVICE_ATTR_RO(devspec);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
const struct vio_dev *vio_dev = to_vio_dev(dev);
struct device_node *dn;
const char *cp;
dn = dev->of_node;
if (!dn) {
strcpy(buf, "\n");
return strlen(buf);
}
cp = of_get_property(dn, "compatible", NULL);
if (!cp) {
strcpy(buf, "\n");
return strlen(buf);
}
return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
}
static DEVICE_ATTR_RO(modalias);
static struct attribute *vio_dev_attrs[] = {
&dev_attr_name.attr,
&dev_attr_devspec.attr,
&dev_attr_modalias.attr,
NULL,
};
ATTRIBUTE_GROUPS(vio_dev);
void vio_unregister_device(struct vio_dev *viodev)
{
device_unregister(&viodev->dev);
if (viodev->family == VDEVICE)
irq_dispose_mapping(viodev->irq);
}
EXPORT_SYMBOL(vio_unregister_device);
static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
const struct vio_dev *vio_dev = to_vio_dev(dev);
struct vio_driver *vio_drv = to_vio_driver(drv);
const struct vio_device_id *ids = vio_drv->id_table;
return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
}
static int vio_hotplug(const struct device *dev, struct kobj_uevent_env *env)
{
const struct vio_dev *vio_dev = to_vio_dev(dev);
const struct device_node *dn;
const char *cp;
dn = dev->of_node;
if (!dn)
return -ENODEV;
cp = of_get_property(dn, "compatible", NULL);
if (!cp)
return -ENODEV;
add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
return 0;
}
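/*
 * Illustrative sketch, not part of this file: the uevent string built above
 * has the form "vio:T<device_type>S<compatible>". A hypothetical driver that
 * matches such a device would declare an id table like the following, where
 * "vscsi" and "IBM,v-scsi" are example values only:
 *
 *	static const struct vio_device_id example_ids[] = {
 *		{ "vscsi", "IBM,v-scsi" },	// { type, compat }
 *		{ "", "" },			// terminator
 *	};
 *	MODULE_DEVICE_TABLE(vio, example_ids);
 *
 * Module aliases generated from such a table are what the MODALIAS string
 * above is matched against.
 */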
struct bus_type vio_bus_type = {
.name = "vio",
.dev_groups = vio_dev_groups,
.uevent = vio_hotplug,
.match = vio_bus_match,
.probe = vio_bus_probe,
.remove = vio_bus_remove,
.shutdown = vio_bus_shutdown,
};
/**
* vio_get_attribute: - get attribute for virtual device
 * @vdev: The vio device whose property is to be retrieved.
* @which: The property/attribute to be extracted.
* @length: Pointer to length of returned data size (unused if NULL).
*
 * Calls of_get_property() to return the value of the
 * attribute specified by @which.
*/
const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
{
return of_get_property(vdev->dev.of_node, which, length);
}
EXPORT_SYMBOL(vio_get_attribute);
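/*
 * Usage sketch (hypothetical, for illustration only): a vio driver's probe
 * routine might read a device-tree property of its device like this. The
 * function and property names below are example values, not taken from any
 * in-tree driver.
 *
 *	static int example_probe(struct vio_dev *vdev,
 *				 const struct vio_device_id *id)
 *	{
 *		int len;
 *		const __be32 *dma_window;
 *
 *		dma_window = vio_get_attribute(vdev, "ibm,my-dma-window", &len);
 *		if (!dma_window)
 *			return -ENODEV;
 *		// parse the window, set up the device, etc.
 *		return 0;
 *	}
 */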
/* vio_find_name() - internal because only vio.c knows how we formatted the
* kobject name
*/
static struct vio_dev *vio_find_name(const char *name)
{
struct device *found;
found = bus_find_device_by_name(&vio_bus_type, NULL, name);
if (!found)
return NULL;
return to_vio_dev(found);
}
/**
* vio_find_node - find an already-registered vio_dev
* @vnode: device_node of the virtual device we're looking for
*
* Takes a reference to the embedded struct device which needs to be dropped
* after use.
*/
struct vio_dev *vio_find_node(struct device_node *vnode)
{
char kobj_name[20];
struct device_node *vnode_parent;
vnode_parent = of_get_parent(vnode);
if (!vnode_parent)
return NULL;
/* construct the kobject name from the device node */
if (of_node_is_type(vnode_parent, "vdevice")) {
const __be32 *prop;
prop = of_get_property(vnode, "reg", NULL);
if (!prop)
goto out;
snprintf(kobj_name, sizeof(kobj_name), "%x",
(uint32_t)of_read_number(prop, 1));
} else if (of_node_is_type(vnode_parent, "ibm,platform-facilities"))
snprintf(kobj_name, sizeof(kobj_name), "%pOFn", vnode);
else
goto out;
of_node_put(vnode_parent);
return vio_find_name(kobj_name);
out:
of_node_put(vnode_parent);
return NULL;
}
EXPORT_SYMBOL(vio_find_node);
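/*
 * Usage sketch (illustrative): callers of vio_find_node() must drop the
 * device reference taken above when they are done with the device:
 *
 *	struct vio_dev *vdev = vio_find_node(dn);
 *
 *	if (vdev) {
 *		// ... use vdev ...
 *		put_device(&vdev->dev);
 *	}
 */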
int vio_enable_interrupts(struct vio_dev *dev)
{
int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
if (rc != H_SUCCESS)
printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
return rc;
}
EXPORT_SYMBOL(vio_enable_interrupts);
int vio_disable_interrupts(struct vio_dev *dev)
{
int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
if (rc != H_SUCCESS)
printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
return rc;
}
EXPORT_SYMBOL(vio_disable_interrupts);
static int __init vio_init(void)
{
dma_debug_add_bus(&vio_bus_type);
return 0;
}
machine_fs_initcall(pseries, vio_init);
| linux-master | arch/powerpc/platforms/pseries/vio.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PCI Dynamic LPAR, PCI Hot Plug and PCI EEH recovery code
* for RPA-compliant PPC64 platform.
* Copyright (C) 2003 Linda Xie <[email protected]>
* Copyright (C) 2005 International Business Machines
*
* Updates, 2005, John Rose <[email protected]>
* Updates, 2005, Linas Vepstas <[email protected]>
*/
#include <linux/pci.h>
#include <linux/export.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
#include <asm/eeh.h>
#include "pseries.h"
struct pci_controller *init_phb_dynamic(struct device_node *dn)
{
struct pci_controller *phb;
pr_debug("PCI: Initializing new hotplug PHB %pOF\n", dn);
phb = pcibios_alloc_controller(dn);
if (!phb)
return NULL;
rtas_setup_phb(phb);
pci_process_bridge_OF_ranges(phb, dn, 0);
phb->controller_ops = pseries_pci_controller_ops;
pci_devs_phb_init_dynamic(phb);
pseries_msi_allocate_domains(phb);
/* Create EEH devices for the PHB */
eeh_phb_pe_create(phb);
if (dn->child)
pseries_eeh_init_edev_recursive(PCI_DN(dn));
pcibios_scan_phb(phb);
pcibios_finish_adding_to_bus(phb->bus);
return phb;
}
EXPORT_SYMBOL_GPL(init_phb_dynamic);
/* RPA-specific bits for removing PHBs */
int remove_phb_dynamic(struct pci_controller *phb)
{
struct pci_bus *b = phb->bus;
struct pci_host_bridge *host_bridge = to_pci_host_bridge(b->bridge);
struct resource *res;
int rc, i;
pr_debug("PCI: Removing PHB %04x:%02x...\n",
pci_domain_nr(b), b->number);
/* We cannot remove a root bus that has children */
if (!(list_empty(&b->children) && list_empty(&b->devices)))
return -EBUSY;
/* We -know- there aren't any child devices anymore at this stage
* and thus, we can safely unmap the IO space as it's not in use
*/
res = &phb->io_resource;
if (res->flags & IORESOURCE_IO) {
rc = pcibios_unmap_io_space(b);
if (rc) {
printk(KERN_ERR "%s: failed to unmap IO on bus %s\n",
__func__, b->name);
return 1;
}
}
pseries_msi_free_domains(phb);
/* Keep a reference so phb isn't freed yet */
get_device(&host_bridge->dev);
/* Remove the PCI bus and unregister the bridge device from sysfs */
phb->bus = NULL;
pci_remove_bus(b);
host_bridge->bus = NULL;
device_unregister(&host_bridge->dev);
/* Now release the IO resource */
if (res->flags & IORESOURCE_IO)
release_resource(res);
/* Release memory resources */
for (i = 0; i < 3; ++i) {
res = &phb->mem_resources[i];
if (!(res->flags & IORESOURCE_MEM))
continue;
release_resource(res);
}
/*
* The pci_controller data structure is freed by
* the pcibios_free_controller_deferred() callback;
* see pseries_root_bridge_prepare().
*/
put_device(&host_bridge->dev);
return 0;
}
EXPORT_SYMBOL_GPL(remove_phb_dynamic);
| linux-master | arch/powerpc/platforms/pseries/pci_dlpar.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Confidential Computing Platform Capability checks
*
* Copyright (C) 2021 Advanced Micro Devices, Inc.
*
* Author: Tom Lendacky <[email protected]>
*/
#include <linux/export.h>
#include <linux/cc_platform.h>
#include <asm/machdep.h>
#include <asm/svm.h>
bool cc_platform_has(enum cc_attr attr)
{
switch (attr) {
case CC_ATTR_MEM_ENCRYPT:
return is_secure_guest();
default:
return false;
}
}
EXPORT_SYMBOL_GPL(cc_platform_has);
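/*
 * Usage sketch (illustrative): generic code can use this hook to ask whether
 * guest memory is encrypted, for example:
 *
 *	#include <linux/cc_platform.h>
 *
 *	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 *		pr_info("running as a secure guest\n");
 *
 * On pseries this ends up calling is_secure_guest(), as implemented above.
 */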
| linux-master | arch/powerpc/platforms/pseries/cc_platform.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* POWER LPAR Platform KeyStore(PLPKS)
* Copyright (C) 2022 IBM Corporation
* Author: Nayna Jain <[email protected]>
*
* Provides access to variables stored in Power LPAR Platform KeyStore(PLPKS).
*/
#define pr_fmt(fmt) "plpks: " fmt
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/memblock.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/plpks.h>
#include <asm/firmware.h>
static u8 *ospassword;
static u16 ospasswordlength;
// Retrieved with H_PKS_GET_CONFIG
static u8 version;
static u16 objoverhead;
static u16 maxpwsize;
static u16 maxobjsize;
static s16 maxobjlabelsize;
static u32 totalsize;
static u32 usedspace;
static u32 supportedpolicies;
static u32 maxlargeobjectsize;
static u64 signedupdatealgorithms;
struct plpks_auth {
u8 version;
u8 consumer;
__be64 rsvd0;
__be32 rsvd1;
__be16 passwordlength;
u8 password[];
} __packed __aligned(16);
struct label_attr {
u8 prefix[8];
u8 version;
u8 os;
u8 length;
u8 reserved[5];
};
struct label {
struct label_attr attr;
u8 name[PLPKS_MAX_NAME_SIZE];
size_t size;
};
static int pseries_status_to_err(int rc)
{
int err;
switch (rc) {
case H_SUCCESS:
err = 0;
break;
case H_FUNCTION:
err = -ENXIO;
break;
case H_PARAMETER:
case H_P2:
case H_P3:
case H_P4:
case H_P5:
case H_P6:
err = -EINVAL;
break;
case H_NOT_FOUND:
err = -ENOENT;
break;
case H_BUSY:
case H_LONG_BUSY_ORDER_1_MSEC:
case H_LONG_BUSY_ORDER_10_MSEC:
case H_LONG_BUSY_ORDER_100_MSEC:
case H_LONG_BUSY_ORDER_1_SEC:
case H_LONG_BUSY_ORDER_10_SEC:
case H_LONG_BUSY_ORDER_100_SEC:
err = -EBUSY;
break;
case H_AUTHORITY:
err = -EPERM;
break;
case H_NO_MEM:
err = -ENOMEM;
break;
case H_RESOURCE:
err = -EEXIST;
break;
case H_TOO_BIG:
err = -EFBIG;
break;
case H_STATE:
err = -EIO;
break;
case H_R_STATE:
err = -EIO;
break;
case H_IN_USE:
err = -EEXIST;
break;
case H_ABORTED:
err = -EIO;
break;
default:
err = -EINVAL;
}
pr_debug("Converted hypervisor code %d to Linux %d\n", rc, err);
return err;
}
static int plpks_gen_password(void)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
u8 *password, consumer = PLPKS_OS_OWNER;
int rc;
// If we booted from kexec, we could be reusing an existing password already
if (ospassword) {
pr_debug("Password of length %u already in use\n", ospasswordlength);
return 0;
}
// The password must not cross a page boundary, so we align to the next power of 2
password = kzalloc(roundup_pow_of_two(maxpwsize), GFP_KERNEL);
if (!password)
return -ENOMEM;
rc = plpar_hcall(H_PKS_GEN_PASSWORD, retbuf, consumer, 0,
virt_to_phys(password), maxpwsize);
if (!rc) {
ospasswordlength = maxpwsize;
ospassword = kzalloc(maxpwsize, GFP_KERNEL);
if (!ospassword) {
kfree(password);
return -ENOMEM;
}
memcpy(ospassword, password, ospasswordlength);
} else {
if (rc == H_IN_USE) {
pr_warn("Password already set - authenticated operations will fail\n");
rc = 0;
} else {
goto out;
}
}
out:
kfree(password);
return pseries_status_to_err(rc);
}
static struct plpks_auth *construct_auth(u8 consumer)
{
struct plpks_auth *auth;
if (consumer > PLPKS_OS_OWNER)
return ERR_PTR(-EINVAL);
// The auth structure must not cross a page boundary and must be
// 16 byte aligned. We align to the next largest power of 2
auth = kzalloc(roundup_pow_of_two(struct_size(auth, password, maxpwsize)), GFP_KERNEL);
if (!auth)
return ERR_PTR(-ENOMEM);
auth->version = 1;
auth->consumer = consumer;
if (consumer == PLPKS_FW_OWNER || consumer == PLPKS_BOOTLOADER_OWNER)
return auth;
memcpy(auth->password, ospassword, ospasswordlength);
auth->passwordlength = cpu_to_be16(ospasswordlength);
return auth;
}
/*
* Label is combination of label attributes + name.
* Label attributes are used internally by kernel and not exposed to the user.
*/
static struct label *construct_label(char *component, u8 varos, u8 *name,
u16 namelen)
{
struct label *label;
size_t slen = 0;
if (!name || namelen > PLPKS_MAX_NAME_SIZE)
return ERR_PTR(-EINVAL);
// Support NULL component for signed updates
if (component) {
slen = strlen(component);
if (slen > sizeof(label->attr.prefix))
return ERR_PTR(-EINVAL);
}
// The label structure must not cross a page boundary, so we align to the next power of 2
label = kzalloc(roundup_pow_of_two(sizeof(*label)), GFP_KERNEL);
if (!label)
return ERR_PTR(-ENOMEM);
if (component)
memcpy(&label->attr.prefix, component, slen);
label->attr.version = PLPKS_LABEL_VERSION;
label->attr.os = varos;
label->attr.length = PLPKS_MAX_LABEL_ATTR_SIZE;
memcpy(&label->name, name, namelen);
label->size = sizeof(struct label_attr) + namelen;
return label;
}
static int _plpks_get_config(void)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
struct config {
u8 version;
u8 flags;
__be16 rsvd0;
__be16 objoverhead;
__be16 maxpwsize;
__be16 maxobjlabelsize;
__be16 maxobjsize;
__be32 totalsize;
__be32 usedspace;
__be32 supportedpolicies;
__be32 maxlargeobjectsize;
__be64 signedupdatealgorithms;
u8 rsvd1[476];
} __packed * config;
size_t size;
int rc = 0;
size = sizeof(*config);
// Config struct must not cross a page boundary. So long as the struct
// size is a power of 2, this should be fine as alignment is guaranteed
config = kzalloc(size, GFP_KERNEL);
if (!config) {
rc = -ENOMEM;
goto err;
}
rc = plpar_hcall(H_PKS_GET_CONFIG, retbuf, virt_to_phys(config), size);
if (rc != H_SUCCESS) {
rc = pseries_status_to_err(rc);
goto err;
}
version = config->version;
objoverhead = be16_to_cpu(config->objoverhead);
maxpwsize = be16_to_cpu(config->maxpwsize);
maxobjsize = be16_to_cpu(config->maxobjsize);
maxobjlabelsize = be16_to_cpu(config->maxobjlabelsize);
totalsize = be32_to_cpu(config->totalsize);
usedspace = be32_to_cpu(config->usedspace);
supportedpolicies = be32_to_cpu(config->supportedpolicies);
maxlargeobjectsize = be32_to_cpu(config->maxlargeobjectsize);
signedupdatealgorithms = be64_to_cpu(config->signedupdatealgorithms);
// Validate that the numbers we get back match the requirements of the spec
if (maxpwsize < 32) {
pr_err("Invalid Max Password Size received from hypervisor (%d < 32)\n", maxpwsize);
rc = -EIO;
goto err;
}
if (maxobjlabelsize < 255) {
pr_err("Invalid Max Object Label Size received from hypervisor (%d < 255)\n",
maxobjlabelsize);
rc = -EIO;
goto err;
}
if (totalsize < 4096) {
pr_err("Invalid Total Size received from hypervisor (%d < 4096)\n", totalsize);
rc = -EIO;
goto err;
}
if (version >= 3 && maxlargeobjectsize >= 65536 && maxobjsize != 0xFFFF) {
pr_err("Invalid Max Object Size (0x%x != 0xFFFF)\n", maxobjsize);
rc = -EIO;
goto err;
}
err:
kfree(config);
return rc;
}
u8 plpks_get_version(void)
{
return version;
}
u16 plpks_get_objoverhead(void)
{
return objoverhead;
}
u16 plpks_get_maxpwsize(void)
{
return maxpwsize;
}
u16 plpks_get_maxobjectsize(void)
{
return maxobjsize;
}
u16 plpks_get_maxobjectlabelsize(void)
{
return maxobjlabelsize;
}
u32 plpks_get_totalsize(void)
{
return totalsize;
}
u32 plpks_get_usedspace(void)
{
// Unlike other config values, usedspace regularly changes as objects
// are updated, so we need to refresh.
int rc = _plpks_get_config();
if (rc) {
pr_err("Couldn't get config, rc: %d\n", rc);
return 0;
}
return usedspace;
}
u32 plpks_get_supportedpolicies(void)
{
return supportedpolicies;
}
u32 plpks_get_maxlargeobjectsize(void)
{
return maxlargeobjectsize;
}
u64 plpks_get_signedupdatealgorithms(void)
{
return signedupdatealgorithms;
}
u16 plpks_get_passwordlen(void)
{
return ospasswordlength;
}
bool plpks_is_available(void)
{
int rc;
if (!firmware_has_feature(FW_FEATURE_PLPKS))
return false;
rc = _plpks_get_config();
if (rc)
return false;
return true;
}
static int plpks_confirm_object_flushed(struct label *label,
struct plpks_auth *auth)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
bool timed_out = true;
u64 timeout = 0;
u8 status;
int rc;
do {
rc = plpar_hcall(H_PKS_CONFIRM_OBJECT_FLUSHED, retbuf,
virt_to_phys(auth), virt_to_phys(label),
label->size);
status = retbuf[0];
if (rc) {
timed_out = false;
if (rc == H_NOT_FOUND && status == 1)
rc = 0;
break;
}
if (!rc && status == 1) {
timed_out = false;
break;
}
usleep_range(PLPKS_FLUSH_SLEEP,
PLPKS_FLUSH_SLEEP + PLPKS_FLUSH_SLEEP_RANGE);
timeout = timeout + PLPKS_FLUSH_SLEEP;
} while (timeout < PLPKS_MAX_TIMEOUT);
if (timed_out)
return -ETIMEDOUT;
return pseries_status_to_err(rc);
}
int plpks_signed_update_var(struct plpks_var *var, u64 flags)
{
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
int rc;
struct label *label;
struct plpks_auth *auth;
u64 continuetoken = 0;
u64 timeout = 0;
if (!var->data || var->datalen <= 0 || var->namelen > PLPKS_MAX_NAME_SIZE)
return -EINVAL;
if (!(var->policy & PLPKS_SIGNEDUPDATE))
return -EINVAL;
// Signed updates need the component to be NULL.
if (var->component)
return -EINVAL;
auth = construct_auth(PLPKS_OS_OWNER);
if (IS_ERR(auth))
return PTR_ERR(auth);
label = construct_label(var->component, var->os, var->name, var->namelen);
if (IS_ERR(label)) {
rc = PTR_ERR(label);
goto out;
}
do {
rc = plpar_hcall9(H_PKS_SIGNED_UPDATE, retbuf,
virt_to_phys(auth), virt_to_phys(label),
label->size, var->policy, flags,
virt_to_phys(var->data), var->datalen,
continuetoken);
continuetoken = retbuf[0];
if (pseries_status_to_err(rc) == -EBUSY) {
int delay_ms = get_longbusy_msecs(rc);
mdelay(delay_ms);
timeout += delay_ms;
}
rc = pseries_status_to_err(rc);
} while (rc == -EBUSY && timeout < PLPKS_MAX_TIMEOUT);
if (!rc)
rc = plpks_confirm_object_flushed(label, auth);
kfree(label);
out:
kfree(auth);
return rc;
}
int plpks_write_var(struct plpks_var var)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
struct plpks_auth *auth;
struct label *label;
int rc;
if (!var.component || !var.data || var.datalen <= 0 ||
var.namelen > PLPKS_MAX_NAME_SIZE || var.datalen > PLPKS_MAX_DATA_SIZE)
return -EINVAL;
if (var.policy & PLPKS_SIGNEDUPDATE)
return -EINVAL;
auth = construct_auth(PLPKS_OS_OWNER);
if (IS_ERR(auth))
return PTR_ERR(auth);
label = construct_label(var.component, var.os, var.name, var.namelen);
if (IS_ERR(label)) {
rc = PTR_ERR(label);
goto out;
}
rc = plpar_hcall(H_PKS_WRITE_OBJECT, retbuf, virt_to_phys(auth),
virt_to_phys(label), label->size, var.policy,
virt_to_phys(var.data), var.datalen);
if (!rc)
rc = plpks_confirm_object_flushed(label, auth);
rc = pseries_status_to_err(rc);
kfree(label);
out:
kfree(auth);
return rc;
}
int plpks_remove_var(char *component, u8 varos, struct plpks_var_name vname)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
struct plpks_auth *auth;
struct label *label;
int rc;
if (vname.namelen > PLPKS_MAX_NAME_SIZE)
return -EINVAL;
auth = construct_auth(PLPKS_OS_OWNER);
if (IS_ERR(auth))
return PTR_ERR(auth);
label = construct_label(component, varos, vname.name, vname.namelen);
if (IS_ERR(label)) {
rc = PTR_ERR(label);
goto out;
}
rc = plpar_hcall(H_PKS_REMOVE_OBJECT, retbuf, virt_to_phys(auth),
virt_to_phys(label), label->size);
if (!rc)
rc = plpks_confirm_object_flushed(label, auth);
rc = pseries_status_to_err(rc);
kfree(label);
out:
kfree(auth);
return rc;
}
static int plpks_read_var(u8 consumer, struct plpks_var *var)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
struct plpks_auth *auth;
struct label *label = NULL;
u8 *output;
int rc;
if (var->namelen > PLPKS_MAX_NAME_SIZE)
return -EINVAL;
auth = construct_auth(consumer);
if (IS_ERR(auth))
return PTR_ERR(auth);
if (consumer == PLPKS_OS_OWNER) {
label = construct_label(var->component, var->os, var->name,
var->namelen);
if (IS_ERR(label)) {
rc = PTR_ERR(label);
goto out_free_auth;
}
}
output = kzalloc(maxobjsize, GFP_KERNEL);
if (!output) {
rc = -ENOMEM;
goto out_free_label;
}
if (consumer == PLPKS_OS_OWNER)
rc = plpar_hcall(H_PKS_READ_OBJECT, retbuf, virt_to_phys(auth),
virt_to_phys(label), label->size, virt_to_phys(output),
maxobjsize);
else
rc = plpar_hcall(H_PKS_READ_OBJECT, retbuf, virt_to_phys(auth),
virt_to_phys(var->name), var->namelen, virt_to_phys(output),
maxobjsize);
if (rc != H_SUCCESS) {
rc = pseries_status_to_err(rc);
goto out_free_output;
}
if (!var->data || var->datalen > retbuf[0])
var->datalen = retbuf[0];
var->policy = retbuf[1];
if (var->data)
memcpy(var->data, output, var->datalen);
rc = 0;
out_free_output:
kfree(output);
out_free_label:
kfree(label);
out_free_auth:
kfree(auth);
return rc;
}
int plpks_read_os_var(struct plpks_var *var)
{
return plpks_read_var(PLPKS_OS_OWNER, var);
}
int plpks_read_fw_var(struct plpks_var *var)
{
return plpks_read_var(PLPKS_FW_OWNER, var);
}
int plpks_read_bootloader_var(struct plpks_var *var)
{
return plpks_read_var(PLPKS_BOOTLOADER_OWNER, var);
}
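/*
 * Usage sketch (hypothetical names, for illustration only): to read an
 * OS-owned object, fill in the component/name fields and call
 * plpks_read_os_var(). With ->data left NULL the call only reports the
 * object size, as handled in plpks_read_var() above:
 *
 *	struct plpks_var var = {
 *		.component = "example",			// hypothetical
 *		.name = (u8 *)"example-object",		// hypothetical
 *		.namelen = sizeof("example-object") - 1,
 *		.data = NULL,	// first call: just discover the size
 *	};
 *
 *	if (!plpks_read_os_var(&var))
 *		pr_info("object is %u bytes\n", var.datalen);
 *
 * A second call with a buffer of var.datalen bytes in ->data retrieves the
 * contents; ->os would be set to the appropriate PLPKS_VAR_* constant.
 */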
int plpks_populate_fdt(void *fdt)
{
int chosen_offset = fdt_path_offset(fdt, "/chosen");
if (chosen_offset < 0) {
pr_err("Can't find chosen node: %s\n",
fdt_strerror(chosen_offset));
return chosen_offset;
}
return fdt_setprop(fdt, chosen_offset, "ibm,plpks-pw", ospassword, ospasswordlength);
}
// Once a password is registered with the hypervisor it cannot be cleared without
// rebooting the LPAR, so to keep using the PLPKS across kexec boots we need to
// recover the previous password from the FDT.
//
// There are a few challenges here. We don't want the password to be visible to
// users, so we need to clear it from the FDT. This has to be done in early boot.
// Clearing it from the FDT would make the FDT's checksum invalid, so we have to
// manually cause the checksum to be recalculated.
void __init plpks_early_init_devtree(void)
{
void *fdt = initial_boot_params;
int chosen_node = fdt_path_offset(fdt, "/chosen");
const u8 *password;
int len;
if (chosen_node < 0)
return;
password = fdt_getprop(fdt, chosen_node, "ibm,plpks-pw", &len);
if (len <= 0) {
pr_debug("Couldn't find ibm,plpks-pw node.\n");
return;
}
ospassword = memblock_alloc_raw(len, SMP_CACHE_BYTES);
if (!ospassword) {
pr_err("Error allocating memory for password.\n");
goto out;
}
memcpy(ospassword, password, len);
ospasswordlength = (u16)len;
out:
fdt_nop_property(fdt, chosen_node, "ibm,plpks-pw");
// Since we've cleared the password, we must update the FDT checksum
early_init_dt_verify(fdt);
}
static __init int pseries_plpks_init(void)
{
int rc;
if (!firmware_has_feature(FW_FEATURE_PLPKS))
return -ENODEV;
rc = _plpks_get_config();
if (rc) {
pr_err("POWER LPAR Platform KeyStore is not supported or enabled\n");
return rc;
}
rc = plpks_gen_password();
if (rc)
pr_err("Failed setting POWER LPAR Platform KeyStore Password\n");
else
pr_info("POWER LPAR Platform KeyStore initialized successfully\n");
return rc;
}
machine_arch_initcall(pseries, pseries_plpks_init);
| linux-master | arch/powerpc/platforms/pseries/plpks.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* hvconsole.c
* Copyright (C) 2004 Hollis Blanchard, IBM Corporation
* Copyright (C) 2004 IBM Corporation
*
* Additional Author(s):
* Ryan S. Arnold <[email protected]>
*
* LPAR console support.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <asm/hvcall.h>
#include <asm/hvconsole.h>
#include <asm/plpar_wrappers.h>
/**
* hvc_get_chars - retrieve characters from firmware for denoted vterm adapter
* @vtermno: The vtermno or unit_address of the adapter from which to fetch the
* data.
* @buf: The character buffer into which to put the character data fetched from
* firmware.
 * @count: not used; this implementation always fetches up to 16 bytes.
*/
int hvc_get_chars(uint32_t vtermno, char *buf, int count)
{
long ret;
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
unsigned long *lbuf = (unsigned long *)buf;
ret = plpar_hcall(H_GET_TERM_CHAR, retbuf, vtermno);
lbuf[0] = be64_to_cpu(retbuf[1]);
lbuf[1] = be64_to_cpu(retbuf[2]);
if (ret == H_SUCCESS)
return retbuf[0];
return 0;
}
EXPORT_SYMBOL(hvc_get_chars);
/**
 * hvc_put_chars - send characters to firmware for denoted vterm adapter
 * @vtermno: The vtermno or unit_address of the adapter to which the data
 * is sent.
* @buf: The character buffer that contains the character data to send to
* firmware. Must be at least 16 bytes, even if count is less than 16.
* @count: Send this number of characters.
*/
int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
{
unsigned long *lbuf = (unsigned long *) buf;
long ret;
/* hcall will ret H_PARAMETER if 'count' exceeds firmware max.*/
if (count > MAX_VIO_PUT_CHARS)
count = MAX_VIO_PUT_CHARS;
ret = plpar_hcall_norets(H_PUT_TERM_CHAR, vtermno, count,
cpu_to_be64(lbuf[0]),
cpu_to_be64(lbuf[1]));
if (ret == H_SUCCESS)
return count;
if (ret == H_BUSY)
return -EAGAIN;
return -EIO;
}
EXPORT_SYMBOL(hvc_put_chars);
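/*
 * Usage sketch (illustrative): because the hcall consumes two 64-bit
 * values read from the buffer, callers must hand in at least 16 bytes even
 * for shorter strings, e.g.:
 *
 *	char buf[16] = "hi\n";		// zero-padded to 16 bytes
 *
 *	hvc_put_chars(vtermno, buf, 3);	// vtermno supplied by the hvc backend
 */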
| linux-master | arch/powerpc/platforms/pseries/hvconsole.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file implements the platform-dependent EEH operations for pseries.
 * The pseries platform is built heavily on RTAS, so the pseries EEH
 * operations are implemented on top of RTAS calls. The functions are derived
 * from arch/powerpc/platforms/pseries/eeh.c, with the necessary cleanup
 * applied.
*
* Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011.
* Copyright IBM Corporation 2001, 2005, 2006
* Copyright Dave Engebretsen & Todd Inglett 2001
* Copyright Linas Vepstas 2005, 2006
*/
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/crash_dump.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>
/* RTAS tokens */
static int ibm_set_eeh_option;
static int ibm_set_slot_reset;
static int ibm_read_slot_reset_state;
static int ibm_read_slot_reset_state2;
static int ibm_slot_error_detail;
static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_pe;
static void pseries_eeh_init_edev(struct pci_dn *pdn);
static void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
{
struct pci_dn *pdn = pci_get_pdn(pdev);
if (eeh_has_flag(EEH_FORCE_DISABLED))
return;
dev_dbg(&pdev->dev, "EEH: Setting up device\n");
#ifdef CONFIG_PCI_IOV
if (pdev->is_virtfn) {
pdn->device_id = pdev->device;
pdn->vendor_id = pdev->vendor;
pdn->class_code = pdev->class;
/*
* Last allow unfreeze return code used for retrieval
* by user space in eeh-sysfs to show the last command
* completion from platform.
*/
pdn->last_allow_rc = 0;
}
#endif
pseries_eeh_init_edev(pdn);
#ifdef CONFIG_PCI_IOV
if (pdev->is_virtfn) {
/*
* FIXME: This really should be handled by choosing the right
* parent PE in pseries_eeh_init_edev().
*/
struct eeh_pe *physfn_pe = pci_dev_to_eeh_dev(pdev->physfn)->pe;
struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
edev->pe_config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
eeh_pe_tree_remove(edev); /* Remove as it is adding to bus pe */
eeh_pe_tree_insert(edev, physfn_pe); /* Add as VF PE type */
}
#endif
eeh_probe_device(pdev);
}
/**
* pseries_eeh_get_pe_config_addr - Find the pe_config_addr for a device
* @pdn: pci_dn of the input device
*
* The EEH RTAS calls use a tuple consisting of: (buid_hi, buid_lo,
* pe_config_addr) as a handle to a given PE. This function finds the
* pe_config_addr based on the device's config addr.
*
* Keep in mind that the pe_config_addr *might* be numerically identical to the
* device's config addr, but the two are conceptually distinct.
*
* Returns the pe_config_addr, or a negative error code.
*/
static int pseries_eeh_get_pe_config_addr(struct pci_dn *pdn)
{
int config_addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
struct pci_controller *phb = pdn->phb;
int ret, rets[3];
if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
/*
* First of all, use function 1 to determine if this device is
* part of a PE or not. ret[0] being zero indicates it's not.
*/
ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
config_addr, BUID_HI(phb->buid),
BUID_LO(phb->buid), 1);
if (ret || (rets[0] == 0))
return -ENOENT;
/* Retrieve the associated PE config address with function 0 */
ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
config_addr, BUID_HI(phb->buid),
BUID_LO(phb->buid), 0);
if (ret) {
pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
__func__, phb->global_number, config_addr);
return -ENXIO;
}
return rets[0];
}
if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
config_addr, BUID_HI(phb->buid),
BUID_LO(phb->buid), 0);
if (ret) {
pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
__func__, phb->global_number, config_addr);
return -ENXIO;
}
return rets[0];
}
/*
* PAPR does describe a process for finding the pe_config_addr that was
* used before the ibm,get-config-addr-info calls were added. However,
* I haven't found *any* systems that don't have that RTAS call
* implemented. If you happen to find one that needs the old DT based
* process, patches are welcome!
*/
return -ENOENT;
}
/**
* pseries_eeh_phb_reset - Reset the specified PHB
* @phb: PCI controller
* @config_addr: the associated config address
* @option: reset option
*
* Reset the specified PHB/PE
*/
static int pseries_eeh_phb_reset(struct pci_controller *phb, int config_addr, int option)
{
int ret;
/* Reset PE through RTAS call */
ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
config_addr, BUID_HI(phb->buid),
BUID_LO(phb->buid), option);
/* If fundamental-reset not supported, try hot-reset */
if (option == EEH_RESET_FUNDAMENTAL && ret == -8) {
option = EEH_RESET_HOT;
ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
config_addr, BUID_HI(phb->buid),
BUID_LO(phb->buid), option);
}
/* We need reset hold or settlement delay */
if (option == EEH_RESET_FUNDAMENTAL || option == EEH_RESET_HOT)
msleep(EEH_PE_RST_HOLD_TIME);
else
msleep(EEH_PE_RST_SETTLE_TIME);
return ret;
}
/**
* pseries_eeh_phb_configure_bridge - Configure PCI bridges in the indicated PE
* @phb: PCI controller
* @config_addr: the associated config address
*
 * The function is called to reconfigure the bridges included in the
 * specified PE so that the malfunctioning PE can be recovered.
*/
static int pseries_eeh_phb_configure_bridge(struct pci_controller *phb, int config_addr)
{
int ret;
/* Waiting 0.2s maximum before skipping configuration */
int max_wait = 200;
while (max_wait > 0) {
ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
config_addr, BUID_HI(phb->buid),
BUID_LO(phb->buid));
if (!ret)
return ret;
if (ret < 0)
break;
/*
* If RTAS returns a delay value that's above 100ms, cut it
* down to 100ms in case firmware made a mistake. For more
* on how these delay values work see rtas_busy_delay_time
*/
if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
ret <= RTAS_EXTENDED_DELAY_MAX)
ret = RTAS_EXTENDED_DELAY_MIN+2;
max_wait -= rtas_busy_delay_time(ret);
if (max_wait < 0)
break;
rtas_busy_delay(ret);
}
pr_warn("%s: Unable to configure bridge PHB#%x-PE#%x (%d)\n",
__func__, phb->global_number, config_addr, ret);
/* PAPR defines -3 as "Parameter Error" for this function: */
if (ret == -3)
return -EINVAL;
else
return -EIO;
}
/*
 * Buffer for reporting slot-error-detail rtas calls. It's here
 * in BSS, and not dynamically allocated, so that it ends up in
* RMO where RTAS can access it.
*/
static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(slot_errbuf_lock);
static int eeh_error_buf_size;
static int pseries_eeh_cap_start(struct pci_dn *pdn)
{
u32 status;
if (!pdn)
return 0;
rtas_read_config(pdn, PCI_STATUS, 2, &status);
if (!(status & PCI_STATUS_CAP_LIST))
return 0;
return PCI_CAPABILITY_LIST;
}
static int pseries_eeh_find_cap(struct pci_dn *pdn, int cap)
{
int pos = pseries_eeh_cap_start(pdn);
int cnt = 48; /* Maximal number of capabilities */
u32 id;
if (!pos)
return 0;
while (cnt--) {
rtas_read_config(pdn, pos, 1, &pos);
if (pos < 0x40)
break;
pos &= ~3;
rtas_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
if (id == 0xff)
break;
if (id == cap)
return pos;
pos += PCI_CAP_LIST_NEXT;
}
return 0;
}
static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap)
{
struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
u32 header;
int pos = 256;
int ttl = (4096 - 256) / 8;
if (!edev || !edev->pcie_cap)
return 0;
if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
return 0;
else if (!header)
return 0;
while (ttl-- > 0) {
if (PCI_EXT_CAP_ID(header) == cap && pos)
return pos;
pos = PCI_EXT_CAP_NEXT(header);
if (pos < 256)
break;
if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
break;
}
return 0;
}
/**
* pseries_eeh_pe_get_parent - Retrieve the parent PE
* @edev: EEH device
*
* The whole PEs existing in the system are organized as hierarchy
* tree. The function is used to retrieve the parent PE according
* to the parent EEH device.
*/
static struct eeh_pe *pseries_eeh_pe_get_parent(struct eeh_dev *edev)
{
struct eeh_dev *parent;
struct pci_dn *pdn = eeh_dev_to_pdn(edev);
/*
 * It can happen that an indirect parent EEH device
 * already has an associated PE while the direct
 * parent EEH device does not have one yet.
*/
if (edev->physfn)
pdn = pci_get_pdn(edev->physfn);
else
pdn = pdn ? pdn->parent : NULL;
while (pdn) {
/* We're poking out of PCI territory */
parent = pdn_to_eeh_dev(pdn);
if (!parent)
return NULL;
if (parent->pe)
return parent->pe;
pdn = pdn->parent;
}
return NULL;
}
/**
* pseries_eeh_init_edev - initialise the eeh_dev and eeh_pe for a pci_dn
*
* @pdn: PCI device node
*
* When we discover a new PCI device via the device-tree we create a
* corresponding pci_dn and we allocate, but don't initialise, an eeh_dev.
* This function takes care of the initialisation and inserts the eeh_dev
* into the correct eeh_pe. If no eeh_pe exists we'll allocate one.
*/
static void pseries_eeh_init_edev(struct pci_dn *pdn)
{
struct eeh_pe pe, *parent;
struct eeh_dev *edev;
u32 pcie_flags;
int ret;
if (WARN_ON_ONCE(!eeh_has_flag(EEH_PROBE_MODE_DEVTREE)))
return;
/*
* Find the eeh_dev for this pdn. The storage for the eeh_dev was
* allocated at the same time as the pci_dn.
*
* XXX: We should probably re-visit that.
*/
edev = pdn_to_eeh_dev(pdn);
if (!edev)
return;
/*
* If ->pe is set then we've already probed this device. We hit
* this path when a pci_dev is removed and rescanned while recovering
* a PE (i.e. for devices where the driver doesn't support error
* recovery).
*/
if (edev->pe)
return;
/* Check class/vendor/device IDs */
if (!pdn->vendor_id || !pdn->device_id || !pdn->class_code)
return;
/* Skip for PCI-ISA bridge */
if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
return;
eeh_edev_dbg(edev, "Probing device\n");
/*
 * Update the class code and mode of the eeh device so that
 * they correctly reflect whether the current device is a
 * root port or a PCIe switch downstream port.
*/
edev->pcix_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
edev->pcie_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
edev->aer_cap = pseries_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
edev->mode &= 0xFFFFFF00;
if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
edev->mode |= EEH_DEV_BRIDGE;
if (edev->pcie_cap) {
rtas_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
2, &pcie_flags);
pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
edev->mode |= EEH_DEV_ROOT_PORT;
else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
edev->mode |= EEH_DEV_DS_PORT;
}
}
/* first up, find the pe_config_addr for the PE containing the device */
ret = pseries_eeh_get_pe_config_addr(pdn);
if (ret < 0) {
eeh_edev_dbg(edev, "Unable to find pe_config_addr\n");
goto err;
}
/* Try enable EEH on the fake PE */
memset(&pe, 0, sizeof(struct eeh_pe));
pe.phb = pdn->phb;
pe.addr = ret;
eeh_edev_dbg(edev, "Enabling EEH on device\n");
ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
if (ret) {
eeh_edev_dbg(edev, "EEH failed to enable on device (code %d)\n", ret);
goto err;
}
edev->pe_config_addr = pe.addr;
eeh_add_flag(EEH_ENABLED);
parent = pseries_eeh_pe_get_parent(edev);
eeh_pe_tree_insert(edev, parent);
eeh_save_bars(edev);
eeh_edev_dbg(edev, "EEH enabled for device");
return;
err:
eeh_edev_dbg(edev, "EEH is unsupported on device (code = %d)\n", ret);
}
static struct eeh_dev *pseries_eeh_probe(struct pci_dev *pdev)
{
struct eeh_dev *edev;
struct pci_dn *pdn;
pdn = pci_get_pdn_by_devfn(pdev->bus, pdev->devfn);
if (!pdn)
return NULL;
/*
* If the system supports EEH on this device then the eeh_dev was
* configured and inserted into a PE in pseries_eeh_init_edev()
*/
edev = pdn_to_eeh_dev(pdn);
if (!edev || !edev->pe)
return NULL;
return edev;
}
/**
* pseries_eeh_init_edev_recursive - Enable EEH for the indicated device
* @pdn: PCI device node
*
* This routine must be used to perform EEH initialization for the
* indicated PCI device that was added after system boot (e.g.
* hotplug, dlpar).
*/
void pseries_eeh_init_edev_recursive(struct pci_dn *pdn)
{
struct pci_dn *n;
if (!pdn)
return;
list_for_each_entry(n, &pdn->child_list, list)
pseries_eeh_init_edev_recursive(n);
pseries_eeh_init_edev(pdn);
}
EXPORT_SYMBOL_GPL(pseries_eeh_init_edev_recursive);
/**
* pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
* @pe: EEH PE
* @option: operation to be issued
*
* The function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
* Enable EEH, Disable EEH, Enable MMIO and Enable DMA
*/
static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
{
int ret = 0;
/*
* When we're enabling or disabling EEH functionality on
* the particular PE, the PE config address is possibly
* unavailable. Therefore, we have to figure it out from
* the FDT node.
*/
switch (option) {
case EEH_OPT_DISABLE:
case EEH_OPT_ENABLE:
case EEH_OPT_THAW_MMIO:
case EEH_OPT_THAW_DMA:
break;
case EEH_OPT_FREEZE_PE:
/* Not support */
return 0;
default:
pr_err("%s: Invalid option %d\n", __func__, option);
return -EINVAL;
}
ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
pe->addr, BUID_HI(pe->phb->buid),
BUID_LO(pe->phb->buid), option);
return ret;
}
/**
* pseries_eeh_get_state - Retrieve PE state
* @pe: EEH PE
* @delay: suggested time to wait if state is unavailable
*
 * Retrieve the state of the specified PE. On an RTAS-compliant
 * pseries platform there is a dedicated RTAS function for this
 * purpose. Note that the associated PE config address might already
 * be available when calling the function, so endeavour to use it if
 * possible. Furthermore, there are two RTAS calls for the purpose;
 * try the newer one first and fall back to the old one if the new
 * one doesn't work properly.
*/
static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay)
{
int ret;
int rets[4];
int result;
if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets,
pe->addr, BUID_HI(pe->phb->buid),
BUID_LO(pe->phb->buid));
} else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) {
/* Fake PE unavailable info */
rets[2] = 0;
ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
pe->addr, BUID_HI(pe->phb->buid),
BUID_LO(pe->phb->buid));
} else {
return EEH_STATE_NOT_SUPPORT;
}
if (ret)
return ret;
/* Parse the result out */
if (!rets[1])
return EEH_STATE_NOT_SUPPORT;
switch(rets[0]) {
case 0:
result = EEH_STATE_MMIO_ACTIVE |
EEH_STATE_DMA_ACTIVE;
break;
case 1:
result = EEH_STATE_RESET_ACTIVE |
EEH_STATE_MMIO_ACTIVE |
EEH_STATE_DMA_ACTIVE;
break;
case 2:
result = 0;
break;
case 4:
result = EEH_STATE_MMIO_ENABLED;
break;
case 5:
if (rets[2]) {
if (delay)
*delay = rets[2];
result = EEH_STATE_UNAVAILABLE;
} else {
result = EEH_STATE_NOT_SUPPORT;
}
break;
default:
result = EEH_STATE_NOT_SUPPORT;
}
return result;
}
/**
* pseries_eeh_reset - Reset the specified PE
* @pe: EEH PE
* @option: reset option
*
* Reset the specified PE
*/
static int pseries_eeh_reset(struct eeh_pe *pe, int option)
{
return pseries_eeh_phb_reset(pe->phb, pe->addr, option);
}
/**
* pseries_eeh_get_log - Retrieve error log
* @pe: EEH PE
* @severity: temporary or permanent error log
* @drv_log: driver log to be combined with retrieved error log
* @len: length of driver log
*
* Retrieve the temporary or permanent error from the PE.
* Actually, the error will be retrieved through the dedicated
* RTAS call.
*/
static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&slot_errbuf_lock, flags);
memset(slot_errbuf, 0, eeh_error_buf_size);
ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, pe->addr,
BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid),
virt_to_phys(drv_log), len,
virt_to_phys(slot_errbuf), eeh_error_buf_size,
severity);
if (!ret)
log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
spin_unlock_irqrestore(&slot_errbuf_lock, flags);
return ret;
}
/**
* pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
* @pe: EEH PE
*
*/
static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
{
return pseries_eeh_phb_configure_bridge(pe->phb, pe->addr);
}
/**
* pseries_eeh_read_config - Read PCI config space
* @edev: EEH device handle
* @where: PCI config space offset
* @size: size to read
* @val: return value
*
 * Read config space from the specified device
*/
static int pseries_eeh_read_config(struct eeh_dev *edev, int where, int size, u32 *val)
{
struct pci_dn *pdn = eeh_dev_to_pdn(edev);
return rtas_read_config(pdn, where, size, val);
}
/**
* pseries_eeh_write_config - Write PCI config space
* @edev: EEH device handle
* @where: PCI config space offset
* @size: size to write
* @val: value to be written
*
* Write config space to the specified device
*/
static int pseries_eeh_write_config(struct eeh_dev *edev, int where, int size, u32 val)
{
struct pci_dn *pdn = eeh_dev_to_pdn(edev);
return rtas_write_config(pdn, where, size, val);
}
#ifdef CONFIG_PCI_IOV
static int pseries_send_allow_unfreeze(struct pci_dn *pdn, u16 *vf_pe_array, int cur_vfs)
{
int rc;
int ibm_allow_unfreeze = rtas_function_token(RTAS_FN_IBM_OPEN_SRIOV_ALLOW_UNFREEZE);
unsigned long buid, addr;
addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
buid = pdn->phb->buid;
spin_lock(&rtas_data_buf_lock);
memcpy(rtas_data_buf, vf_pe_array, RTAS_DATA_BUF_SIZE);
rc = rtas_call(ibm_allow_unfreeze, 5, 1, NULL,
addr,
BUID_HI(buid),
BUID_LO(buid),
rtas_data_buf, cur_vfs * sizeof(u16));
spin_unlock(&rtas_data_buf_lock);
if (rc)
pr_warn("%s: Failed to allow unfreeze for PHB#%x-PE#%lx, rc=%x\n",
__func__,
pdn->phb->global_number, addr, rc);
return rc;
}
static int pseries_call_allow_unfreeze(struct eeh_dev *edev)
{
int cur_vfs = 0, rc = 0, vf_index, bus, devfn, vf_pe_num;
struct pci_dn *pdn, *tmp, *parent, *physfn_pdn;
u16 *vf_pe_array;
vf_pe_array = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
if (!vf_pe_array)
return -ENOMEM;
if (pci_num_vf(edev->physfn ? edev->physfn : edev->pdev)) {
if (edev->pdev->is_physfn) {
cur_vfs = pci_num_vf(edev->pdev);
pdn = eeh_dev_to_pdn(edev);
parent = pdn->parent;
for (vf_index = 0; vf_index < cur_vfs; vf_index++)
vf_pe_array[vf_index] =
cpu_to_be16(pdn->pe_num_map[vf_index]);
rc = pseries_send_allow_unfreeze(pdn, vf_pe_array,
cur_vfs);
pdn->last_allow_rc = rc;
for (vf_index = 0; vf_index < cur_vfs; vf_index++) {
list_for_each_entry_safe(pdn, tmp,
&parent->child_list,
list) {
bus = pci_iov_virtfn_bus(edev->pdev,
vf_index);
devfn = pci_iov_virtfn_devfn(edev->pdev,
vf_index);
if (pdn->busno != bus ||
pdn->devfn != devfn)
continue;
pdn->last_allow_rc = rc;
}
}
} else {
pdn = pci_get_pdn(edev->pdev);
physfn_pdn = pci_get_pdn(edev->physfn);
vf_pe_num = physfn_pdn->pe_num_map[edev->vf_index];
vf_pe_array[0] = cpu_to_be16(vf_pe_num);
rc = pseries_send_allow_unfreeze(physfn_pdn,
vf_pe_array, 1);
pdn->last_allow_rc = rc;
}
}
kfree(vf_pe_array);
return rc;
}
static int pseries_notify_resume(struct eeh_dev *edev)
{
if (!edev)
return -EEXIST;
if (rtas_function_token(RTAS_FN_IBM_OPEN_SRIOV_ALLOW_UNFREEZE) == RTAS_UNKNOWN_SERVICE)
return -EINVAL;
if (edev->pdev->is_physfn || edev->pdev->is_virtfn)
return pseries_call_allow_unfreeze(edev);
return 0;
}
#endif
static struct eeh_ops pseries_eeh_ops = {
.name = "pseries",
.probe = pseries_eeh_probe,
.set_option = pseries_eeh_set_option,
.get_state = pseries_eeh_get_state,
.reset = pseries_eeh_reset,
.get_log = pseries_eeh_get_log,
.configure_bridge = pseries_eeh_configure_bridge,
.err_inject = NULL,
.read_config = pseries_eeh_read_config,
.write_config = pseries_eeh_write_config,
.next_error = NULL,
.restore_config = NULL, /* NB: configure_bridge() does this */
#ifdef CONFIG_PCI_IOV
.notify_resume = pseries_notify_resume
#endif
};
/**
* eeh_pseries_init - Register platform dependent EEH operations
*
* EEH initialization on pseries platform. This function should be
* called before any EEH related functions.
*/
static int __init eeh_pseries_init(void)
{
struct pci_controller *phb;
struct pci_dn *pdn;
int ret, config_addr;
/* figure out EEH RTAS function call tokens */
ibm_set_eeh_option = rtas_function_token(RTAS_FN_IBM_SET_EEH_OPTION);
ibm_set_slot_reset = rtas_function_token(RTAS_FN_IBM_SET_SLOT_RESET);
ibm_read_slot_reset_state2 = rtas_function_token(RTAS_FN_IBM_READ_SLOT_RESET_STATE2);
ibm_read_slot_reset_state = rtas_function_token(RTAS_FN_IBM_READ_SLOT_RESET_STATE);
ibm_slot_error_detail = rtas_function_token(RTAS_FN_IBM_SLOT_ERROR_DETAIL);
ibm_get_config_addr_info2 = rtas_function_token(RTAS_FN_IBM_GET_CONFIG_ADDR_INFO2);
ibm_get_config_addr_info = rtas_function_token(RTAS_FN_IBM_GET_CONFIG_ADDR_INFO);
ibm_configure_pe = rtas_function_token(RTAS_FN_IBM_CONFIGURE_PE);
/*
* ibm,configure-pe and ibm,configure-bridge have the same semantics,
* however ibm,configure-pe can be faster. If we can't find
* ibm,configure-pe then fall back to using ibm,configure-bridge.
*/
if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE)
ibm_configure_pe = rtas_function_token(RTAS_FN_IBM_CONFIGURE_BRIDGE);
/*
* Necessary sanity check. We needn't check "get-config-addr-info"
 * and its variant since old firmware probably supports addresses in
 * domain/bus/slot/function form for EEH RTAS operations.
*/
if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE ||
ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE ||
(ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) ||
ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE ||
ibm_configure_pe == RTAS_UNKNOWN_SERVICE) {
pr_info("EEH functionality not supported\n");
return -EINVAL;
}
/* Initialize error log size */
eeh_error_buf_size = rtas_get_error_log_max();
/* Set EEH probe mode */
eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG);
/* Set EEH machine dependent code */
ppc_md.pcibios_bus_add_device = pseries_pcibios_bus_add_device;
if (is_kdump_kernel() || reset_devices) {
pr_info("Issue PHB reset ...\n");
list_for_each_entry(phb, &hose_list, list_node) {
// Skip if the slot is empty
if (list_empty(&PCI_DN(phb->dn)->child_list))
continue;
pdn = list_first_entry(&PCI_DN(phb->dn)->child_list, struct pci_dn, list);
config_addr = pseries_eeh_get_pe_config_addr(pdn);
/* invalid PE config addr */
if (config_addr < 0)
continue;
pseries_eeh_phb_reset(phb, config_addr, EEH_RESET_FUNDAMENTAL);
pseries_eeh_phb_reset(phb, config_addr, EEH_RESET_DEACTIVATE);
pseries_eeh_phb_configure_bridge(phb, config_addr);
}
}
ret = eeh_init(&pseries_eeh_ops);
if (!ret)
pr_info("EEH: pSeries platform initialized\n");
else
pr_info("EEH: pSeries platform initialization failure (%d)\n",
ret);
return ret;
}
machine_arch_initcall(pseries, eeh_pseries_init);
| linux-master | arch/powerpc/platforms/pseries/eeh_pseries.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2001 Dave Engebretsen IBM Corporation
*/
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include "pseries.h"
void __init request_event_sources_irqs(struct device_node *np,
irq_handler_t handler,
const char *name)
{
int i, virq, rc;
for (i = 0; i < 16; i++) {
virq = of_irq_get(np, i);
if (virq < 0)
return;
if (WARN(!virq, "event-sources: Unable to allocate "
"interrupt number for %pOF\n", np))
continue;
rc = request_irq(virq, handler, 0, name, NULL);
if (WARN(rc, "event-sources: Unable to request interrupt %d for %pOF\n",
virq, np))
return;
}
}
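/*
 * Usage sketch (illustrative, names hypothetical): platform code that owns an
 * event-sources node wires its handler up roughly like this:
 *
 *	struct device_node *np;
 *
 *	np = of_find_node_by_path("/event-sources/example");
 *	if (np) {
 *		request_event_sources_irqs(np, example_interrupt, "EXAMPLE");
 *		of_node_put(np);
 *	}
 */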
| linux-master | arch/powerpc/platforms/pseries/event_sources.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Virtual Processor Dispatch Trace Log
*
* (C) Copyright IBM Corporation 2009
*
* Author: Jeremy Kerr <[email protected]>
*/
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <asm/firmware.h>
#include <asm/dtl.h>
#include <asm/lppaca.h>
#include <asm/plpar_wrappers.h>
#include <asm/machdep.h>
#ifdef CONFIG_DTL
struct dtl {
struct dtl_entry *buf;
int cpu;
int buf_entries;
u64 last_idx;
spinlock_t lock;
};
static DEFINE_PER_CPU(struct dtl, cpu_dtl);
static u8 dtl_event_mask = DTL_LOG_ALL;
/*
* Size of per-cpu log buffers. Firmware requires that the buffer does
* not cross a 4k boundary.
*/
static int dtl_buf_entries = N_DISPATCH_LOG;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
* When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
* reading from the dispatch trace log. If other code wants to consume
* DTL entries, it can set this pointer to a function that will get
* called once for each DTL entry that gets processed.
*/
static void (*dtl_consumer)(struct dtl_entry *entry, u64 index);
struct dtl_ring {
u64 write_index;
struct dtl_entry *write_ptr;
struct dtl_entry *buf;
struct dtl_entry *buf_end;
};
static DEFINE_PER_CPU(struct dtl_ring, dtl_rings);
static atomic_t dtl_count;
/*
* The cpu accounting code controls the DTL ring buffer, and we get
* given entries as they are processed.
*/
static void consume_dtle(struct dtl_entry *dtle, u64 index)
{
struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
struct dtl_entry *wp = dtlr->write_ptr;
struct lppaca *vpa = local_paca->lppaca_ptr;
if (!wp)
return;
*wp = *dtle;
barrier();
/* check for hypervisor ring buffer overflow, ignore this entry if so */
if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
return;
++wp;
if (wp == dtlr->buf_end)
wp = dtlr->buf;
dtlr->write_ptr = wp;
/* incrementing write_index makes the new entry visible */
smp_wmb();
++dtlr->write_index;
}
static int dtl_start(struct dtl *dtl)
{
struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);
dtlr->buf = dtl->buf;
dtlr->buf_end = dtl->buf + dtl->buf_entries;
dtlr->write_index = 0;
/* setting write_ptr enables logging into our buffer */
smp_wmb();
dtlr->write_ptr = dtl->buf;
/* enable event logging */
lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;
dtl_consumer = consume_dtle;
atomic_inc(&dtl_count);
return 0;
}
static void dtl_stop(struct dtl *dtl)
{
struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);
dtlr->write_ptr = NULL;
smp_wmb();
dtlr->buf = NULL;
/* restore dtl_enable_mask */
lppaca_of(dtl->cpu).dtl_enable_mask = DTL_LOG_PREEMPT;
if (atomic_dec_and_test(&dtl_count))
dtl_consumer = NULL;
}
static u64 dtl_current_index(struct dtl *dtl)
{
return per_cpu(dtl_rings, dtl->cpu).write_index;
}
#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static int dtl_start(struct dtl *dtl)
{
unsigned long addr;
int ret, hwcpu;
/* Register our dtl buffer with the hypervisor. The HV expects the
* buffer size to be passed in the second word of the buffer */
((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES);
hwcpu = get_hard_smp_processor_id(dtl->cpu);
addr = __pa(dtl->buf);
ret = register_dtl(hwcpu, addr);
if (ret) {
printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) "
"failed with %d\n", __func__, dtl->cpu, hwcpu, ret);
return -EIO;
}
/* set our initial buffer indices */
lppaca_of(dtl->cpu).dtl_idx = 0;
/* ensure that our updates to the lppaca fields have occurred before
* we actually enable the logging */
smp_wmb();
/* enable event logging */
lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;
return 0;
}
static void dtl_stop(struct dtl *dtl)
{
int hwcpu = get_hard_smp_processor_id(dtl->cpu);
lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;
unregister_dtl(hwcpu);
}
static u64 dtl_current_index(struct dtl *dtl)
{
return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static int dtl_enable(struct dtl *dtl)
{
long int n_entries;
long int rc;
struct dtl_entry *buf = NULL;
if (!dtl_cache)
return -ENOMEM;
/* only allow one reader */
if (dtl->buf)
return -EBUSY;
/* ensure there are no other conflicting dtl users */
if (!read_trylock(&dtl_access_lock))
return -EBUSY;
n_entries = dtl_buf_entries;
buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
if (!buf) {
printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
__func__, dtl->cpu);
read_unlock(&dtl_access_lock);
return -ENOMEM;
}
spin_lock(&dtl->lock);
rc = -EBUSY;
if (!dtl->buf) {
/* store the original allocation size for use during read */
dtl->buf_entries = n_entries;
dtl->buf = buf;
dtl->last_idx = 0;
rc = dtl_start(dtl);
if (rc)
dtl->buf = NULL;
}
spin_unlock(&dtl->lock);
if (rc) {
read_unlock(&dtl_access_lock);
kmem_cache_free(dtl_cache, buf);
}
return rc;
}
static void dtl_disable(struct dtl *dtl)
{
spin_lock(&dtl->lock);
dtl_stop(dtl);
kmem_cache_free(dtl_cache, dtl->buf);
dtl->buf = NULL;
dtl->buf_entries = 0;
spin_unlock(&dtl->lock);
read_unlock(&dtl_access_lock);
}
/* file interface */
static int dtl_file_open(struct inode *inode, struct file *filp)
{
struct dtl *dtl = inode->i_private;
int rc;
rc = dtl_enable(dtl);
if (rc)
return rc;
filp->private_data = dtl;
return 0;
}
static int dtl_file_release(struct inode *inode, struct file *filp)
{
struct dtl *dtl = inode->i_private;
dtl_disable(dtl);
return 0;
}
static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
loff_t *pos)
{
long int rc, n_read, n_req, read_size;
struct dtl *dtl;
u64 cur_idx, last_idx, i;
if ((len % sizeof(struct dtl_entry)) != 0)
return -EINVAL;
dtl = filp->private_data;
/* requested number of entries to read */
n_req = len / sizeof(struct dtl_entry);
/* actual number of entries read */
n_read = 0;
spin_lock(&dtl->lock);
cur_idx = dtl_current_index(dtl);
last_idx = dtl->last_idx;
if (last_idx + dtl->buf_entries <= cur_idx)
last_idx = cur_idx - dtl->buf_entries + 1;
if (last_idx + n_req > cur_idx)
n_req = cur_idx - last_idx;
if (n_req > 0)
dtl->last_idx = last_idx + n_req;
spin_unlock(&dtl->lock);
if (n_req <= 0)
return 0;
i = last_idx % dtl->buf_entries;
/* read the tail of the buffer if we've wrapped */
if (i + n_req > dtl->buf_entries) {
read_size = dtl->buf_entries - i;
rc = copy_to_user(buf, &dtl->buf[i],
read_size * sizeof(struct dtl_entry));
if (rc)
return -EFAULT;
i = 0;
n_req -= read_size;
n_read += read_size;
buf += read_size * sizeof(struct dtl_entry);
}
/* .. and now the head */
rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry));
if (rc)
return -EFAULT;
n_read += n_req;
return n_read * sizeof(struct dtl_entry);
}
static const struct file_operations dtl_fops = {
.open = dtl_file_open,
.release = dtl_file_release,
.read = dtl_file_read,
.llseek = no_llseek,
};
static struct dentry *dtl_dir;
static void dtl_setup_file(struct dtl *dtl)
{
char name[10];
sprintf(name, "cpu-%d", dtl->cpu);
debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
}
static int dtl_init(void)
{
int i;
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
return -ENODEV;
/* set up common debugfs structure */
dtl_dir = debugfs_create_dir("dtl", arch_debugfs_dir);
debugfs_create_x8("dtl_event_mask", 0600, dtl_dir, &dtl_event_mask);
debugfs_create_u32("dtl_buf_entries", 0400, dtl_dir, &dtl_buf_entries);
/* set up the per-cpu log structures */
for_each_possible_cpu(i) {
struct dtl *dtl = &per_cpu(cpu_dtl, i);
spin_lock_init(&dtl->lock);
dtl->cpu = i;
dtl_setup_file(dtl);
}
return 0;
}
machine_arch_initcall(pseries, dtl_init);
#endif /* CONFIG_DTL */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
* Scan the dispatch trace log and count up the stolen time.
* Should be called with interrupts disabled.
*/
static notrace u64 scan_dispatch_log(u64 stop_tb)
{
u64 i = local_paca->dtl_ridx;
struct dtl_entry *dtl = local_paca->dtl_curr;
struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
struct lppaca *vpa = local_paca->lppaca_ptr;
u64 tb_delta;
u64 stolen = 0;
u64 dtb;
if (!dtl)
return 0;
if (i == be64_to_cpu(vpa->dtl_idx))
return 0;
while (i < be64_to_cpu(vpa->dtl_idx)) {
dtb = be64_to_cpu(dtl->timebase);
tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
be32_to_cpu(dtl->ready_to_enqueue_time);
barrier();
if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
/* buffer has overflowed */
i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
continue;
}
if (dtb > stop_tb)
break;
#ifdef CONFIG_DTL
if (dtl_consumer)
dtl_consumer(dtl, i);
#endif
stolen += tb_delta;
++i;
++dtl;
if (dtl == dtl_end)
dtl = local_paca->dispatch_log;
}
local_paca->dtl_ridx = i;
local_paca->dtl_curr = dtl;
return stolen;
}
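/*
* Put differently, the stolen time accumulated by scan_dispatch_log() is
* the sum of enqueue_to_dispatch_time + ready_to_enqueue_time over every
* DTL entry whose timebase is <= stop_tb; when the ring buffer has
* overflowed, entries older than the most recent N_DISPATCH_LOG are
* skipped.
*/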
/*
* Accumulate stolen time by scanning the dispatch trace log.
* Called on entry from user mode.
*/
void notrace pseries_accumulate_stolen_time(void)
{
u64 sst, ust;
struct cpu_accounting_data *acct = &local_paca->accounting;
sst = scan_dispatch_log(acct->starttime_user);
ust = scan_dispatch_log(acct->starttime);
acct->stime -= sst;
acct->utime -= ust;
acct->steal_time += ust + sst;
}
u64 pseries_calculate_stolen_time(u64 stop_tb)
{
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
return 0;
if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
return scan_dispatch_log(stop_tb);
return 0;
}
#endif
| linux-master | arch/powerpc/platforms/pseries/dtl.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SMP support for pSeries machines.
*
* Dave Engebretsen, Peter Bergner, and
* Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
*
* Plus various changes from other IBM teams...
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/paca.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/rtas.h>
#include <asm/vdso_datapage.h>
#include <asm/cputhreads.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/plpar_wrappers.h>
#include <asm/code-patching.h>
#include <asm/svm.h>
#include <asm/kvm_guest.h>
#include "pseries.h"
/*
* The Primary thread of each non-boot processor was started from the OF client
* interface by prom_hold_cpus and is spinning on secondary_hold_spinloop.
*/
static cpumask_var_t of_spin_mask;
/* Query where a cpu is now. Return codes #defined in plpar_wrappers.h */
int smp_query_cpu_stopped(unsigned int pcpu)
{
int cpu_status, status;
int qcss_tok = rtas_function_token(RTAS_FN_QUERY_CPU_STOPPED_STATE);
if (qcss_tok == RTAS_UNKNOWN_SERVICE) {
printk_once(KERN_INFO
"Firmware doesn't support query-cpu-stopped-state\n");
return QCSS_HARDWARE_ERROR;
}
status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
if (status != 0) {
printk(KERN_ERR
"RTAS query-cpu-stopped-state failed: %i\n", status);
return status;
}
return cpu_status;
}
/**
* smp_startup_cpu() - start the given cpu
* @lcpu: logical cpu number of the processor to start
*
* At boot time, there is nothing to do for primary threads which were
* started from Open Firmware. For anything else, call RTAS with the
* appropriate start location.
*
* Returns:
* 0 - failure
* 1 - success
*/
static inline int smp_startup_cpu(unsigned int lcpu)
{
int status;
unsigned long start_here =
__pa(ppc_function_entry(generic_secondary_smp_init));
unsigned int pcpu;
int start_cpu;
if (cpumask_test_cpu(lcpu, of_spin_mask))
/* Already started by OF and sitting in spin loop */
return 1;
pcpu = get_hard_smp_processor_id(lcpu);
/* Check to see if the CPU is already out of FW (e.g. for kexec) */
if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED) {
cpumask_set_cpu(lcpu, of_spin_mask);
return 1;
}
/*
* If the RTAS start-cpu token does not exist then presume the
* cpu is already spinning.
*/
start_cpu = rtas_function_token(RTAS_FN_START_CPU);
if (start_cpu == RTAS_UNKNOWN_SERVICE)
return 1;
status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, pcpu);
if (status != 0) {
printk(KERN_ERR "start-cpu failed: %i\n", status);
return 0;
}
return 1;
}
static void smp_setup_cpu(int cpu)
{
if (xive_enabled())
xive_smp_setup_cpu();
else if (cpu != boot_cpuid)
xics_setup_cpu();
if (firmware_has_feature(FW_FEATURE_SPLPAR))
vpa_init(cpu);
cpumask_clear_cpu(cpu, of_spin_mask);
}
static int smp_pSeries_kick_cpu(int nr)
{
if (nr < 0 || nr >= nr_cpu_ids)
return -EINVAL;
if (!smp_startup_cpu(nr))
return -ENOENT;
/*
* The processor is currently spinning, waiting for the
* cpu_start field to become non-zero. After we set cpu_start,
* the processor will continue on to secondary_start.
*/
paca_ptrs[nr]->cpu_start = 1;
return 0;
}
static int pseries_smp_prepare_cpu(int cpu)
{
if (xive_enabled())
return xive_smp_prepare_cpu(cpu);
return 0;
}
/* Cause IPI as setup by the interrupt controller (xics or xive) */
static void (*ic_cause_ipi)(int cpu) __ro_after_init;
/* Use msgsndp doorbells if the target is a sibling, else use the interrupt controller */
static void dbell_or_ic_cause_ipi(int cpu)
{
if (doorbell_try_core_ipi(cpu))
return;
ic_cause_ipi(cpu);
}
static int pseries_cause_nmi_ipi(int cpu)
{
int hwcpu;
if (cpu == NMI_IPI_ALL_OTHERS) {
hwcpu = H_SIGNAL_SYS_RESET_ALL_OTHERS;
} else {
if (cpu < 0) {
WARN_ONCE(true, "incorrect cpu parameter %d", cpu);
return 0;
}
hwcpu = get_hard_smp_processor_id(cpu);
}
if (plpar_signal_sys_reset(hwcpu) == H_SUCCESS)
return 1;
return 0;
}
static __init void pSeries_smp_probe(void)
{
if (xive_enabled())
xive_smp_probe();
else
xics_smp_probe();
/* No doorbell facility, must use the interrupt controller for IPIs */
if (!cpu_has_feature(CPU_FTR_DBELL))
return;
/* Doorbells can only be used for IPIs between SMT siblings */
if (!cpu_has_feature(CPU_FTR_SMT))
return;
check_kvm_guest();
if (is_kvm_guest()) {
/*
* KVM emulates doorbells by disabling FSCR[MSGP] so msgsndp
* faults to the hypervisor which then reads the instruction
* from guest memory, which tends to be slower than using XIVE.
*/
if (xive_enabled())
return;
/*
* XICS hcalls aren't as fast, so we can use msgsndp (which
* also helps exercise KVM emulation), however KVM can't
* emulate secure guests because it can't read the instruction
* out of their memory.
*/
if (is_secure_guest())
return;
}
/*
* Under PowerVM, FSCR[MSGP] is enabled as guest vCPU siblings are
* gang scheduled on the same physical core, so doorbells are always
* faster than the interrupt controller, and they can be used by
* secure guests.
*/
ic_cause_ipi = smp_ops->cause_ipi;
smp_ops->cause_ipi = dbell_or_ic_cause_ipi;
}
static struct smp_ops_t pseries_smp_ops = {
.message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
.cause_ipi = NULL, /* Filled at runtime by pSeries_smp_probe() */
.cause_nmi_ipi = pseries_cause_nmi_ipi,
.probe = pSeries_smp_probe,
.prepare_cpu = pseries_smp_prepare_cpu,
.kick_cpu = smp_pSeries_kick_cpu,
.setup_cpu = smp_setup_cpu,
.cpu_bootable = smp_generic_cpu_bootable,
};
/* This is called very early */
void __init smp_init_pseries(void)
{
int i;
pr_debug(" -> smp_init_pSeries()\n");
smp_ops = &pseries_smp_ops;
alloc_bootmem_cpumask_var(&of_spin_mask);
/*
* Mark threads which are still spinning in hold loops
*
* We know prom_init will not have started them if RTAS supports
* query-cpu-stopped-state.
*/
if (rtas_function_token(RTAS_FN_QUERY_CPU_STOPPED_STATE) == RTAS_UNKNOWN_SERVICE) {
if (cpu_has_feature(CPU_FTR_SMT)) {
for_each_present_cpu(i) {
if (cpu_thread_in_core(i) == 0)
cpumask_set_cpu(i, of_spin_mask);
}
} else
cpumask_copy(of_spin_mask, cpu_present_mask);
cpumask_clear_cpu(boot_cpuid, of_spin_mask);
}
pr_debug(" <- smp_init_pSeries()\n");
}
| linux-master | arch/powerpc/platforms/pseries/smp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2010 2011 Mark Nelson and Tseng-Hui (Frank) Lin, IBM Corporation
*/
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/irq.h>
#include <asm/io_event_irq.h>
#include "pseries.h"
/*
* IO event interrupt is a mechanism provided by RTAS to return
* information about hardware error and non-error events. Device
* drivers can register their event handlers to receive events.
* Device drivers are expected to use atomic_notifier_chain_register()
* and atomic_notifier_chain_unregister() to register and unregister
* their event handlers. Since multiple IO event types and scopes
* share an IO event interrupt, the event handlers are called one
* by one until the IO event is claimed by one of the handlers.
* The event handlers are expected to return NOTIFY_OK if the
* event is handled by the event handler or NOTIFY_DONE if the
* event does not belong to the handler.
*
* Usage:
*
* Notifier function:
* #include <asm/io_event_irq.h>
* int event_handler(struct notifier_block *nb, unsigned long val, void *data) {
* p = (struct pseries_io_event_sect_data *) data;
* if (! is_my_event(p->scope, p->event_type)) return NOTIFY_DONE;
* :
* :
* return NOTIFY_OK;
* }
* struct notifier_block event_nb = {
* .notifier_call = event_handler,
* }
*
* Registration:
* atomic_notifier_chain_register(&pseries_ioei_notifier_list, &event_nb);
*
* Unregistration:
* atomic_notifier_chain_unregister(&pseries_ioei_notifier_list, &event_nb);
*/
ATOMIC_NOTIFIER_HEAD(pseries_ioei_notifier_list);
EXPORT_SYMBOL_GPL(pseries_ioei_notifier_list);
static int ioei_check_exception_token;
static char ioei_rtas_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
/**
* ioei_find_event() - Find the data portion of an IO Event section from event log.
* @elog: RTAS error/event log.
*
* Return:
* pointer to a valid IO event section data. NULL if not found.
*/
static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog)
{
struct pseries_errorlog *sect;
/* We should only ever get called for io-event interrupts, but if
* we do get called for another type then something went wrong so
* make some noise about it.
* RTAS_TYPE_IO only exists in extended event log version 6 or later.
* No need to check event log version.
*/
if (unlikely(rtas_error_type(elog) != RTAS_TYPE_IO)) {
printk_once(KERN_WARNING "io_event_irq: Unexpected event type %d\n",
rtas_error_type(elog));
return NULL;
}
sect = get_pseries_errorlog(elog, PSERIES_ELOG_SECT_ID_IO_EVENT);
if (unlikely(!sect)) {
printk_once(KERN_WARNING "io_event_irq: RTAS extended event "
"log does not contain an IO Event section. "
"Could be a bug in system firmware!\n");
return NULL;
}
return (struct pseries_io_event *) &sect->data;
}
/*
* PAPR:
* - check-exception returns the first found error or event and clear that
* error or event so it is reported once.
* - Each interrupt returns one event. If a platform chooses to report
* multiple events through a single interrupt, it must ensure that the
* interrupt remains asserted until check-exception has been used to
* process all outstanding events for that interrupt.
*
* Implementation notes:
* - Events must be processed in the order they are returned. Hence,
* sequential in nature.
* - The owner of an event is determined by combinations of scope,
* event type, and sub-type. There is no easy way to pre-sort clients
* by scope or event type alone. For example, Torrent ISR route change
* event is reported with scope 0x00 (Not Applicable) rather than
* 0x3B (Torrent-hub). It is better to let the clients identify
* who owns the event.
*/
static irqreturn_t ioei_interrupt(int irq, void *dev_id)
{
struct pseries_io_event *event;
int rtas_rc;
for (;;) {
rtas_rc = rtas_call(ioei_check_exception_token, 6, 1, NULL,
RTAS_VECTOR_EXTERNAL_INTERRUPT,
virq_to_hw(irq),
RTAS_IO_EVENTS, 1 /* Time Critical */,
__pa(ioei_rtas_buf),
RTAS_DATA_BUF_SIZE);
if (rtas_rc != 0)
break;
event = ioei_find_event((struct rtas_error_log *)ioei_rtas_buf);
if (!event)
continue;
atomic_notifier_call_chain(&pseries_ioei_notifier_list,
0, event);
}
return IRQ_HANDLED;
}
static int __init ioei_init(void)
{
struct device_node *np;
ioei_check_exception_token = rtas_function_token(RTAS_FN_CHECK_EXCEPTION);
if (ioei_check_exception_token == RTAS_UNKNOWN_SERVICE)
return -ENODEV;
np = of_find_node_by_path("/event-sources/ibm,io-events");
if (np) {
request_event_sources_irqs(np, ioei_interrupt, "IO_EVENT");
pr_info("IBM I/O event interrupts enabled\n");
of_node_put(np);
} else {
return -ENODEV;
}
return 0;
}
machine_subsys_initcall(pseries, ioei_init);
| linux-master | arch/powerpc/platforms/pseries/io_event_irq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Support for dynamic reconfiguration for PCI, Memory, and CPU
* Hotplug and Dynamic Logical Partitioning on RPA platforms.
*
* Copyright (C) 2009 Nathan Fontenot
* Copyright (C) 2009 IBM Corporation
*/
#define pr_fmt(fmt) "dlpar: " fmt
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>
#include "of_helpers.h"
#include "pseries.h"
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>
#include <asm/rtas-work-area.h>
static struct workqueue_struct *pseries_hp_wq;
struct pseries_hp_work {
struct work_struct work;
struct pseries_hp_errorlog *errlog;
};
struct cc_workarea {
__be32 drc_index;
__be32 zero;
__be32 name_offset;
__be32 prop_length;
__be32 prop_offset;
};
void dlpar_free_cc_property(struct property *prop)
{
kfree(prop->name);
kfree(prop->value);
kfree(prop);
}
static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
struct property *prop;
char *name;
char *value;
prop = kzalloc(sizeof(*prop), GFP_KERNEL);
if (!prop)
return NULL;
name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
prop->name = kstrdup(name, GFP_KERNEL);
if (!prop->name) {
dlpar_free_cc_property(prop);
return NULL;
}
prop->length = be32_to_cpu(ccwa->prop_length);
value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
prop->value = kmemdup(value, prop->length, GFP_KERNEL);
if (!prop->value) {
dlpar_free_cc_property(prop);
return NULL;
}
return prop;
}
static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
{
struct device_node *dn;
const char *name;
dn = kzalloc(sizeof(*dn), GFP_KERNEL);
if (!dn)
return NULL;
name = (const char *)ccwa + be32_to_cpu(ccwa->name_offset);
dn->full_name = kstrdup(name, GFP_KERNEL);
if (!dn->full_name) {
kfree(dn);
return NULL;
}
of_node_set_flag(dn, OF_DYNAMIC);
of_node_init(dn);
return dn;
}
static void dlpar_free_one_cc_node(struct device_node *dn)
{
struct property *prop;
while (dn->properties) {
prop = dn->properties;
dn->properties = prop->next;
dlpar_free_cc_property(prop);
}
kfree(dn->full_name);
kfree(dn);
}
void dlpar_free_cc_nodes(struct device_node *dn)
{
if (dn->child)
dlpar_free_cc_nodes(dn->child);
if (dn->sibling)
dlpar_free_cc_nodes(dn->sibling);
dlpar_free_one_cc_node(dn);
}
#define COMPLETE 0
#define NEXT_SIBLING 1
#define NEXT_CHILD 2
#define NEXT_PROPERTY 3
#define PREV_PARENT 4
#define MORE_MEMORY 5
#define ERR_CFG_USE -9003
struct device_node *dlpar_configure_connector(__be32 drc_index,
struct device_node *parent)
{
struct device_node *dn;
struct device_node *first_dn = NULL;
struct device_node *last_dn = NULL;
struct property *property;
struct property *last_property = NULL;
struct cc_workarea *ccwa;
struct rtas_work_area *work_area;
char *data_buf;
int cc_token;
int rc = -1;
cc_token = rtas_function_token(RTAS_FN_IBM_CONFIGURE_CONNECTOR);
if (cc_token == RTAS_UNKNOWN_SERVICE)
return NULL;
work_area = rtas_work_area_alloc(SZ_4K);
data_buf = rtas_work_area_raw_buf(work_area);
ccwa = (struct cc_workarea *)&data_buf[0];
ccwa->drc_index = drc_index;
ccwa->zero = 0;
do {
do {
rc = rtas_call(cc_token, 2, 1, NULL,
rtas_work_area_phys(work_area), NULL);
} while (rtas_busy_delay(rc));
switch (rc) {
case COMPLETE:
break;
case NEXT_SIBLING:
dn = dlpar_parse_cc_node(ccwa);
if (!dn)
goto cc_error;
dn->parent = last_dn->parent;
last_dn->sibling = dn;
last_dn = dn;
break;
case NEXT_CHILD:
dn = dlpar_parse_cc_node(ccwa);
if (!dn)
goto cc_error;
if (!first_dn) {
dn->parent = parent;
first_dn = dn;
} else {
dn->parent = last_dn;
if (last_dn)
last_dn->child = dn;
}
last_dn = dn;
break;
case NEXT_PROPERTY:
property = dlpar_parse_cc_property(ccwa);
if (!property)
goto cc_error;
if (!last_dn->properties)
last_dn->properties = property;
else
last_property->next = property;
last_property = property;
break;
case PREV_PARENT:
last_dn = last_dn->parent;
break;
case MORE_MEMORY:
case ERR_CFG_USE:
default:
printk(KERN_ERR "Unexpected Error (%d) "
"returned from configure-connector\n", rc);
goto cc_error;
}
} while (rc);
cc_error:
rtas_work_area_free(work_area);
if (rc) {
if (first_dn)
dlpar_free_cc_nodes(first_dn);
return NULL;
}
return first_dn;
}
int dlpar_attach_node(struct device_node *dn, struct device_node *parent)
{
int rc;
dn->parent = parent;
rc = of_attach_node(dn);
if (rc) {
printk(KERN_ERR "Failed to add device node %pOF\n", dn);
return rc;
}
return 0;
}
int dlpar_detach_node(struct device_node *dn)
{
struct device_node *child;
int rc;
child = of_get_next_child(dn, NULL);
while (child) {
dlpar_detach_node(child);
child = of_get_next_child(dn, child);
}
rc = of_detach_node(dn);
if (rc)
return rc;
of_node_put(dn);
return 0;
}
#define DR_ENTITY_SENSE 9003
#define DR_ENTITY_PRESENT 1
#define DR_ENTITY_UNUSABLE 2
#define ALLOCATION_STATE 9003
#define ALLOC_UNUSABLE 0
#define ALLOC_USABLE 1
#define ISOLATION_STATE 9001
#define ISOLATE 0
#define UNISOLATE 1
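/*
* Sketch of the RTAS indicator sequences implemented below: acquiring a
* DRC checks the DR_ENTITY_SENSE sensor, sets ALLOCATION_STATE to
* ALLOC_USABLE and then ISOLATION_STATE to UNISOLATE, rolling back the
* allocation if unisolation fails. Releasing reverses the order:
* isolate first, then mark the allocation unusable.
*/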
int dlpar_acquire_drc(u32 drc_index)
{
int dr_status, rc;
rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
if (rc || dr_status != DR_ENTITY_UNUSABLE)
return -1;
rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
if (rc)
return rc;
rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
if (rc) {
rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
return rc;
}
return 0;
}
int dlpar_release_drc(u32 drc_index)
{
int dr_status, rc;
rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
if (rc || dr_status != DR_ENTITY_PRESENT)
return -1;
rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
if (rc)
return rc;
rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
if (rc) {
rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
return rc;
}
return 0;
}
int dlpar_unisolate_drc(u32 drc_index)
{
int dr_status, rc;
rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
if (rc || dr_status != DR_ENTITY_PRESENT)
return -1;
rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
return 0;
}
int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
int rc;
/* pseries error logs are in BE format, convert to cpu type */
switch (hp_elog->id_type) {
case PSERIES_HP_ELOG_ID_DRC_COUNT:
hp_elog->_drc_u.drc_count =
be32_to_cpu(hp_elog->_drc_u.drc_count);
break;
case PSERIES_HP_ELOG_ID_DRC_INDEX:
hp_elog->_drc_u.drc_index =
be32_to_cpu(hp_elog->_drc_u.drc_index);
break;
case PSERIES_HP_ELOG_ID_DRC_IC:
hp_elog->_drc_u.ic.count =
be32_to_cpu(hp_elog->_drc_u.ic.count);
hp_elog->_drc_u.ic.index =
be32_to_cpu(hp_elog->_drc_u.ic.index);
}
switch (hp_elog->resource) {
case PSERIES_HP_ELOG_RESOURCE_MEM:
rc = dlpar_memory(hp_elog);
break;
case PSERIES_HP_ELOG_RESOURCE_CPU:
rc = dlpar_cpu(hp_elog);
break;
case PSERIES_HP_ELOG_RESOURCE_PMEM:
rc = dlpar_hp_pmem(hp_elog);
break;
default:
pr_warn_ratelimited("Invalid resource (%d) specified\n",
hp_elog->resource);
rc = -EINVAL;
}
return rc;
}
static void pseries_hp_work_fn(struct work_struct *work)
{
struct pseries_hp_work *hp_work =
container_of(work, struct pseries_hp_work, work);
handle_dlpar_errorlog(hp_work->errlog);
kfree(hp_work->errlog);
kfree(work);
}
void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog)
{
struct pseries_hp_work *work;
struct pseries_hp_errorlog *hp_errlog_copy;
hp_errlog_copy = kmemdup(hp_errlog, sizeof(*hp_errlog), GFP_ATOMIC);
if (!hp_errlog_copy)
return;
work = kmalloc(sizeof(struct pseries_hp_work), GFP_ATOMIC);
if (work) {
INIT_WORK((struct work_struct *)work, pseries_hp_work_fn);
work->errlog = hp_errlog_copy;
queue_work(pseries_hp_wq, (struct work_struct *)work);
} else {
kfree(hp_errlog_copy);
}
}
static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
char *arg;
arg = strsep(cmd, " ");
if (!arg)
return -EINVAL;
if (sysfs_streq(arg, "memory")) {
hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
} else if (sysfs_streq(arg, "cpu")) {
hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
} else {
pr_err("Invalid resource specified.\n");
return -EINVAL;
}
return 0;
}
static int dlpar_parse_action(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
char *arg;
arg = strsep(cmd, " ");
if (!arg)
return -EINVAL;
if (sysfs_streq(arg, "add")) {
hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
} else if (sysfs_streq(arg, "remove")) {
hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
} else {
pr_err("Invalid action specified.\n");
return -EINVAL;
}
return 0;
}
static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
char *arg;
u32 count, index;
arg = strsep(cmd, " ");
if (!arg)
return -EINVAL;
if (sysfs_streq(arg, "indexed-count")) {
hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_IC;
arg = strsep(cmd, " ");
if (!arg) {
pr_err("No DRC count specified.\n");
return -EINVAL;
}
if (kstrtou32(arg, 0, &count)) {
pr_err("Invalid DRC count specified.\n");
return -EINVAL;
}
arg = strsep(cmd, " ");
if (!arg) {
pr_err("No DRC Index specified.\n");
return -EINVAL;
}
if (kstrtou32(arg, 0, &index)) {
pr_err("Invalid DRC Index specified.\n");
return -EINVAL;
}
hp_elog->_drc_u.ic.count = cpu_to_be32(count);
hp_elog->_drc_u.ic.index = cpu_to_be32(index);
} else if (sysfs_streq(arg, "index")) {
hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
arg = strsep(cmd, " ");
if (!arg) {
pr_err("No DRC Index specified.\n");
return -EINVAL;
}
if (kstrtou32(arg, 0, &index)) {
pr_err("Invalid DRC Index specified.\n");
return -EINVAL;
}
hp_elog->_drc_u.drc_index = cpu_to_be32(index);
} else if (sysfs_streq(arg, "count")) {
hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
arg = strsep(cmd, " ");
if (!arg) {
pr_err("No DRC count specified.\n");
return -EINVAL;
}
if (kstrtou32(arg, 0, &count)) {
pr_err("Invalid DRC count specified.\n");
return -EINVAL;
}
hp_elog->_drc_u.drc_count = cpu_to_be32(count);
} else {
pr_err("Invalid id_type specified.\n");
return -EINVAL;
}
return 0;
}
static ssize_t dlpar_store(const struct class *class, const struct class_attribute *attr,
const char *buf, size_t count)
{
struct pseries_hp_errorlog hp_elog;
char *argbuf;
char *args;
int rc;
args = argbuf = kstrdup(buf, GFP_KERNEL);
if (!argbuf)
return -ENOMEM;
/*
* Parse out the request from the user, this will be in the form:
* <resource> <action> <id_type> <id>
*/
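/*
* For example (the DRC index values here are purely illustrative):
* "cpu add index 0x10000010"
* "memory remove count 2"
* "memory add indexed-count 16 0x80000030"
*/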
rc = dlpar_parse_resource(&args, &hp_elog);
if (rc)
goto dlpar_store_out;
rc = dlpar_parse_action(&args, &hp_elog);
if (rc)
goto dlpar_store_out;
rc = dlpar_parse_id_type(&args, &hp_elog);
if (rc)
goto dlpar_store_out;
rc = handle_dlpar_errorlog(&hp_elog);
dlpar_store_out:
kfree(argbuf);
if (rc)
pr_err("Could not handle DLPAR request \"%s\"\n", buf);
return rc ? rc : count;
}
static ssize_t dlpar_show(const struct class *class, const struct class_attribute *attr,
char *buf)
{
return sprintf(buf, "%s\n", "memory,cpu");
}
static CLASS_ATTR_RW(dlpar);
int __init dlpar_workqueue_init(void)
{
if (pseries_hp_wq)
return 0;
pseries_hp_wq = alloc_ordered_workqueue("pseries hotplug workqueue", 0);
return pseries_hp_wq ? 0 : -ENOMEM;
}
static int __init dlpar_sysfs_init(void)
{
int rc;
rc = dlpar_workqueue_init();
if (rc)
return rc;
return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
}
machine_device_initcall(pseries, dlpar_sysfs_init);
| linux-master | arch/powerpc/platforms/pseries/dlpar.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Secure VM platform
*
* Copyright 2018 IBM Corporation
* Author: Anshuman Khandual <[email protected]>
*/
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/cc_platform.h>
#include <asm/machdep.h>
#include <asm/svm.h>
#include <asm/swiotlb.h>
#include <asm/ultravisor.h>
#include <asm/dtl.h>
static int __init init_svm(void)
{
if (!is_secure_guest())
return 0;
/* Don't release the SWIOTLB buffer. */
ppc_swiotlb_enable = 1;
/*
* Since the guest memory is inaccessible to the host, devices always
* need to use the SWIOTLB buffer for DMA even if dma_capable() says
* otherwise.
*/
ppc_swiotlb_flags |= SWIOTLB_ANY | SWIOTLB_FORCE;
/* Share the SWIOTLB buffer with the host. */
swiotlb_update_mem_attributes();
return 0;
}
machine_early_initcall(pseries, init_svm);
int set_memory_encrypted(unsigned long addr, int numpages)
{
if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
return 0;
if (!PAGE_ALIGNED(addr))
return -EINVAL;
uv_unshare_page(PHYS_PFN(__pa(addr)), numpages);
return 0;
}
int set_memory_decrypted(unsigned long addr, int numpages)
{
if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
return 0;
if (!PAGE_ALIGNED(addr))
return -EINVAL;
uv_share_page(PHYS_PFN(__pa(addr)), numpages);
return 0;
}
/* There's one dispatch log per CPU. */
#define NR_DTL_PAGE (DISPATCH_LOG_BYTES * CONFIG_NR_CPUS / PAGE_SIZE)
static struct page *dtl_page_store[NR_DTL_PAGE];
static long dtl_nr_pages;
static bool is_dtl_page_shared(struct page *page)
{
long i;
for (i = 0; i < dtl_nr_pages; i++)
if (dtl_page_store[i] == page)
return true;
return false;
}
void dtl_cache_ctor(void *addr)
{
unsigned long pfn = PHYS_PFN(__pa(addr));
struct page *page = pfn_to_page(pfn);
if (!is_dtl_page_shared(page)) {
dtl_page_store[dtl_nr_pages] = page;
dtl_nr_pages++;
WARN_ON(dtl_nr_pages >= NR_DTL_PAGE);
uv_share_page(pfn, 1);
}
}
| linux-master | arch/powerpc/platforms/pseries/svm.c |
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "papr-scm: " fmt
#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/ndctl.h>
#include <linux/sched.h>
#include <linux/libnvdimm.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/seq_buf.h>
#include <linux/nd.h>
#include <asm/plpar_wrappers.h>
#include <asm/papr_pdsm.h>
#include <asm/mce.h>
#include <asm/unaligned.h>
#include <linux/perf_event.h>
#define BIND_ANY_ADDR (~0ul)
#define PAPR_SCM_DIMM_CMD_MASK \
((1ul << ND_CMD_GET_CONFIG_SIZE) | \
(1ul << ND_CMD_GET_CONFIG_DATA) | \
(1ul << ND_CMD_SET_CONFIG_DATA) | \
(1ul << ND_CMD_CALL))
/* DIMM health bitmap indicators */
/* SCM device is unable to persist memory contents */
#define PAPR_PMEM_UNARMED (1ULL << (63 - 0))
/* SCM device failed to persist memory contents */
#define PAPR_PMEM_SHUTDOWN_DIRTY (1ULL << (63 - 1))
/* SCM device contents are persisted from previous IPL */
#define PAPR_PMEM_SHUTDOWN_CLEAN (1ULL << (63 - 2))
/* SCM device contents are not persisted from previous IPL */
#define PAPR_PMEM_EMPTY (1ULL << (63 - 3))
/* SCM device memory life remaining is critically low */
#define PAPR_PMEM_HEALTH_CRITICAL (1ULL << (63 - 4))
/* SCM device will be garded off next IPL due to failure */
#define PAPR_PMEM_HEALTH_FATAL (1ULL << (63 - 5))
/* SCM contents cannot persist due to current platform health status */
#define PAPR_PMEM_HEALTH_UNHEALTHY (1ULL << (63 - 6))
/* SCM device is unable to persist memory contents in certain conditions */
#define PAPR_PMEM_HEALTH_NON_CRITICAL (1ULL << (63 - 7))
/* SCM device is encrypted */
#define PAPR_PMEM_ENCRYPTED (1ULL << (63 - 8))
/* SCM device has been scrubbed and locked */
#define PAPR_PMEM_SCRUBBED_AND_LOCKED (1ULL << (63 - 9))
/* Bit status indicators for health bitmap indicating unarmed dimm */
#define PAPR_PMEM_UNARMED_MASK (PAPR_PMEM_UNARMED | \
PAPR_PMEM_HEALTH_UNHEALTHY)
/* Bit status indicators for health bitmap indicating unflushed dimm */
#define PAPR_PMEM_BAD_SHUTDOWN_MASK (PAPR_PMEM_SHUTDOWN_DIRTY)
/* Bit status indicators for health bitmap indicating unrestored dimm */
#define PAPR_PMEM_BAD_RESTORE_MASK (PAPR_PMEM_EMPTY)
/* Bit status indicators for smart event notification */
#define PAPR_PMEM_SMART_EVENT_MASK (PAPR_PMEM_HEALTH_CRITICAL | \
PAPR_PMEM_HEALTH_FATAL | \
PAPR_PMEM_HEALTH_UNHEALTHY)
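/*
* The (1ULL << (63 - N)) form mirrors PAPR's big-endian bit numbering,
* where bit 0 is the most significant bit: for example PAPR_PMEM_UNARMED
* (bit 0) is 0x8000000000000000 and PAPR_PMEM_ENCRYPTED (bit 8) is
* 0x0080000000000000.
*/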
#define PAPR_SCM_PERF_STATS_EYECATCHER __stringify(SCMSTATS)
#define PAPR_SCM_PERF_STATS_VERSION 0x1
/* Struct holding a single performance metric */
struct papr_scm_perf_stat {
u8 stat_id[8];
__be64 stat_val;
} __packed;
/* Struct exchanged between kernel and PHYP for fetching drc perf stats */
struct papr_scm_perf_stats {
u8 eye_catcher[8];
/* Should be PAPR_SCM_PERF_STATS_VERSION */
__be32 stats_version;
/* Number of stats following */
__be32 num_statistics;
/* zero or more performance metrics */
struct papr_scm_perf_stat scm_statistic[];
} __packed;
/* private struct associated with each region */
struct papr_scm_priv {
struct platform_device *pdev;
struct device_node *dn;
uint32_t drc_index;
uint64_t blocks;
uint64_t block_size;
int metadata_size;
bool is_volatile;
bool hcall_flush_required;
uint64_t bound_addr;
struct nvdimm_bus_descriptor bus_desc;
struct nvdimm_bus *bus;
struct nvdimm *nvdimm;
struct resource res;
struct nd_region *region;
struct nd_interleave_set nd_set;
struct list_head region_list;
/* Protect dimm health data from concurrent read/writes */
struct mutex health_mutex;
/* Last time the health information of the dimm was updated */
unsigned long lasthealth_jiffies;
/* Health information for the dimm */
u64 health_bitmap;
/* Holds the last known dirty shutdown counter value */
u64 dirty_shutdown_counter;
/* length of the stat buffer as expected by phyp */
size_t stat_buffer_len;
/* The bits which need to be overridden */
u64 health_bitmap_inject_mask;
};
static int papr_scm_pmem_flush(struct nd_region *nd_region,
struct bio *bio __maybe_unused)
{
struct papr_scm_priv *p = nd_region_provider_data(nd_region);
unsigned long ret_buf[PLPAR_HCALL_BUFSIZE], token = 0;
long rc;
dev_dbg(&p->pdev->dev, "flush drc 0x%x", p->drc_index);
do {
rc = plpar_hcall(H_SCM_FLUSH, ret_buf, p->drc_index, token);
token = ret_buf[0];
/* Check if we are stalled for some time */
if (H_IS_LONG_BUSY(rc)) {
msleep(get_longbusy_msecs(rc));
rc = H_BUSY;
} else if (rc == H_BUSY) {
cond_resched();
}
} while (rc == H_BUSY);
if (rc) {
dev_err(&p->pdev->dev, "flush error: %ld", rc);
rc = -EIO;
} else {
dev_dbg(&p->pdev->dev, "flush drc 0x%x complete", p->drc_index);
}
return rc;
}
static LIST_HEAD(papr_nd_regions);
static DEFINE_MUTEX(papr_ndr_lock);
static int drc_pmem_bind(struct papr_scm_priv *p)
{
unsigned long ret[PLPAR_HCALL_BUFSIZE];
uint64_t saved = 0;
uint64_t token;
int64_t rc;
/*
* When the hypervisor cannot map all the requested memory in a single
* hcall it returns H_BUSY and we call again with the token until
* we get H_SUCCESS. Aborting the retry loop before getting H_SUCCESS
* leaves the system in an undefined state, so we wait.
*/
token = 0;
do {
rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
p->blocks, BIND_ANY_ADDR, token);
token = ret[0];
if (!saved)
saved = ret[1];
cond_resched();
} while (rc == H_BUSY);
if (rc)
return rc;
p->bound_addr = saved;
dev_dbg(&p->pdev->dev, "bound drc 0x%x to 0x%lx\n",
p->drc_index, (unsigned long)saved);
return rc;
}
static void drc_pmem_unbind(struct papr_scm_priv *p)
{
unsigned long ret[PLPAR_HCALL_BUFSIZE];
uint64_t token = 0;
int64_t rc;
dev_dbg(&p->pdev->dev, "unbind drc 0x%x\n", p->drc_index);
/* NB: unbind has the same retry requirements as drc_pmem_bind() */
do {
/* Unbind of all SCM resources associated with drcIndex */
rc = plpar_hcall(H_SCM_UNBIND_ALL, ret, H_UNBIND_SCOPE_DRC,
p->drc_index, token);
token = ret[0];
/* Check if we are stalled for some time */
if (H_IS_LONG_BUSY(rc)) {
msleep(get_longbusy_msecs(rc));
rc = H_BUSY;
} else if (rc == H_BUSY) {
cond_resched();
}
} while (rc == H_BUSY);
if (rc)
dev_err(&p->pdev->dev, "unbind error: %lld\n", rc);
else
dev_dbg(&p->pdev->dev, "unbind drc 0x%x complete\n",
p->drc_index);
return;
}
static int drc_pmem_query_n_bind(struct papr_scm_priv *p)
{
unsigned long start_addr;
unsigned long end_addr;
unsigned long ret[PLPAR_HCALL_BUFSIZE];
int64_t rc;
rc = plpar_hcall(H_SCM_QUERY_BLOCK_MEM_BINDING, ret,
p->drc_index, 0);
if (rc)
goto err_out;
start_addr = ret[0];
/* Make sure the full region is bound. */
rc = plpar_hcall(H_SCM_QUERY_BLOCK_MEM_BINDING, ret,
p->drc_index, p->blocks - 1);
if (rc)
goto err_out;
end_addr = ret[0];
if ((end_addr - start_addr) != ((p->blocks - 1) * p->block_size))
goto err_out;
p->bound_addr = start_addr;
dev_dbg(&p->pdev->dev, "bound drc 0x%x to 0x%lx\n", p->drc_index, start_addr);
return rc;
err_out:
dev_info(&p->pdev->dev,
"Failed to query, trying an unbind followed by bind");
drc_pmem_unbind(p);
return drc_pmem_bind(p);
}
/*
* Query the Dimm performance stats from PHYP and copy them (if returned) to
* the provided struct papr_scm_perf_stats instance 'buff_stats' that can hold
* at least 'num_stats' statistics plus the header.
* - If buff_stats == NULL the return value is the size in bytes of the buffer
* needed to hold all supported performance-statistics.
* - If buff_stats != NULL and num_stats == 0 then we copy all known
* performance-statistics to 'buff_stats', which is expected to be large
* enough to hold them.
* - If buff_stats != NULL and num_stats > 0 then copy the requested
* performance-statistics to buff_stats.
*/
static ssize_t drc_pmem_query_stats(struct papr_scm_priv *p,
struct papr_scm_perf_stats *buff_stats,
unsigned int num_stats)
{
unsigned long ret[PLPAR_HCALL_BUFSIZE];
size_t size;
s64 rc;
/* Setup the out buffer */
if (buff_stats) {
memcpy(buff_stats->eye_catcher,
PAPR_SCM_PERF_STATS_EYECATCHER, 8);
buff_stats->stats_version =
cpu_to_be32(PAPR_SCM_PERF_STATS_VERSION);
buff_stats->num_statistics =
cpu_to_be32(num_stats);
/*
* Calculate the buffer size based on num-stats provided
* or use the prefetched max buffer length
*/
if (num_stats)
/* Calculate size from the num_stats */
size = sizeof(struct papr_scm_perf_stats) +
num_stats * sizeof(struct papr_scm_perf_stat);
else
size = p->stat_buffer_len;
} else {
/* In case of no out buffer ignore the size */
size = 0;
}
/* Do the HCALL asking PHYP for info */
rc = plpar_hcall(H_SCM_PERFORMANCE_STATS, ret, p->drc_index,
buff_stats ? virt_to_phys(buff_stats) : 0,
size);
/* Check if the error was due to an unknown stat-id */
if (rc == H_PARTIAL) {
dev_err(&p->pdev->dev,
"Unknown performance stats, Err:0x%016lX\n", ret[0]);
return -ENOENT;
} else if (rc == H_AUTHORITY) {
dev_info(&p->pdev->dev,
"Permission denied while accessing performance stats");
return -EPERM;
} else if (rc == H_UNSUPPORTED) {
dev_dbg(&p->pdev->dev, "Performance stats unsupported\n");
return -EOPNOTSUPP;
} else if (rc != H_SUCCESS) {
dev_err(&p->pdev->dev,
"Failed to query performance stats, Err:%lld\n", rc);
return -EIO;
} else if (!size) {
/* Handle case where stat buffer size was requested */
dev_dbg(&p->pdev->dev,
"Performance stats size %ld\n", ret[0]);
return ret[0];
}
/* Successfully fetched the requested stats from phyp */
dev_dbg(&p->pdev->dev,
"Performance stats returned %d stats\n",
be32_to_cpu(buff_stats->num_statistics));
return 0;
}
#ifdef CONFIG_PERF_EVENTS
#define to_nvdimm_pmu(_pmu) container_of(_pmu, struct nvdimm_pmu, pmu)
static const char * const nvdimm_events_map[] = {
[1] = "CtlResCt",
[2] = "CtlResTm",
[3] = "PonSecs ",
[4] = "MemLife ",
[5] = "CritRscU",
[6] = "HostLCnt",
[7] = "HostSCnt",
[8] = "HostSDur",
[9] = "HostLDur",
[10] = "MedRCnt ",
[11] = "MedWCnt ",
[12] = "MedRDur ",
[13] = "MedWDur ",
[14] = "CchRHCnt",
[15] = "CchWHCnt",
[16] = "FastWCnt",
};
static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev, u64 *count)
{
struct papr_scm_perf_stat *stat;
struct papr_scm_perf_stats *stats;
struct papr_scm_priv *p = dev_get_drvdata(dev);
int rc, size;
/* Invalid eventcode */
if (event->attr.config == 0 || event->attr.config >= ARRAY_SIZE(nvdimm_events_map))
return -EINVAL;
/* Allocate a request buffer large enough to hold a single performance stat */
size = sizeof(struct papr_scm_perf_stats) +
sizeof(struct papr_scm_perf_stat);
if (!p)
return -EINVAL;
stats = kzalloc(size, GFP_KERNEL);
if (!stats)
return -ENOMEM;
stat = &stats->scm_statistic[0];
memcpy(&stat->stat_id,
nvdimm_events_map[event->attr.config],
sizeof(stat->stat_id));
stat->stat_val = 0;
rc = drc_pmem_query_stats(p, stats, 1);
if (rc < 0) {
kfree(stats);
return rc;
}
*count = be64_to_cpu(stat->stat_val);
kfree(stats);
return 0;
}
static int papr_scm_pmu_event_init(struct perf_event *event)
{
struct nvdimm_pmu *nd_pmu = to_nvdimm_pmu(event->pmu);
struct papr_scm_priv *p;
if (!nd_pmu)
return -EINVAL;
/* test the event attr type for PMU enumeration */
if (event->attr.type != event->pmu->type)
return -ENOENT;
/* it does not support event sampling mode */
if (is_sampling_event(event))
return -EOPNOTSUPP;
/* no branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
p = (struct papr_scm_priv *)nd_pmu->dev->driver_data;
if (!p)
return -EINVAL;
/* Invalid eventcode */
if (event->attr.config == 0 || event->attr.config > 16)
return -EINVAL;
return 0;
}
static int papr_scm_pmu_add(struct perf_event *event, int flags)
{
u64 count;
int rc;
struct nvdimm_pmu *nd_pmu = to_nvdimm_pmu(event->pmu);
if (!nd_pmu)
return -EINVAL;
if (flags & PERF_EF_START) {
rc = papr_scm_pmu_get_value(event, nd_pmu->dev, &count);
if (rc)
return rc;
local64_set(&event->hw.prev_count, count);
}
return 0;
}
static void papr_scm_pmu_read(struct perf_event *event)
{
u64 prev, now;
int rc;
struct nvdimm_pmu *nd_pmu = to_nvdimm_pmu(event->pmu);
if (!nd_pmu)
return;
rc = papr_scm_pmu_get_value(event, nd_pmu->dev, &now);
if (rc)
return;
prev = local64_xchg(&event->hw.prev_count, now);
local64_add(now - prev, &event->count);
}
static void papr_scm_pmu_del(struct perf_event *event, int flags)
{
papr_scm_pmu_read(event);
}
static void papr_scm_pmu_register(struct papr_scm_priv *p)
{
struct nvdimm_pmu *nd_pmu;
int rc, nodeid;
nd_pmu = kzalloc(sizeof(*nd_pmu), GFP_KERNEL);
if (!nd_pmu) {
rc = -ENOMEM;
goto pmu_err_print;
}
if (!p->stat_buffer_len) {
rc = -ENOENT;
goto pmu_check_events_err;
}
nd_pmu->pmu.task_ctx_nr = perf_invalid_context;
nd_pmu->pmu.name = nvdimm_name(p->nvdimm);
nd_pmu->pmu.event_init = papr_scm_pmu_event_init;
nd_pmu->pmu.read = papr_scm_pmu_read;
nd_pmu->pmu.add = papr_scm_pmu_add;
nd_pmu->pmu.del = papr_scm_pmu_del;
nd_pmu->pmu.capabilities = PERF_PMU_CAP_NO_INTERRUPT |
PERF_PMU_CAP_NO_EXCLUDE;
/* updating the cpumask variable */
nodeid = numa_map_to_online_node(dev_to_node(&p->pdev->dev));
nd_pmu->arch_cpumask = *cpumask_of_node(nodeid);
rc = register_nvdimm_pmu(nd_pmu, p->pdev);
if (rc)
goto pmu_check_events_err;
/*
* Set archdata.priv value to nvdimm_pmu structure, to handle the
* unregistering of pmu device.
*/
p->pdev->archdata.priv = nd_pmu;
return;
pmu_check_events_err:
kfree(nd_pmu);
pmu_err_print:
dev_info(&p->pdev->dev, "nvdimm pmu didn't register rc=%d\n", rc);
}
#else
static void papr_scm_pmu_register(struct papr_scm_priv *p) { }
#endif
/*
* Issue hcall to retrieve dimm health info and populate papr_scm_priv with the
* health information.
*/
static int __drc_pmem_query_health(struct papr_scm_priv *p)
{
unsigned long ret[PLPAR_HCALL_BUFSIZE];
u64 bitmap = 0;
long rc;
/* issue the hcall */
rc = plpar_hcall(H_SCM_HEALTH, ret, p->drc_index);
if (rc == H_SUCCESS)
bitmap = ret[0] & ret[1];
else if (rc == H_FUNCTION)
dev_info_once(&p->pdev->dev,
"Hcall H_SCM_HEALTH not implemented, assuming empty health bitmap");
else {
dev_err(&p->pdev->dev,
"Failed to query health information, Err:%ld\n", rc);
return -ENXIO;
}
p->lasthealth_jiffies = jiffies;
/* Allow injecting specific health bits via inject mask. */
if (p->health_bitmap_inject_mask)
bitmap = (bitmap & ~p->health_bitmap_inject_mask) |
p->health_bitmap_inject_mask;
WRITE_ONCE(p->health_bitmap, bitmap);
dev_dbg(&p->pdev->dev,
"Queried dimm health info. Bitmap:0x%016lx Mask:0x%016lx\n",
ret[0], ret[1]);
return 0;
}
/* Min interval in seconds for assuming stable dimm health */
#define MIN_HEALTH_QUERY_INTERVAL 60
/* Query cached health info and if needed call drc_pmem_query_health */
static int drc_pmem_query_health(struct papr_scm_priv *p)
{
unsigned long cache_timeout;
int rc;
/* Protect concurrent modifications to papr_scm_priv */
rc = mutex_lock_interruptible(&p->health_mutex);
if (rc)
return rc;
/* Jiffies offset for which the health data is assumed to be the same */
cache_timeout = p->lasthealth_jiffies +
msecs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL * 1000);
/* Fetch new health info if it's older than MIN_HEALTH_QUERY_INTERVAL */
if (time_after(jiffies, cache_timeout))
rc = __drc_pmem_query_health(p);
else
/* Assume cached health data is valid */
rc = 0;
mutex_unlock(&p->health_mutex);
return rc;
}
static int papr_scm_meta_get(struct papr_scm_priv *p,
struct nd_cmd_get_config_data_hdr *hdr)
{
unsigned long data[PLPAR_HCALL_BUFSIZE];
unsigned long offset, data_offset;
int len, read;
int64_t ret;
if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
return -EINVAL;
for (len = hdr->in_length; len; len -= read) {
data_offset = hdr->in_length - len;
offset = hdr->in_offset + data_offset;
if (len >= 8)
read = 8;
else if (len >= 4)
read = 4;
else if (len >= 2)
read = 2;
else
read = 1;
ret = plpar_hcall(H_SCM_READ_METADATA, data, p->drc_index,
offset, read);
if (ret == H_PARAMETER) /* bad DRC index */
return -ENODEV;
if (ret)
return -EINVAL; /* other invalid parameter */
switch (read) {
case 8:
*(uint64_t *)(hdr->out_buf + data_offset) = be64_to_cpu(data[0]);
break;
case 4:
*(uint32_t *)(hdr->out_buf + data_offset) = be32_to_cpu(data[0] & 0xffffffff);
break;
case 2:
*(uint16_t *)(hdr->out_buf + data_offset) = be16_to_cpu(data[0] & 0xffff);
break;
case 1:
*(uint8_t *)(hdr->out_buf + data_offset) = (data[0] & 0xff);
break;
}
}
return 0;
}
static int papr_scm_meta_set(struct papr_scm_priv *p,
struct nd_cmd_set_config_hdr *hdr)
{
unsigned long offset, data_offset;
int len, wrote;
unsigned long data;
__be64 data_be;
int64_t ret;
if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
return -EINVAL;
for (len = hdr->in_length; len; len -= wrote) {
data_offset = hdr->in_length - len;
offset = hdr->in_offset + data_offset;
if (len >= 8) {
data = *(uint64_t *)(hdr->in_buf + data_offset);
data_be = cpu_to_be64(data);
wrote = 8;
} else if (len >= 4) {
data = *(uint32_t *)(hdr->in_buf + data_offset);
data &= 0xffffffff;
data_be = cpu_to_be32(data);
wrote = 4;
} else if (len >= 2) {
data = *(uint16_t *)(hdr->in_buf + data_offset);
data &= 0xffff;
data_be = cpu_to_be16(data);
wrote = 2;
} else {
data_be = *(uint8_t *)(hdr->in_buf + data_offset);
data_be &= 0xff;
wrote = 1;
}
ret = plpar_hcall_norets(H_SCM_WRITE_METADATA, p->drc_index,
offset, data_be, wrote);
if (ret == H_PARAMETER) /* bad DRC index */
return -ENODEV;
if (ret)
return -EINVAL; /* other invalid parameter */
}
return 0;
}
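/*
* Both metadata accessors above split a transfer into 8/4/2/1 byte
* hcalls. As an illustration (sizes made up): a 13 byte request starting
* at in_offset 0 is issued as an 8 byte access at offset 0, a 4 byte
* access at offset 8 and a 1 byte access at offset 12.
*/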
/*
* Do sanity checks on the input args to the dimm-control function and return
* '0' if valid. Validation of PDSM payloads happens later in
* papr_scm_service_pdsm.
*/
static int is_cmd_valid(struct nvdimm *nvdimm, unsigned int cmd, void *buf,
unsigned int buf_len)
{
unsigned long cmd_mask = PAPR_SCM_DIMM_CMD_MASK;
struct nd_cmd_pkg *nd_cmd;
struct papr_scm_priv *p;
enum papr_pdsm pdsm;
/* Only dimm-specific calls are supported atm */
if (!nvdimm)
return -EINVAL;
/* get the provider data from struct nvdimm */
p = nvdimm_provider_data(nvdimm);
if (!test_bit(cmd, &cmd_mask)) {
dev_dbg(&p->pdev->dev, "Unsupported cmd=%u\n", cmd);
return -EINVAL;
}
/* For CMD_CALL verify pdsm request */
if (cmd == ND_CMD_CALL) {
/* Verify the envelope and envelope size */
if (!buf ||
buf_len < (sizeof(struct nd_cmd_pkg) + ND_PDSM_HDR_SIZE)) {
dev_dbg(&p->pdev->dev, "Invalid pkg size=%u\n",
buf_len);
return -EINVAL;
}
/* Verify that the nd_cmd_pkg.nd_family is correct */
nd_cmd = (struct nd_cmd_pkg *)buf;
if (nd_cmd->nd_family != NVDIMM_FAMILY_PAPR) {
dev_dbg(&p->pdev->dev, "Invalid pkg family=0x%llx\n",
nd_cmd->nd_family);
return -EINVAL;
}
pdsm = (enum papr_pdsm)nd_cmd->nd_command;
/* Verify if the pdsm command is valid */
if (pdsm <= PAPR_PDSM_MIN || pdsm >= PAPR_PDSM_MAX) {
dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid PDSM\n",
pdsm);
return -EINVAL;
}
/* Have enough space to hold returned 'nd_pkg_pdsm' header */
if (nd_cmd->nd_size_out < ND_PDSM_HDR_SIZE) {
dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid payload\n",
pdsm);
return -EINVAL;
}
}
/* Let the command be further processed */
return 0;
}
static int papr_pdsm_fuel_gauge(struct papr_scm_priv *p,
union nd_pdsm_payload *payload)
{
int rc, size;
u64 statval;
struct papr_scm_perf_stat *stat;
struct papr_scm_perf_stats *stats;
/* Silently fail if fetching performance metrics isn't supported */
if (!p->stat_buffer_len)
return 0;
/* Allocate a request buffer large enough to hold a single performance stat */
size = sizeof(struct papr_scm_perf_stats) +
sizeof(struct papr_scm_perf_stat);
stats = kzalloc(size, GFP_KERNEL);
if (!stats)
return -ENOMEM;
stat = &stats->scm_statistic[0];
memcpy(&stat->stat_id, "MemLife ", sizeof(stat->stat_id));
stat->stat_val = 0;
/* Fetch the fuel gauge and populate it in payload */
rc = drc_pmem_query_stats(p, stats, 1);
if (rc < 0) {
dev_dbg(&p->pdev->dev, "Err(%d) fetching fuel gauge\n", rc);
goto free_stats;
}
statval = be64_to_cpu(stat->stat_val);
dev_dbg(&p->pdev->dev,
"Fetched fuel-gauge %llu", statval);
payload->health.extension_flags |=
PDSM_DIMM_HEALTH_RUN_GAUGE_VALID;
payload->health.dimm_fuel_gauge = statval;
rc = sizeof(struct nd_papr_pdsm_health);
free_stats:
kfree(stats);
return rc;
}
/* Add the dirty-shutdown-counter value to the pdsm */
static int papr_pdsm_dsc(struct papr_scm_priv *p,
union nd_pdsm_payload *payload)
{
payload->health.extension_flags |= PDSM_DIMM_DSC_VALID;
payload->health.dimm_dsc = p->dirty_shutdown_counter;
return sizeof(struct nd_papr_pdsm_health);
}
/* Fetch the DIMM health info and populate it in provided package. */
static int papr_pdsm_health(struct papr_scm_priv *p,
union nd_pdsm_payload *payload)
{
int rc;
/* Ensure dimm health mutex is taken preventing concurrent access */
rc = mutex_lock_interruptible(&p->health_mutex);
if (rc)
goto out;
/* Always fetch up-to-date dimm health data, ignoring cached values */
rc = __drc_pmem_query_health(p);
if (rc) {
mutex_unlock(&p->health_mutex);
goto out;
}
/* update health struct with various flags derived from health bitmap */
payload->health = (struct nd_papr_pdsm_health) {
.extension_flags = 0,
.dimm_unarmed = !!(p->health_bitmap & PAPR_PMEM_UNARMED_MASK),
.dimm_bad_shutdown = !!(p->health_bitmap & PAPR_PMEM_BAD_SHUTDOWN_MASK),
.dimm_bad_restore = !!(p->health_bitmap & PAPR_PMEM_BAD_RESTORE_MASK),
.dimm_scrubbed = !!(p->health_bitmap & PAPR_PMEM_SCRUBBED_AND_LOCKED),
.dimm_locked = !!(p->health_bitmap & PAPR_PMEM_SCRUBBED_AND_LOCKED),
.dimm_encrypted = !!(p->health_bitmap & PAPR_PMEM_ENCRYPTED),
.dimm_health = PAPR_PDSM_DIMM_HEALTHY,
};
/* Update field dimm_health based on health_bitmap flags */
if (p->health_bitmap & PAPR_PMEM_HEALTH_FATAL)
payload->health.dimm_health = PAPR_PDSM_DIMM_FATAL;
else if (p->health_bitmap & PAPR_PMEM_HEALTH_CRITICAL)
payload->health.dimm_health = PAPR_PDSM_DIMM_CRITICAL;
else if (p->health_bitmap & PAPR_PMEM_HEALTH_UNHEALTHY)
payload->health.dimm_health = PAPR_PDSM_DIMM_UNHEALTHY;
/* struct populated hence can release the mutex now */
mutex_unlock(&p->health_mutex);
/* Populate the fuel gauge meter in the payload */
papr_pdsm_fuel_gauge(p, payload);
/* Populate the dirty-shutdown-counter field */
papr_pdsm_dsc(p, payload);
rc = sizeof(struct nd_papr_pdsm_health);
out:
return rc;
}
/* Inject or clear a smart error in the dimm health bitmap */
static int papr_pdsm_smart_inject(struct papr_scm_priv *p,
union nd_pdsm_payload *payload)
{
int rc;
u32 supported_flags = 0;
u64 inject_mask = 0, clear_mask = 0;
u64 mask;
/* Check for individual smart error flags and update inject/clear masks */
if (payload->smart_inject.flags & PDSM_SMART_INJECT_HEALTH_FATAL) {
supported_flags |= PDSM_SMART_INJECT_HEALTH_FATAL;
if (payload->smart_inject.fatal_enable)
inject_mask |= PAPR_PMEM_HEALTH_FATAL;
else
clear_mask |= PAPR_PMEM_HEALTH_FATAL;
}
if (payload->smart_inject.flags & PDSM_SMART_INJECT_BAD_SHUTDOWN) {
supported_flags |= PDSM_SMART_INJECT_BAD_SHUTDOWN;
if (payload->smart_inject.unsafe_shutdown_enable)
inject_mask |= PAPR_PMEM_SHUTDOWN_DIRTY;
else
clear_mask |= PAPR_PMEM_SHUTDOWN_DIRTY;
}
dev_dbg(&p->pdev->dev, "[Smart-inject] inject_mask=%#llx clear_mask=%#llx\n",
inject_mask, clear_mask);
/* Prevent concurrent access to dimm health bitmap related members */
rc = mutex_lock_interruptible(&p->health_mutex);
if (rc)
return rc;
/* Use inject/clear masks to set health_bitmap_inject_mask */
mask = READ_ONCE(p->health_bitmap_inject_mask);
mask = (mask & ~clear_mask) | inject_mask;
WRITE_ONCE(p->health_bitmap_inject_mask, mask);
/* Invalidate cached health bitmap */
p->lasthealth_jiffies = 0;
mutex_unlock(&p->health_mutex);
/* Return the supported flags back to userspace */
payload->smart_inject.flags = supported_flags;
return sizeof(struct nd_papr_pdsm_health);
}
/*
* 'struct pdsm_cmd_desc'
* Identifies supported PDSMs' expected length of in/out payloads
* and pdsm service function.
*
* size_in : Size of input payload if any in the PDSM request.
* size_out : Size of output payload if any in the PDSM request.
* service : Service function for the PDSM request. Return semantics:
* rc < 0 : Error servicing PDSM and rc indicates the error.
* rc >= 0 : Serviced successfully and 'rc' indicates the number of
* bytes written to payload.
*/
struct pdsm_cmd_desc {
u32 size_in;
u32 size_out;
int (*service)(struct papr_scm_priv *dimm,
union nd_pdsm_payload *payload);
};
/* Holds all supported PDSMs' command descriptors */
static const struct pdsm_cmd_desc __pdsm_cmd_descriptors[] = {
[PAPR_PDSM_MIN] = {
.size_in = 0,
.size_out = 0,
.service = NULL,
},
/* New PDSM command descriptors to be added below */
[PAPR_PDSM_HEALTH] = {
.size_in = 0,
.size_out = sizeof(struct nd_papr_pdsm_health),
.service = papr_pdsm_health,
},
[PAPR_PDSM_SMART_INJECT] = {
.size_in = sizeof(struct nd_papr_pdsm_smart_inject),
.size_out = sizeof(struct nd_papr_pdsm_smart_inject),
.service = papr_pdsm_smart_inject,
},
/* Empty */
[PAPR_PDSM_MAX] = {
.size_in = 0,
.size_out = 0,
.service = NULL,
},
};
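/*
* As a concrete example of how these descriptors are applied by
* papr_scm_service_pdsm(): PAPR_PDSM_HEALTH has size_in == 0, so only the
* output size is checked and the request is accepted when nd_size_out ==
* sizeof(struct nd_papr_pdsm_health) + ND_PDSM_HDR_SIZE; on success,
* nd_fw_size is set to ND_PDSM_HDR_SIZE plus the number of bytes written
* by papr_pdsm_health().
*/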
/* Given a valid pdsm cmd return its command descriptor else return NULL */
static inline const struct pdsm_cmd_desc *pdsm_cmd_desc(enum papr_pdsm cmd)
{
if (cmd >= 0 && cmd < ARRAY_SIZE(__pdsm_cmd_descriptors))
return &__pdsm_cmd_descriptors[cmd];
return NULL;
}
/*
* For a given pdsm request call an appropriate service function.
* Returns errors if any while handling the pdsm command package.
*/
static int papr_scm_service_pdsm(struct papr_scm_priv *p,
struct nd_cmd_pkg *pkg)
{
/* Get the PDSM header and PDSM command */
struct nd_pkg_pdsm *pdsm_pkg = (struct nd_pkg_pdsm *)pkg->nd_payload;
enum papr_pdsm pdsm = (enum papr_pdsm)pkg->nd_command;
const struct pdsm_cmd_desc *pdsc;
int rc;
/* Fetch corresponding pdsm descriptor for validation and servicing */
pdsc = pdsm_cmd_desc(pdsm);
/* Validate pdsm descriptor */
/* Ensure that reserved fields are 0 */
if (pdsm_pkg->reserved[0] || pdsm_pkg->reserved[1]) {
dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid reserved field\n",
pdsm);
return -EINVAL;
}
/* If pdsm expects some input, then ensure that the size_in matches */
if (pdsc->size_in &&
pkg->nd_size_in != (pdsc->size_in + ND_PDSM_HDR_SIZE)) {
dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Mismatched size_in=%d\n",
pdsm, pkg->nd_size_in);
return -EINVAL;
}
/* If pdsm wants to return data, then ensure that size_out matches */
if (pdsc->size_out &&
pkg->nd_size_out != (pdsc->size_out + ND_PDSM_HDR_SIZE)) {
dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Mismatched size_out=%d\n",
pdsm, pkg->nd_size_out);
return -EINVAL;
}
/* Service the pdsm */
if (pdsc->service) {
dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Servicing..\n", pdsm);
rc = pdsc->service(p, &pdsm_pkg->payload);
if (rc < 0) {
/* error encountered while servicing pdsm */
pdsm_pkg->cmd_status = rc;
pkg->nd_fw_size = ND_PDSM_HDR_SIZE;
} else {
/* pdsm serviced and 'rc' bytes written to payload */
pdsm_pkg->cmd_status = 0;
pkg->nd_fw_size = ND_PDSM_HDR_SIZE + rc;
}
} else {
dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Unsupported PDSM request\n",
pdsm);
pdsm_pkg->cmd_status = -ENOENT;
pkg->nd_fw_size = ND_PDSM_HDR_SIZE;
}
return pdsm_pkg->cmd_status;
}
static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd, void *buf,
unsigned int buf_len, int *cmd_rc)
{
struct nd_cmd_get_config_size *get_size_hdr;
struct nd_cmd_pkg *call_pkg = NULL;
struct papr_scm_priv *p;
int rc;
rc = is_cmd_valid(nvdimm, cmd, buf, buf_len);
if (rc) {
pr_debug("Invalid cmd=0x%x. Err=%d\n", cmd, rc);
return rc;
}
/* Use a local variable in case cmd_rc pointer is NULL */
if (!cmd_rc)
cmd_rc = &rc;
p = nvdimm_provider_data(nvdimm);
switch (cmd) {
case ND_CMD_GET_CONFIG_SIZE:
get_size_hdr = buf;
get_size_hdr->status = 0;
get_size_hdr->max_xfer = 8;
get_size_hdr->config_size = p->metadata_size;
*cmd_rc = 0;
break;
case ND_CMD_GET_CONFIG_DATA:
*cmd_rc = papr_scm_meta_get(p, buf);
break;
case ND_CMD_SET_CONFIG_DATA:
*cmd_rc = papr_scm_meta_set(p, buf);
break;
case ND_CMD_CALL:
call_pkg = (struct nd_cmd_pkg *)buf;
*cmd_rc = papr_scm_service_pdsm(p, call_pkg);
break;
default:
dev_dbg(&p->pdev->dev, "Unknown command = %d\n", cmd);
return -EINVAL;
}
dev_dbg(&p->pdev->dev, "returned with cmd_rc = %d\n", *cmd_rc);
return 0;
}
static ssize_t health_bitmap_inject_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct nvdimm *dimm = to_nvdimm(dev);
struct papr_scm_priv *p = nvdimm_provider_data(dimm);
return sprintf(buf, "%#llx\n",
READ_ONCE(p->health_bitmap_inject_mask));
}
static DEVICE_ATTR_ADMIN_RO(health_bitmap_inject);
static ssize_t perf_stats_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int index;
ssize_t rc;
struct seq_buf s;
struct papr_scm_perf_stat *stat;
struct papr_scm_perf_stats *stats;
struct nvdimm *dimm = to_nvdimm(dev);
struct papr_scm_priv *p = nvdimm_provider_data(dimm);
if (!p->stat_buffer_len)
return -ENOENT;
/* Allocate the buffer for phyp where stats are written */
stats = kzalloc(p->stat_buffer_len, GFP_KERNEL);
if (!stats)
return -ENOMEM;
/* Ask phyp to return all dimm perf stats */
rc = drc_pmem_query_stats(p, stats, 0);
if (rc)
goto free_stats;
/*
* Go through the returned output buffer and print stats and
* values. Since stat_id is essentially a char string of
* 8 bytes, simply use the string format specifier to print it.
*/
seq_buf_init(&s, buf, PAGE_SIZE);
for (index = 0, stat = stats->scm_statistic;
index < be32_to_cpu(stats->num_statistics);
++index, ++stat) {
seq_buf_printf(&s, "%.8s = 0x%016llX\n",
stat->stat_id,
be64_to_cpu(stat->stat_val));
}
free_stats:
kfree(stats);
return rc ? rc : (ssize_t)seq_buf_used(&s);
}
static DEVICE_ATTR_ADMIN_RO(perf_stats);
static ssize_t flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvdimm *dimm = to_nvdimm(dev);
struct papr_scm_priv *p = nvdimm_provider_data(dimm);
struct seq_buf s;
u64 health;
int rc;
rc = drc_pmem_query_health(p);
if (rc)
return rc;
/* Copy health_bitmap locally, check masks & update out buffer */
health = READ_ONCE(p->health_bitmap);
seq_buf_init(&s, buf, PAGE_SIZE);
if (health & PAPR_PMEM_UNARMED_MASK)
seq_buf_printf(&s, "not_armed ");
if (health & PAPR_PMEM_BAD_SHUTDOWN_MASK)
seq_buf_printf(&s, "flush_fail ");
if (health & PAPR_PMEM_BAD_RESTORE_MASK)
seq_buf_printf(&s, "restore_fail ");
if (health & PAPR_PMEM_ENCRYPTED)
seq_buf_printf(&s, "encrypted ");
if (health & PAPR_PMEM_SMART_EVENT_MASK)
seq_buf_printf(&s, "smart_notify ");
if (health & PAPR_PMEM_SCRUBBED_AND_LOCKED)
seq_buf_printf(&s, "scrubbed locked ");
if (seq_buf_used(&s))
seq_buf_printf(&s, "\n");
return seq_buf_used(&s);
}
DEVICE_ATTR_RO(flags);
static ssize_t dirty_shutdown_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nvdimm *dimm = to_nvdimm(dev);
struct papr_scm_priv *p = nvdimm_provider_data(dimm);
return sysfs_emit(buf, "%llu\n", p->dirty_shutdown_counter);
}
DEVICE_ATTR_RO(dirty_shutdown);
static umode_t papr_nd_attribute_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct nvdimm *nvdimm = to_nvdimm(dev);
struct papr_scm_priv *p = nvdimm_provider_data(nvdimm);
	/* If perf-stats are not available, hide the perf_stats sysfs attribute */
if (attr == &dev_attr_perf_stats.attr && p->stat_buffer_len == 0)
return 0;
return attr->mode;
}
/* papr_scm specific dimm attributes */
static struct attribute *papr_nd_attributes[] = {
&dev_attr_flags.attr,
&dev_attr_perf_stats.attr,
&dev_attr_dirty_shutdown.attr,
&dev_attr_health_bitmap_inject.attr,
NULL,
};
static const struct attribute_group papr_nd_attribute_group = {
.name = "papr",
.is_visible = papr_nd_attribute_visible,
.attrs = papr_nd_attributes,
};
static const struct attribute_group *papr_nd_attr_groups[] = {
&papr_nd_attribute_group,
NULL,
};
static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
{
struct device *dev = &p->pdev->dev;
struct nd_mapping_desc mapping;
struct nd_region_desc ndr_desc;
unsigned long dimm_flags;
int target_nid, online_nid;
p->bus_desc.ndctl = papr_scm_ndctl;
p->bus_desc.module = THIS_MODULE;
p->bus_desc.of_node = p->pdev->dev.of_node;
p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL);
/* Set the dimm command family mask to accept PDSMs */
set_bit(NVDIMM_FAMILY_PAPR, &p->bus_desc.dimm_family_mask);
if (!p->bus_desc.provider_name)
return -ENOMEM;
p->bus = nvdimm_bus_register(NULL, &p->bus_desc);
if (!p->bus) {
dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn);
kfree(p->bus_desc.provider_name);
return -ENXIO;
}
dimm_flags = 0;
set_bit(NDD_LABELING, &dimm_flags);
/*
* Check if the nvdimm is unarmed. No locking needed as we are still
* initializing. Ignore error encountered if any.
*/
__drc_pmem_query_health(p);
if (p->health_bitmap & PAPR_PMEM_UNARMED_MASK)
set_bit(NDD_UNARMED, &dimm_flags);
p->nvdimm = nvdimm_create(p->bus, p, papr_nd_attr_groups,
dimm_flags, PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
if (!p->nvdimm) {
dev_err(dev, "Error creating DIMM object for %pOF\n", p->dn);
goto err;
}
if (nvdimm_bus_check_dimm_count(p->bus, 1))
goto err;
/* now add the region */
memset(&mapping, 0, sizeof(mapping));
mapping.nvdimm = p->nvdimm;
mapping.start = 0;
mapping.size = p->blocks * p->block_size; // XXX: potential overflow?
memset(&ndr_desc, 0, sizeof(ndr_desc));
target_nid = dev_to_node(&p->pdev->dev);
online_nid = numa_map_to_online_node(target_nid);
ndr_desc.numa_node = online_nid;
ndr_desc.target_node = target_nid;
ndr_desc.res = &p->res;
ndr_desc.of_node = p->dn;
ndr_desc.provider_data = p;
ndr_desc.mapping = &mapping;
ndr_desc.num_mappings = 1;
ndr_desc.nd_set = &p->nd_set;
if (p->hcall_flush_required) {
set_bit(ND_REGION_ASYNC, &ndr_desc.flags);
ndr_desc.flush = papr_scm_pmem_flush;
}
if (p->is_volatile)
p->region = nvdimm_volatile_region_create(p->bus, &ndr_desc);
else {
set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);
p->region = nvdimm_pmem_region_create(p->bus, &ndr_desc);
}
if (!p->region) {
dev_err(dev, "Error registering region %pR from %pOF\n",
ndr_desc.res, p->dn);
goto err;
}
if (target_nid != online_nid)
dev_info(dev, "Region registered with target node %d and online node %d",
target_nid, online_nid);
mutex_lock(&papr_ndr_lock);
list_add_tail(&p->region_list, &papr_nd_regions);
mutex_unlock(&papr_ndr_lock);
return 0;
err: nvdimm_bus_unregister(p->bus);
kfree(p->bus_desc.provider_name);
return -ENXIO;
}
static void papr_scm_add_badblock(struct nd_region *region,
struct nvdimm_bus *bus, u64 phys_addr)
{
u64 aligned_addr = ALIGN_DOWN(phys_addr, L1_CACHE_BYTES);
if (nvdimm_bus_add_badrange(bus, aligned_addr, L1_CACHE_BYTES)) {
pr_err("Bad block registration for 0x%llx failed\n", phys_addr);
return;
}
pr_debug("Add memory range (0x%llx - 0x%llx) as bad range\n",
aligned_addr, aligned_addr + L1_CACHE_BYTES);
nvdimm_region_notify(region, NVDIMM_REVALIDATE_POISON);
}
static int handle_mce_ue(struct notifier_block *nb, unsigned long val,
void *data)
{
struct machine_check_event *evt = data;
struct papr_scm_priv *p;
u64 phys_addr;
bool found = false;
if (evt->error_type != MCE_ERROR_TYPE_UE)
return NOTIFY_DONE;
if (list_empty(&papr_nd_regions))
return NOTIFY_DONE;
/*
* The physical address obtained here is PAGE_SIZE aligned, so get the
* exact address from the effective address
*/
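	/*
	 * e.g. (illustrative numbers, 64K pages): a page-aligned
	 * physical_address of 0x20000000000 plus an effective address ending
	 * in 0x1234 yields a phys_addr of 0x20000001234.
	 */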
phys_addr = evt->u.ue_error.physical_address +
(evt->u.ue_error.effective_address & ~PAGE_MASK);
if (!evt->u.ue_error.physical_address_provided ||
!is_zone_device_page(pfn_to_page(phys_addr >> PAGE_SHIFT)))
return NOTIFY_DONE;
/* mce notifier is called from a process context, so mutex is safe */
mutex_lock(&papr_ndr_lock);
list_for_each_entry(p, &papr_nd_regions, region_list) {
if (phys_addr >= p->res.start && phys_addr <= p->res.end) {
found = true;
break;
}
}
if (found)
papr_scm_add_badblock(p->region, p->bus, phys_addr);
mutex_unlock(&papr_ndr_lock);
return found ? NOTIFY_OK : NOTIFY_DONE;
}
static struct notifier_block mce_ue_nb = {
.notifier_call = handle_mce_ue
};
static int papr_scm_probe(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
u32 drc_index, metadata_size;
u64 blocks, block_size;
struct papr_scm_priv *p;
u8 uuid_raw[UUID_SIZE];
const char *uuid_str;
ssize_t stat_size;
uuid_t uuid;
int rc;
/* check we have all the required DT properties */
if (of_property_read_u32(dn, "ibm,my-drc-index", &drc_index)) {
dev_err(&pdev->dev, "%pOF: missing drc-index!\n", dn);
return -ENODEV;
}
if (of_property_read_u64(dn, "ibm,block-size", &block_size)) {
dev_err(&pdev->dev, "%pOF: missing block-size!\n", dn);
return -ENODEV;
}
if (of_property_read_u64(dn, "ibm,number-of-blocks", &blocks)) {
dev_err(&pdev->dev, "%pOF: missing number-of-blocks!\n", dn);
return -ENODEV;
}
if (of_property_read_string(dn, "ibm,unit-guid", &uuid_str)) {
dev_err(&pdev->dev, "%pOF: missing unit-guid!\n", dn);
return -ENODEV;
}
/*
	 * Open Firmware platform device creation won't update the NUMA
	 * distance table. For PAPR SCM devices we use numa_map_to_online_node()
	 * to find the nearest online NUMA node, and that requires correct
* distance table information.
*/
update_numa_distance(dn);
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
/* Initialize the dimm mutex */
mutex_init(&p->health_mutex);
/* optional DT properties */
of_property_read_u32(dn, "ibm,metadata-size", &metadata_size);
p->dn = dn;
p->drc_index = drc_index;
p->block_size = block_size;
p->blocks = blocks;
p->is_volatile = !of_property_read_bool(dn, "ibm,cache-flush-required");
p->hcall_flush_required = of_property_read_bool(dn, "ibm,hcall-flush-required");
if (of_property_read_u64(dn, "ibm,persistence-failed-count",
&p->dirty_shutdown_counter))
p->dirty_shutdown_counter = 0;
	/* We just need to ensure that the set cookies are unique across NVDIMM sets */
uuid_parse(uuid_str, &uuid);
/*
* The cookie1 and cookie2 are not really little endian.
* We store a raw buffer representation of the
* uuid string so that we can compare this with the label
* area cookie irrespective of the endian configuration
* with which the kernel is built.
*
* Historically we stored the cookie in the below format.
* for a uuid string 72511b67-0b3b-42fd-8d1d-5be3cae8bcaa
* cookie1 was 0xfd423b0b671b5172
* cookie2 was 0xaabce8cae35b1d8d
*/
export_uuid(uuid_raw, &uuid);
p->nd_set.cookie1 = get_unaligned_le64(&uuid_raw[0]);
p->nd_set.cookie2 = get_unaligned_le64(&uuid_raw[8]);
/* might be zero */
p->metadata_size = metadata_size;
p->pdev = pdev;
/* request the hypervisor to bind this region to somewhere in memory */
rc = drc_pmem_bind(p);
	/* If phyp says the DRC memory is still bound, force an unbind and retry */
if (rc == H_OVERLAP)
rc = drc_pmem_query_n_bind(p);
if (rc != H_SUCCESS) {
dev_err(&p->pdev->dev, "bind err: %d\n", rc);
rc = -ENXIO;
goto err;
}
/* setup the resource for the newly bound range */
p->res.start = p->bound_addr;
p->res.end = p->bound_addr + p->blocks * p->block_size - 1;
p->res.name = pdev->name;
p->res.flags = IORESOURCE_MEM;
	/* Try retrieving the stat buffer to see if perf stats are supported */
stat_size = drc_pmem_query_stats(p, NULL, 0);
if (stat_size > 0) {
p->stat_buffer_len = stat_size;
dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n",
p->stat_buffer_len);
}
rc = papr_scm_nvdimm_init(p);
if (rc)
goto err2;
platform_set_drvdata(pdev, p);
papr_scm_pmu_register(p);
return 0;
err2: drc_pmem_unbind(p);
err: kfree(p);
return rc;
}
static int papr_scm_remove(struct platform_device *pdev)
{
struct papr_scm_priv *p = platform_get_drvdata(pdev);
mutex_lock(&papr_ndr_lock);
list_del(&p->region_list);
mutex_unlock(&papr_ndr_lock);
nvdimm_bus_unregister(p->bus);
drc_pmem_unbind(p);
if (pdev->archdata.priv)
unregister_nvdimm_pmu(pdev->archdata.priv);
pdev->archdata.priv = NULL;
kfree(p->bus_desc.provider_name);
kfree(p);
return 0;
}
static const struct of_device_id papr_scm_match[] = {
{ .compatible = "ibm,pmemory" },
{ .compatible = "ibm,pmemory-v2" },
{ },
};
static struct platform_driver papr_scm_driver = {
.probe = papr_scm_probe,
.remove = papr_scm_remove,
.driver = {
.name = "papr_scm",
.of_match_table = papr_scm_match,
},
};
static int __init papr_scm_init(void)
{
int ret;
ret = platform_driver_register(&papr_scm_driver);
if (!ret)
mce_register_notifier(&mce_ue_nb);
return ret;
}
module_init(papr_scm_init);
static void __exit papr_scm_exit(void)
{
mce_unregister_notifier(&mce_ue_nb);
platform_driver_unregister(&papr_scm_driver);
}
module_exit(papr_scm_exit);
MODULE_DEVICE_TABLE(of, papr_scm_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
| linux-master | arch/powerpc/platforms/pseries/papr_scm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2013, Michael Ellerman, IBM Corporation.
*/
#define pr_fmt(fmt) "pseries-rng: " fmt
#include <linux/kernel.h>
#include <linux/of.h>
#include <asm/archrandom.h>
#include <asm/machdep.h>
#include <asm/plpar_wrappers.h>
#include "pseries.h"
static int pseries_get_random_long(unsigned long *v)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
if (plpar_hcall(H_RANDOM, retbuf) == H_SUCCESS) {
*v = retbuf[0];
return 1;
}
return 0;
}
void __init pseries_rng_init(void)
{
struct device_node *dn;
dn = of_find_compatible_node(NULL, NULL, "ibm,random");
if (!dn)
return;
ppc_md.get_random_seed = pseries_get_random_long;
of_node_put(dn);
}
| linux-master | arch/powerpc/platforms/pseries/rng.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Platform energy and frequency attributes driver
*
 * This driver creates a sysfs directory at /sys/firmware/papr/ which holds a
 * directory structure of files in keyword-value pairs that describe the
 * energy and frequency configuration of the system.
*
* The format of exposing the sysfs information is as follows:
* /sys/firmware/papr/energy_scale_info/
* |-- <id>/
* |-- desc
* |-- value
* |-- value_desc (if exists)
* |-- <id>/
* |-- desc
* |-- value
* |-- value_desc (if exists)
*
* Copyright 2022 IBM Corp.
*/
#include <asm/hvcall.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include "pseries.h"
/*
* Flag attributes to fetch either all or one attribute from the HCALL
* flag = BE(0) => fetch all attributes with firstAttributeId = 0
* flag = BE(1) => fetch a single attribute with firstAttributeId = id
*/
#define ESI_FLAGS_ALL 0
#define ESI_FLAGS_SINGLE (1ull << 63)
#define KOBJ_MAX_ATTRS 3
#define ESI_HDR_SIZE sizeof(struct h_energy_scale_info_hdr)
#define ESI_ATTR_SIZE sizeof(struct energy_scale_attribute)
#define CURR_MAX_ESI_ATTRS 8
struct energy_scale_attribute {
__be64 id;
__be64 val;
u8 desc[64];
u8 value_desc[64];
} __packed;
struct h_energy_scale_info_hdr {
__be64 num_attrs;
__be64 array_offset;
u8 data_header_version;
} __packed;
struct papr_attr {
u64 id;
struct kobj_attribute kobj_attr;
};
struct papr_group {
struct attribute_group pg;
struct papr_attr pgattrs[KOBJ_MAX_ATTRS];
};
static struct papr_group *papr_groups;
/* /sys/firmware/papr */
static struct kobject *papr_kobj;
/* /sys/firmware/papr/energy_scale_info */
static struct kobject *esi_kobj;
/*
 * Energy modes can change dynamically, hence a new hcall is made each time
 * the information needs to be retrieved.
*/
static int papr_get_attr(u64 id, struct energy_scale_attribute *esi)
{
int esi_buf_size = ESI_HDR_SIZE + (CURR_MAX_ESI_ATTRS * ESI_ATTR_SIZE);
int ret, max_esi_attrs = CURR_MAX_ESI_ATTRS;
struct energy_scale_attribute *curr_esi;
struct h_energy_scale_info_hdr *hdr;
char *buf;
buf = kmalloc(esi_buf_size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
retry:
ret = plpar_hcall_norets(H_GET_ENERGY_SCALE_INFO, ESI_FLAGS_SINGLE,
id, virt_to_phys(buf),
esi_buf_size);
/*
* If the hcall fails with not enough memory for either the
* header or data, attempt to allocate more
*/
if (ret == H_PARTIAL || ret == H_P4) {
char *temp_buf;
max_esi_attrs += 4;
		esi_buf_size = ESI_HDR_SIZE + (ESI_ATTR_SIZE * max_esi_attrs);
		temp_buf = krealloc(buf, esi_buf_size, GFP_KERNEL);
		if (!temp_buf) {
			kfree(buf);
			return -ENOMEM;
		}
		buf = temp_buf;
goto retry;
}
if (ret != H_SUCCESS) {
pr_warn("hcall failed: H_GET_ENERGY_SCALE_INFO");
ret = -EIO;
goto out_buf;
}
hdr = (struct h_energy_scale_info_hdr *) buf;
curr_esi = (struct energy_scale_attribute *)
(buf + be64_to_cpu(hdr->array_offset));
if (esi_buf_size <
be64_to_cpu(hdr->array_offset) + (be64_to_cpu(hdr->num_attrs)
* sizeof(struct energy_scale_attribute))) {
ret = -EIO;
goto out_buf;
}
*esi = *curr_esi;
out_buf:
kfree(buf);
return ret;
}
/*
* Extract and export the description of the energy scale attributes
*/
static ssize_t desc_show(struct kobject *kobj,
struct kobj_attribute *kobj_attr,
char *buf)
{
struct papr_attr *pattr = container_of(kobj_attr, struct papr_attr,
kobj_attr);
struct energy_scale_attribute esi;
int ret;
ret = papr_get_attr(pattr->id, &esi);
if (ret)
return ret;
return sysfs_emit(buf, "%s\n", esi.desc);
}
/*
* Extract and export the numeric value of the energy scale attributes
*/
static ssize_t val_show(struct kobject *kobj,
struct kobj_attribute *kobj_attr,
char *buf)
{
struct papr_attr *pattr = container_of(kobj_attr, struct papr_attr,
kobj_attr);
struct energy_scale_attribute esi;
int ret;
ret = papr_get_attr(pattr->id, &esi);
if (ret)
return ret;
return sysfs_emit(buf, "%llu\n", be64_to_cpu(esi.val));
}
/*
* Extract and export the value description in string format of the energy
* scale attributes
*/
static ssize_t val_desc_show(struct kobject *kobj,
struct kobj_attribute *kobj_attr,
char *buf)
{
struct papr_attr *pattr = container_of(kobj_attr, struct papr_attr,
kobj_attr);
struct energy_scale_attribute esi;
int ret;
ret = papr_get_attr(pattr->id, &esi);
if (ret)
return ret;
return sysfs_emit(buf, "%s\n", esi.value_desc);
}
static struct papr_ops_info {
const char *attr_name;
ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *kobj_attr,
char *buf);
} ops_info[KOBJ_MAX_ATTRS] = {
{ "desc", desc_show },
{ "value", val_show },
{ "value_desc", val_desc_show },
};
static void add_attr(u64 id, int index, struct papr_attr *attr)
{
attr->id = id;
sysfs_attr_init(&attr->kobj_attr.attr);
attr->kobj_attr.attr.name = ops_info[index].attr_name;
attr->kobj_attr.attr.mode = 0444;
attr->kobj_attr.show = ops_info[index].show;
}
static int add_attr_group(u64 id, struct papr_group *pg, bool show_val_desc)
{
int i;
for (i = 0; i < KOBJ_MAX_ATTRS; i++) {
if (!strcmp(ops_info[i].attr_name, "value_desc") &&
!show_val_desc) {
continue;
}
add_attr(id, i, &pg->pgattrs[i]);
pg->pg.attrs[i] = &pg->pgattrs[i].kobj_attr.attr;
}
return sysfs_create_group(esi_kobj, &pg->pg);
}
static int __init papr_init(void)
{
int esi_buf_size = ESI_HDR_SIZE + (CURR_MAX_ESI_ATTRS * ESI_ATTR_SIZE);
int ret, idx, i, max_esi_attrs = CURR_MAX_ESI_ATTRS;
struct h_energy_scale_info_hdr *esi_hdr;
struct energy_scale_attribute *esi_attrs;
uint64_t num_attrs;
char *esi_buf;
if (!firmware_has_feature(FW_FEATURE_LPAR) ||
!firmware_has_feature(FW_FEATURE_ENERGY_SCALE_INFO)) {
return -ENXIO;
}
esi_buf = kmalloc(esi_buf_size, GFP_KERNEL);
if (esi_buf == NULL)
return -ENOMEM;
/*
* hcall(
* uint64 H_GET_ENERGY_SCALE_INFO, // Get energy scale info
* uint64 flags, // Per the flag request
* uint64 firstAttributeId, // The attribute id
* uint64 bufferAddress, // Guest physical address of the output buffer
* uint64 bufferSize); // The size in bytes of the output buffer
*/
retry:
ret = plpar_hcall_norets(H_GET_ENERGY_SCALE_INFO, ESI_FLAGS_ALL, 0,
virt_to_phys(esi_buf), esi_buf_size);
/*
* If the hcall fails with not enough memory for either the
* header or data, attempt to allocate more
*/
if (ret == H_PARTIAL || ret == H_P4) {
char *temp_esi_buf;
max_esi_attrs += 4;
		esi_buf_size = ESI_HDR_SIZE + (ESI_ATTR_SIZE * max_esi_attrs);
		temp_esi_buf = krealloc(esi_buf, esi_buf_size, GFP_KERNEL);
		if (!temp_esi_buf)
			goto out_free_esi_buf;
		esi_buf = temp_esi_buf;
goto retry;
}
if (ret != H_SUCCESS) {
pr_warn("hcall failed: H_GET_ENERGY_SCALE_INFO, ret: %d\n", ret);
goto out_free_esi_buf;
}
esi_hdr = (struct h_energy_scale_info_hdr *) esi_buf;
num_attrs = be64_to_cpu(esi_hdr->num_attrs);
esi_attrs = (struct energy_scale_attribute *)
(esi_buf + be64_to_cpu(esi_hdr->array_offset));
if (esi_buf_size <
be64_to_cpu(esi_hdr->array_offset) +
(num_attrs * sizeof(struct energy_scale_attribute))) {
goto out_free_esi_buf;
}
papr_groups = kcalloc(num_attrs, sizeof(*papr_groups), GFP_KERNEL);
if (!papr_groups)
goto out_free_esi_buf;
papr_kobj = kobject_create_and_add("papr", firmware_kobj);
if (!papr_kobj) {
pr_warn("kobject_create_and_add papr failed\n");
goto out_papr_groups;
}
esi_kobj = kobject_create_and_add("energy_scale_info", papr_kobj);
if (!esi_kobj) {
pr_warn("kobject_create_and_add energy_scale_info failed\n");
goto out_kobj;
}
/* Allocate the groups before registering */
for (idx = 0; idx < num_attrs; idx++) {
papr_groups[idx].pg.attrs = kcalloc(KOBJ_MAX_ATTRS + 1,
sizeof(*papr_groups[idx].pg.attrs),
GFP_KERNEL);
if (!papr_groups[idx].pg.attrs)
goto out_pgattrs;
papr_groups[idx].pg.name = kasprintf(GFP_KERNEL, "%lld",
be64_to_cpu(esi_attrs[idx].id));
if (papr_groups[idx].pg.name == NULL)
goto out_pgattrs;
}
for (idx = 0; idx < num_attrs; idx++) {
bool show_val_desc = true;
/* Do not add the value desc attr if it does not exist */
if (strnlen(esi_attrs[idx].value_desc,
sizeof(esi_attrs[idx].value_desc)) == 0)
show_val_desc = false;
if (add_attr_group(be64_to_cpu(esi_attrs[idx].id),
&papr_groups[idx],
show_val_desc)) {
pr_warn("Failed to create papr attribute group %s\n",
papr_groups[idx].pg.name);
idx = num_attrs;
goto out_pgattrs;
}
}
kfree(esi_buf);
return 0;
out_pgattrs:
for (i = 0; i < idx ; i++) {
kfree(papr_groups[i].pg.attrs);
kfree(papr_groups[i].pg.name);
}
kobject_put(esi_kobj);
out_kobj:
kobject_put(papr_kobj);
out_papr_groups:
kfree(papr_groups);
out_free_esi_buf:
kfree(esi_buf);
return -ENOMEM;
}
machine_device_initcall(pseries, papr_init);
| linux-master | arch/powerpc/platforms/pseries/papr_platform_attributes.c |
// SPDX-License-Identifier: GPL-2.0-only
// Secure variable implementation using the PowerVM LPAR Platform KeyStore (PLPKS)
//
// Copyright 2022, 2023 IBM Corporation
// Authors: Russell Currey
// Andrew Donnellan
// Nayna Jain
#define pr_fmt(fmt) "secvar: "fmt
#include <linux/printk.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kobject.h>
#include <linux/nls.h>
#include <asm/machdep.h>
#include <asm/secvar.h>
#include <asm/plpks.h>
// Config attributes for sysfs
#define PLPKS_CONFIG_ATTR(name, fmt, func) \
static ssize_t name##_show(struct kobject *kobj, \
struct kobj_attribute *attr, \
char *buf) \
{ \
return sysfs_emit(buf, fmt, func()); \
} \
static struct kobj_attribute attr_##name = __ATTR_RO(name)
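/*
 * For example, the PLPKS_CONFIG_ATTR(version, ...) line below expands to a
 * version_show() helper plus a read-only kobj_attribute named attr_version,
 * which is then exported through the config_attrs list further down.
 */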
PLPKS_CONFIG_ATTR(version, "%u\n", plpks_get_version);
PLPKS_CONFIG_ATTR(max_object_size, "%u\n", plpks_get_maxobjectsize);
PLPKS_CONFIG_ATTR(total_size, "%u\n", plpks_get_totalsize);
PLPKS_CONFIG_ATTR(used_space, "%u\n", plpks_get_usedspace);
PLPKS_CONFIG_ATTR(supported_policies, "%08x\n", plpks_get_supportedpolicies);
PLPKS_CONFIG_ATTR(signed_update_algorithms, "%016llx\n", plpks_get_signedupdatealgorithms);
static const struct attribute *config_attrs[] = {
&attr_version.attr,
&attr_max_object_size.attr,
&attr_total_size.attr,
&attr_used_space.attr,
&attr_supported_policies.attr,
&attr_signed_update_algorithms.attr,
NULL,
};
static u32 get_policy(const char *name)
{
if ((strcmp(name, "db") == 0) ||
(strcmp(name, "dbx") == 0) ||
(strcmp(name, "grubdb") == 0) ||
(strcmp(name, "grubdbx") == 0) ||
(strcmp(name, "sbat") == 0))
return (PLPKS_WORLDREADABLE | PLPKS_SIGNEDUPDATE);
else
return PLPKS_SIGNEDUPDATE;
}
static const char * const plpks_var_names[] = {
"PK",
"KEK",
"db",
"dbx",
"grubdb",
"grubdbx",
"sbat",
"moduledb",
"trustedcadb",
NULL,
};
static int plpks_get_variable(const char *key, u64 key_len, u8 *data,
u64 *data_size)
{
struct plpks_var var = {0};
int rc = 0;
// We subtract 1 from key_len because we don't need to include the
// null terminator at the end of the string
var.name = kcalloc(key_len - 1, sizeof(wchar_t), GFP_KERNEL);
if (!var.name)
return -ENOMEM;
rc = utf8s_to_utf16s(key, key_len - 1, UTF16_LITTLE_ENDIAN, (wchar_t *)var.name,
key_len - 1);
if (rc < 0)
goto err;
var.namelen = rc * 2;
var.os = PLPKS_VAR_LINUX;
if (data) {
var.data = data;
var.datalen = *data_size;
}
rc = plpks_read_os_var(&var);
if (rc)
goto err;
*data_size = var.datalen;
err:
kfree(var.name);
if (rc && rc != -ENOENT) {
pr_err("Failed to read variable '%s': %d\n", key, rc);
// Return -EIO since userspace probably doesn't care about the
// specific error
rc = -EIO;
}
return rc;
}
static int plpks_set_variable(const char *key, u64 key_len, u8 *data,
u64 data_size)
{
struct plpks_var var = {0};
int rc = 0;
u64 flags;
// Secure variables need to be prefixed with 8 bytes of flags.
// We only want to perform the write if we have at least one byte of data.
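	// Expected layout of 'data' on write (derived from the code below):
	//   bytes 0..7 : signed-update flags, big-endian u64
	//   bytes 8..  : variable payload handed to the hypervisor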
if (data_size <= sizeof(flags))
return -EINVAL;
// We subtract 1 from key_len because we don't need to include the
// null terminator at the end of the string
var.name = kcalloc(key_len - 1, sizeof(wchar_t), GFP_KERNEL);
if (!var.name)
return -ENOMEM;
rc = utf8s_to_utf16s(key, key_len - 1, UTF16_LITTLE_ENDIAN, (wchar_t *)var.name,
key_len - 1);
if (rc < 0)
goto err;
var.namelen = rc * 2;
// Flags are contained in the first 8 bytes of the buffer, and are always big-endian
flags = be64_to_cpup((__be64 *)data);
var.datalen = data_size - sizeof(flags);
var.data = data + sizeof(flags);
var.os = PLPKS_VAR_LINUX;
var.policy = get_policy(key);
// Unlike in the read case, the plpks error code can be useful to
// userspace on write, so we return it rather than just -EIO
rc = plpks_signed_update_var(&var, flags);
err:
kfree(var.name);
return rc;
}
// PLPKS dynamic secure boot doesn't give us a format string in the same way OPAL does.
// Instead, report the format using the SB_VERSION variable in the keystore.
// The string is made up by us, and takes the form "ibm,plpks-sb-v<n>" (or "ibm,plpks-sb-unknown"
// if the SB_VERSION variable doesn't exist). Hypervisor defines the SB_VERSION variable as a
// "1 byte unsigned integer value".
static ssize_t plpks_secvar_format(char *buf, size_t bufsize)
{
struct plpks_var var = {0};
ssize_t ret;
u8 version;
var.component = NULL;
	// Only the signed variables have null bytes in their names; this one doesn't
var.name = "SB_VERSION";
var.namelen = strlen(var.name);
var.datalen = 1;
var.data = &version;
// Unlike the other vars, SB_VERSION is owned by firmware instead of the OS
ret = plpks_read_fw_var(&var);
if (ret) {
if (ret == -ENOENT) {
ret = snprintf(buf, bufsize, "ibm,plpks-sb-unknown");
} else {
pr_err("Error %ld reading SB_VERSION from firmware\n", ret);
ret = -EIO;
}
goto err;
}
ret = snprintf(buf, bufsize, "ibm,plpks-sb-v%hhu", version);
err:
return ret;
}
static int plpks_max_size(u64 *max_size)
{
// The max object size reported by the hypervisor is accurate for the
// object itself, but we use the first 8 bytes of data on write as the
// signed update flags, so the max size a user can write is larger.
*max_size = (u64)plpks_get_maxobjectsize() + sizeof(u64);
return 0;
}
static const struct secvar_operations plpks_secvar_ops = {
.get = plpks_get_variable,
.set = plpks_set_variable,
.format = plpks_secvar_format,
.max_size = plpks_max_size,
.config_attrs = config_attrs,
.var_names = plpks_var_names,
};
static int plpks_secvar_init(void)
{
if (!plpks_is_available())
return -ENODEV;
return set_secvar_ops(&plpks_secvar_ops);
}
machine_device_initcall(pseries, plpks_secvar_init);
| linux-master | arch/powerpc/platforms/pseries/plpks-secvar.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <asm/time.h>
#include <asm/cacheflush.h>
#include <asm/mpc52xx.h>
/* these are defined in mpc52xx_sleep.S, and only used here */
extern void mpc52xx_deep_sleep(void __iomem *sram, void __iomem *sdram_regs,
struct mpc52xx_cdm __iomem *, struct mpc52xx_intr __iomem*);
extern void mpc52xx_ds_sram(void);
extern const long mpc52xx_ds_sram_size;
extern void mpc52xx_ds_cached(void);
extern const long mpc52xx_ds_cached_size;
static void __iomem *mbar;
static void __iomem *sdram;
static struct mpc52xx_cdm __iomem *cdm;
static struct mpc52xx_intr __iomem *intr;
static struct mpc52xx_gpio_wkup __iomem *gpiow;
static void __iomem *sram;
static int sram_size;
struct mpc52xx_suspend mpc52xx_suspend;
static int mpc52xx_pm_valid(suspend_state_t state)
{
switch (state) {
case PM_SUSPEND_STANDBY:
return 1;
default:
return 0;
}
}
int mpc52xx_set_wakeup_gpio(u8 pin, u8 level)
{
u16 tmp;
/* enable gpio */
out_8(&gpiow->wkup_gpioe, in_8(&gpiow->wkup_gpioe) | (1 << pin));
/* set as input */
out_8(&gpiow->wkup_ddr, in_8(&gpiow->wkup_ddr) & ~(1 << pin));
/* enable deep sleep interrupt */
out_8(&gpiow->wkup_inten, in_8(&gpiow->wkup_inten) | (1 << pin));
/* low/high level creates wakeup interrupt */
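	/* (!level + 1) evaluates to 1 when 'level' is non-zero and to 2 when
	 * 'level' is 0, selecting the corresponding 2-bit itype field value */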
tmp = in_be16(&gpiow->wkup_itype);
tmp &= ~(0x3 << (pin * 2));
tmp |= (!level + 1) << (pin * 2);
out_be16(&gpiow->wkup_itype, tmp);
/* master enable */
out_8(&gpiow->wkup_maste, 1);
return 0;
}
int mpc52xx_pm_prepare(void)
{
struct device_node *np;
static const struct of_device_id immr_ids[] = {
{ .compatible = "fsl,mpc5200-immr", },
{ .compatible = "fsl,mpc5200b-immr", },
{ .type = "soc", .compatible = "mpc5200", }, /* lite5200 */
{ .type = "builtin", .compatible = "mpc5200", }, /* efika */
{}
};
struct resource res;
/* map the whole register space */
np = of_find_matching_node(NULL, immr_ids);
if (of_address_to_resource(np, 0, &res)) {
pr_err("mpc52xx_pm_prepare(): could not get IMMR address\n");
of_node_put(np);
return -ENOSYS;
}
mbar = ioremap(res.start, 0xc000); /* we should map whole region including SRAM */
of_node_put(np);
if (!mbar) {
pr_err("mpc52xx_pm_prepare(): could not map registers\n");
return -ENOSYS;
}
/* these offsets are from mpc5200 users manual */
sdram = mbar + 0x100;
cdm = mbar + 0x200;
intr = mbar + 0x500;
gpiow = mbar + 0xc00;
sram = mbar + 0x8000; /* Those will be handled by the */
sram_size = 0x4000; /* bestcomm driver soon */
/* call board suspend code, if applicable */
if (mpc52xx_suspend.board_suspend_prepare)
mpc52xx_suspend.board_suspend_prepare(mbar);
else {
printk(KERN_ALERT "%s: %i don't know how to wake up the board\n",
__func__, __LINE__);
goto out_unmap;
}
return 0;
out_unmap:
iounmap(mbar);
return -ENOSYS;
}
char saved_sram[0x4000];
int mpc52xx_pm_enter(suspend_state_t state)
{
u32 clk_enables;
u32 msr, hid0;
u32 intr_main_mask;
void __iomem * irq_0x500 = (void __iomem *)CONFIG_KERNEL_START + 0x500;
unsigned long irq_0x500_stop = (unsigned long)irq_0x500 + mpc52xx_ds_cached_size;
char saved_0x500[0x600-0x500];
if (WARN_ON(mpc52xx_ds_cached_size > sizeof(saved_0x500)))
return -ENOMEM;
/* disable all interrupts in PIC */
intr_main_mask = in_be32(&intr->main_mask);
out_be32(&intr->main_mask, intr_main_mask | 0x1ffff);
/* don't let DEC expire any time soon */
mtspr(SPRN_DEC, 0x7fffffff);
/* save SRAM */
memcpy(saved_sram, sram, sram_size);
/* copy low level suspend code to sram */
memcpy(sram, mpc52xx_ds_sram, mpc52xx_ds_sram_size);
out_8(&cdm->ccs_sleep_enable, 1);
out_8(&cdm->osc_sleep_enable, 1);
out_8(&cdm->ccs_qreq_test, 1);
/* disable all but SDRAM and bestcomm (SRAM) clocks */
clk_enables = in_be32(&cdm->clk_enables);
out_be32(&cdm->clk_enables, clk_enables & 0x00088000);
/* disable power management */
msr = mfmsr();
mtmsr(msr & ~MSR_POW);
/* enable sleep mode, disable others */
hid0 = mfspr(SPRN_HID0);
mtspr(SPRN_HID0, (hid0 & ~(HID0_DOZE | HID0_NAP | HID0_DPM)) | HID0_SLEEP);
/* save original, copy our irq handler, flush from dcache and invalidate icache */
memcpy(saved_0x500, irq_0x500, mpc52xx_ds_cached_size);
memcpy(irq_0x500, mpc52xx_ds_cached, mpc52xx_ds_cached_size);
flush_icache_range((unsigned long)irq_0x500, irq_0x500_stop);
/* call low-level sleep code */
mpc52xx_deep_sleep(sram, sdram, cdm, intr);
/* restore original irq handler */
memcpy(irq_0x500, saved_0x500, mpc52xx_ds_cached_size);
flush_icache_range((unsigned long)irq_0x500, irq_0x500_stop);
/* restore old power mode */
mtmsr(msr & ~MSR_POW);
mtspr(SPRN_HID0, hid0);
mtmsr(msr);
out_be32(&cdm->clk_enables, clk_enables);
out_8(&cdm->ccs_sleep_enable, 0);
out_8(&cdm->osc_sleep_enable, 0);
/* restore SRAM */
memcpy(sram, saved_sram, sram_size);
/* reenable interrupts in PIC */
out_be32(&intr->main_mask, intr_main_mask);
return 0;
}
void mpc52xx_pm_finish(void)
{
/* call board resume code */
if (mpc52xx_suspend.board_resume_finish)
mpc52xx_suspend.board_resume_finish(mbar);
iounmap(mbar);
}
static const struct platform_suspend_ops mpc52xx_pm_ops = {
.valid = mpc52xx_pm_valid,
.prepare = mpc52xx_pm_prepare,
.enter = mpc52xx_pm_enter,
.finish = mpc52xx_pm_finish,
};
int __init mpc52xx_pm_init(void)
{
suspend_set_ops(&mpc52xx_pm_ops);
return 0;
}
| linux-master | arch/powerpc/platforms/52xx/mpc52xx_pm.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/of_address.h>
#include <asm/io.h>
#include <asm/time.h>
#include <asm/mpc52xx.h>
#include <asm/switch_to.h>
/* defined in lite5200_sleep.S and only used here */
extern void lite5200_low_power(void __iomem *sram, void __iomem *mbar);
static struct mpc52xx_cdm __iomem *cdm;
static struct mpc52xx_intr __iomem *pic;
static struct mpc52xx_sdma __iomem *bes;
static struct mpc52xx_xlb __iomem *xlb;
static struct mpc52xx_gpio __iomem *gps;
static struct mpc52xx_gpio_wkup __iomem *gpw;
static void __iomem *pci;
static void __iomem *sram;
static const int sram_size = 0x4000; /* 16 kBytes */
static void __iomem *mbar;
static suspend_state_t lite5200_pm_target_state;
static int lite5200_pm_valid(suspend_state_t state)
{
switch (state) {
case PM_SUSPEND_STANDBY:
case PM_SUSPEND_MEM:
return 1;
default:
return 0;
}
}
static int lite5200_pm_begin(suspend_state_t state)
{
if (lite5200_pm_valid(state)) {
lite5200_pm_target_state = state;
return 0;
}
return -EINVAL;
}
static int lite5200_pm_prepare(void)
{
struct device_node *np;
static const struct of_device_id immr_ids[] = {
{ .compatible = "fsl,mpc5200-immr", },
{ .compatible = "fsl,mpc5200b-immr", },
{ .type = "soc", .compatible = "mpc5200", }, /* lite5200 */
{ .type = "builtin", .compatible = "mpc5200", }, /* efika */
{}
};
struct resource res;
/* deep sleep? let mpc52xx code handle that */
if (lite5200_pm_target_state == PM_SUSPEND_STANDBY)
return mpc52xx_pm_prepare();
if (lite5200_pm_target_state != PM_SUSPEND_MEM)
return -EINVAL;
/* map registers */
np = of_find_matching_node(NULL, immr_ids);
of_address_to_resource(np, 0, &res);
of_node_put(np);
mbar = ioremap(res.start, 0xC000);
if (!mbar) {
printk(KERN_ERR "%s:%i Error mapping registers\n", __func__, __LINE__);
return -ENOSYS;
}
cdm = mbar + 0x200;
pic = mbar + 0x500;
gps = mbar + 0xb00;
gpw = mbar + 0xc00;
pci = mbar + 0xd00;
bes = mbar + 0x1200;
xlb = mbar + 0x1f00;
sram = mbar + 0x8000;
return 0;
}
/* save and restore registers not bound to any real devices */
static struct mpc52xx_cdm scdm;
static struct mpc52xx_intr spic;
static struct mpc52xx_sdma sbes;
static struct mpc52xx_xlb sxlb;
static struct mpc52xx_gpio sgps;
static struct mpc52xx_gpio_wkup sgpw;
static char spci[0x200];
static void lite5200_save_regs(void)
{
_memcpy_fromio(&spic, pic, sizeof(*pic));
_memcpy_fromio(&sbes, bes, sizeof(*bes));
_memcpy_fromio(&scdm, cdm, sizeof(*cdm));
_memcpy_fromio(&sxlb, xlb, sizeof(*xlb));
_memcpy_fromio(&sgps, gps, sizeof(*gps));
_memcpy_fromio(&sgpw, gpw, sizeof(*gpw));
_memcpy_fromio(spci, pci, 0x200);
_memcpy_fromio(saved_sram, sram, sram_size);
}
static void lite5200_restore_regs(void)
{
int i;
_memcpy_toio(sram, saved_sram, sram_size);
/* PCI Configuration */
_memcpy_toio(pci, spci, 0x200);
/*
	 * GPIOs. Interrupt Master Enable has a higher address than the other
* registers, so just memcpy is ok.
*/
_memcpy_toio(gpw, &sgpw, sizeof(*gpw));
_memcpy_toio(gps, &sgps, sizeof(*gps));
	/* XLB Arbiter */
out_be32(&xlb->snoop_window, sxlb.snoop_window);
out_be32(&xlb->master_priority, sxlb.master_priority);
out_be32(&xlb->master_pri_enable, sxlb.master_pri_enable);
/* enable */
out_be32(&xlb->int_enable, sxlb.int_enable);
out_be32(&xlb->config, sxlb.config);
/* CDM - Clock Distribution Module */
out_8(&cdm->ipb_clk_sel, scdm.ipb_clk_sel);
out_8(&cdm->pci_clk_sel, scdm.pci_clk_sel);
out_8(&cdm->ext_48mhz_en, scdm.ext_48mhz_en);
out_8(&cdm->fd_enable, scdm.fd_enable);
out_be16(&cdm->fd_counters, scdm.fd_counters);
out_be32(&cdm->clk_enables, scdm.clk_enables);
out_8(&cdm->osc_disable, scdm.osc_disable);
out_be16(&cdm->mclken_div_psc1, scdm.mclken_div_psc1);
out_be16(&cdm->mclken_div_psc2, scdm.mclken_div_psc2);
out_be16(&cdm->mclken_div_psc3, scdm.mclken_div_psc3);
out_be16(&cdm->mclken_div_psc6, scdm.mclken_div_psc6);
/* BESTCOMM */
out_be32(&bes->taskBar, sbes.taskBar);
out_be32(&bes->currentPointer, sbes.currentPointer);
out_be32(&bes->endPointer, sbes.endPointer);
out_be32(&bes->variablePointer, sbes.variablePointer);
out_8(&bes->IntVect1, sbes.IntVect1);
out_8(&bes->IntVect2, sbes.IntVect2);
out_be16(&bes->PtdCntrl, sbes.PtdCntrl);
for (i=0; i<32; i++)
out_8(&bes->ipr[i], sbes.ipr[i]);
out_be32(&bes->cReqSelect, sbes.cReqSelect);
out_be32(&bes->task_size0, sbes.task_size0);
out_be32(&bes->task_size1, sbes.task_size1);
out_be32(&bes->MDEDebug, sbes.MDEDebug);
out_be32(&bes->ADSDebug, sbes.ADSDebug);
out_be32(&bes->Value1, sbes.Value1);
out_be32(&bes->Value2, sbes.Value2);
out_be32(&bes->Control, sbes.Control);
out_be32(&bes->Status, sbes.Status);
out_be32(&bes->PTDDebug, sbes.PTDDebug);
/* restore tasks */
for (i=0; i<16; i++)
out_be16(&bes->tcr[i], sbes.tcr[i]);
/* enable interrupts */
out_be32(&bes->IntPend, sbes.IntPend);
out_be32(&bes->IntMask, sbes.IntMask);
/* PIC */
out_be32(&pic->per_pri1, spic.per_pri1);
out_be32(&pic->per_pri2, spic.per_pri2);
out_be32(&pic->per_pri3, spic.per_pri3);
out_be32(&pic->main_pri1, spic.main_pri1);
out_be32(&pic->main_pri2, spic.main_pri2);
out_be32(&pic->enc_status, spic.enc_status);
/* unmask and enable interrupts */
out_be32(&pic->per_mask, spic.per_mask);
out_be32(&pic->main_mask, spic.main_mask);
out_be32(&pic->ctrl, spic.ctrl);
}
static int lite5200_pm_enter(suspend_state_t state)
{
/* deep sleep? let mpc52xx code handle that */
if (state == PM_SUSPEND_STANDBY) {
return mpc52xx_pm_enter(state);
}
lite5200_save_regs();
/* effectively save FP regs */
enable_kernel_fp();
lite5200_low_power(sram, mbar);
lite5200_restore_regs();
iounmap(mbar);
return 0;
}
static void lite5200_pm_finish(void)
{
/* deep sleep? let mpc52xx code handle that */
if (lite5200_pm_target_state == PM_SUSPEND_STANDBY)
mpc52xx_pm_finish();
}
static void lite5200_pm_end(void)
{
lite5200_pm_target_state = PM_SUSPEND_ON;
}
static const struct platform_suspend_ops lite5200_pm_ops = {
.valid = lite5200_pm_valid,
.begin = lite5200_pm_begin,
.prepare = lite5200_pm_prepare,
.enter = lite5200_pm_enter,
.finish = lite5200_pm_finish,
.end = lite5200_pm_end,
};
int __init lite5200_pm_init(void)
{
suspend_set_ops(&lite5200_pm_ops);
return 0;
}
| linux-master | arch/powerpc/platforms/52xx/lite5200_pm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Support for 'mpc5200-simple-platform' compatible boards.
*
* Written by Marian Balakowicz <[email protected]>
* Copyright (C) 2007 Semihalf
*
* Description:
 * This code implements support for simple MPC52xx based boards which
 * do not need a custom platform-specific setup. Such boards are
* supported assuming the following:
*
* - GPIO pins are configured by the firmware,
* - CDM configuration (clocking) is setup correctly by firmware,
* - if the 'fsl,has-wdt' property is present in one of the
* gpt nodes, then it is safe to use such gpt to reset the board,
* - PCI is supported if enabled in the kernel configuration
* and if there is a PCI bus node defined in the device tree.
*
* Boards that are compatible with this generic platform support
* are listed in a 'board' table.
*/
#undef DEBUG
#include <linux/of.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/mpc52xx.h>
/*
* Setup the architecture
*/
static void __init mpc5200_simple_setup_arch(void)
{
if (ppc_md.progress)
ppc_md.progress("mpc5200_simple_setup_arch()", 0);
/* Map important registers from the internal memory map */
mpc52xx_map_common_devices();
/* Some mpc5200 & mpc5200b related configuration */
mpc5200_setup_xlb_arbiter();
}
/* list of the supported boards */
static const char *board[] __initdata = {
"anonymous,a3m071",
"anonymous,a4m072",
"anon,charon",
"ifm,o2d",
"intercontrol,digsy-mtc",
"manroland,mucmc52",
"manroland,uc101",
"phytec,pcm030",
"phytec,pcm032",
"promess,motionpro",
"schindler,cm5200",
"tqc,tqm5200",
NULL
};
/*
* Called very early, MMU is off, device-tree isn't unflattened
*/
static int __init mpc5200_simple_probe(void)
{
return of_device_compatible_match(of_root, board);
}
define_machine(mpc5200_simple_platform) {
.name = "mpc5200-simple-platform",
.probe = mpc5200_simple_probe,
.setup_arch = mpc5200_simple_setup_arch,
.discover_phbs = mpc52xx_setup_pci,
.init = mpc52xx_declare_of_platform_devices,
.init_IRQ = mpc52xx_init_irq,
.get_irq = mpc52xx_get_irq,
.restart = mpc52xx_restart,
};
| linux-master | arch/powerpc/platforms/52xx/mpc5200_simple.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MPC5200 General Purpose Timer device driver
*
* Copyright (c) 2009 Secret Lab Technologies Ltd.
* Copyright (c) 2008 Sascha Hauer <[email protected]>, Pengutronix
*
* This file is a driver for the General Purpose Timer (gpt) devices
* found on the MPC5200 SoC. Each timer has an IO pin which can be used
* for GPIO or can be used to raise interrupts. The timer function can
* be used independently from the IO pin, or it can be used to control
* output signals or measure input signals.
*
* This driver supports the GPIO and IRQ controller functions of the GPT
* device. Timer functions are not yet supported.
*
* The timer gpt0 can be used as watchdog (wdt). If the wdt mode is used,
* this prevents the use of any gpt0 gpt function (i.e. they will fail with
* -EBUSY). Thus, the safety wdt function always has precedence over the gpt
* function. If the kernel has been compiled with CONFIG_WATCHDOG_NOWAYOUT,
* this means that gpt0 is locked in wdt mode until the next reboot - this
* may be a requirement in safety applications.
*
* To use the GPIO function, the following two properties must be added
* to the device tree node for the gpt device (typically in the .dts file
* for the board):
* gpio-controller;
* #gpio-cells = < 2 >;
* This driver will register the GPIO pin if it finds the gpio-controller
* property in the device tree.
*
* To use the IRQ controller function, the following two properties must
* be added to the device tree node for the gpt device:
* interrupt-controller;
* #interrupt-cells = < 1 >;
* The IRQ controller binding only uses one cell to specify the interrupt,
* and the IRQ flags are encoded in the cell. A cell is not used to encode
* the IRQ number because the GPT only has a single IRQ source. For flags,
* a value of '1' means rising edge sensitive and '2' means falling edge.
*
* The GPIO and the IRQ controller functions can be used at the same time,
* but in this use case the IO line will only work as an input. Trying to
* use it as a GPIO output will not work.
*
* When using the GPIO line as an output, it can either be driven as normal
* IO, or it can be an Open Collector (OC) output. At the moment it is the
* responsibility of either the bootloader or the platform setup code to set
* the output mode. This driver does not change the output mode setting.
*/
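/*
 * An illustrative device tree node combining both optional functions (the
 * label, unit address and interrupt specifier below are hypothetical, not
 * taken from a particular board .dts):
 *
 *	gpt0: timer@600 {
 *		compatible = "fsl,mpc5200-gpt";
 *		reg = <0x600 0x10>;
 *		interrupts = <1 9 0>;
 *		gpio-controller;
 *		#gpio-cells = <2>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *	};
 */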
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/watchdog.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <asm/div64.h>
#include <asm/mpc52xx.h>
MODULE_DESCRIPTION("Freescale MPC52xx gpt driver");
MODULE_AUTHOR("Sascha Hauer, Grant Likely, Albrecht Dreß");
MODULE_LICENSE("GPL");
/**
 * struct mpc52xx_gpt_priv - Private data structure for MPC52xx GPT driver
* @dev: pointer to device structure
* @regs: virtual address of GPT registers
* @lock: spinlock to coordinate between different functions.
* @gc: gpio_chip instance structure; used when GPIO is enabled
* @irqhost: Pointer to irq_domain instance; used when IRQ mode is supported
* @wdt_mode: only relevant for gpt0: bit 0 (MPC52xx_GPT_CAN_WDT) indicates
* if the gpt may be used as wdt, bit 1 (MPC52xx_GPT_IS_WDT) indicates
* if the timer is actively used as wdt which blocks gpt functions
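 * @list: list_head used to track the device on mpc52xx_gpt_list
 * @ipb_freq: IPB bus frequency in Hz, used for timer period calculations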
*/
struct mpc52xx_gpt_priv {
struct list_head list; /* List of all GPT devices */
struct device *dev;
struct mpc52xx_gpt __iomem *regs;
raw_spinlock_t lock;
struct irq_domain *irqhost;
u32 ipb_freq;
u8 wdt_mode;
#if defined(CONFIG_GPIOLIB)
struct gpio_chip gc;
#endif
};
LIST_HEAD(mpc52xx_gpt_list);
DEFINE_MUTEX(mpc52xx_gpt_list_mutex);
#define MPC52xx_GPT_MODE_MS_MASK (0x07)
#define MPC52xx_GPT_MODE_MS_IC (0x01)
#define MPC52xx_GPT_MODE_MS_OC (0x02)
#define MPC52xx_GPT_MODE_MS_PWM (0x03)
#define MPC52xx_GPT_MODE_MS_GPIO (0x04)
#define MPC52xx_GPT_MODE_GPIO_MASK (0x30)
#define MPC52xx_GPT_MODE_GPIO_OUT_LOW (0x20)
#define MPC52xx_GPT_MODE_GPIO_OUT_HIGH (0x30)
#define MPC52xx_GPT_MODE_COUNTER_ENABLE (0x1000)
#define MPC52xx_GPT_MODE_CONTINUOUS (0x0400)
#define MPC52xx_GPT_MODE_OPEN_DRAIN (0x0200)
#define MPC52xx_GPT_MODE_IRQ_EN (0x0100)
#define MPC52xx_GPT_MODE_WDT_EN (0x8000)
#define MPC52xx_GPT_MODE_ICT_MASK (0x030000)
#define MPC52xx_GPT_MODE_ICT_RISING (0x010000)
#define MPC52xx_GPT_MODE_ICT_FALLING (0x020000)
#define MPC52xx_GPT_MODE_ICT_TOGGLE (0x030000)
#define MPC52xx_GPT_MODE_WDT_PING (0xa5)
#define MPC52xx_GPT_STATUS_IRQMASK (0x000f)
#define MPC52xx_GPT_CAN_WDT (1 << 0)
#define MPC52xx_GPT_IS_WDT (1 << 1)
/* ---------------------------------------------------------------------
* Cascaded interrupt controller hooks
*/
static void mpc52xx_gpt_irq_unmask(struct irq_data *d)
{
struct mpc52xx_gpt_priv *gpt = irq_data_get_irq_chip_data(d);
unsigned long flags;
raw_spin_lock_irqsave(&gpt->lock, flags);
setbits32(&gpt->regs->mode, MPC52xx_GPT_MODE_IRQ_EN);
raw_spin_unlock_irqrestore(&gpt->lock, flags);
}
static void mpc52xx_gpt_irq_mask(struct irq_data *d)
{
struct mpc52xx_gpt_priv *gpt = irq_data_get_irq_chip_data(d);
unsigned long flags;
raw_spin_lock_irqsave(&gpt->lock, flags);
clrbits32(&gpt->regs->mode, MPC52xx_GPT_MODE_IRQ_EN);
raw_spin_unlock_irqrestore(&gpt->lock, flags);
}
static void mpc52xx_gpt_irq_ack(struct irq_data *d)
{
struct mpc52xx_gpt_priv *gpt = irq_data_get_irq_chip_data(d);
out_be32(&gpt->regs->status, MPC52xx_GPT_STATUS_IRQMASK);
}
static int mpc52xx_gpt_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
struct mpc52xx_gpt_priv *gpt = irq_data_get_irq_chip_data(d);
unsigned long flags;
u32 reg;
dev_dbg(gpt->dev, "%s: virq=%i type=%x\n", __func__, d->irq, flow_type);
raw_spin_lock_irqsave(&gpt->lock, flags);
reg = in_be32(&gpt->regs->mode) & ~MPC52xx_GPT_MODE_ICT_MASK;
if (flow_type & IRQF_TRIGGER_RISING)
reg |= MPC52xx_GPT_MODE_ICT_RISING;
if (flow_type & IRQF_TRIGGER_FALLING)
reg |= MPC52xx_GPT_MODE_ICT_FALLING;
out_be32(&gpt->regs->mode, reg);
raw_spin_unlock_irqrestore(&gpt->lock, flags);
return 0;
}
static struct irq_chip mpc52xx_gpt_irq_chip = {
.name = "MPC52xx GPT",
.irq_unmask = mpc52xx_gpt_irq_unmask,
.irq_mask = mpc52xx_gpt_irq_mask,
.irq_ack = mpc52xx_gpt_irq_ack,
.irq_set_type = mpc52xx_gpt_irq_set_type,
};
static void mpc52xx_gpt_irq_cascade(struct irq_desc *desc)
{
struct mpc52xx_gpt_priv *gpt = irq_desc_get_handler_data(desc);
u32 status;
status = in_be32(&gpt->regs->status) & MPC52xx_GPT_STATUS_IRQMASK;
if (status)
generic_handle_domain_irq(gpt->irqhost, 0);
}
static int mpc52xx_gpt_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct mpc52xx_gpt_priv *gpt = h->host_data;
dev_dbg(gpt->dev, "%s: h=%p, virq=%i\n", __func__, h, virq);
irq_set_chip_data(virq, gpt);
irq_set_chip_and_handler(virq, &mpc52xx_gpt_irq_chip, handle_edge_irq);
return 0;
}
static int mpc52xx_gpt_irq_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
unsigned int *out_flags)
{
struct mpc52xx_gpt_priv *gpt = h->host_data;
dev_dbg(gpt->dev, "%s: flags=%i\n", __func__, intspec[0]);
if ((intsize < 1) || (intspec[0] > 3)) {
dev_err(gpt->dev, "bad irq specifier in %pOF\n", ct);
return -EINVAL;
}
*out_hwirq = 0; /* The GPT only has 1 IRQ line */
*out_flags = intspec[0];
return 0;
}
static const struct irq_domain_ops mpc52xx_gpt_irq_ops = {
.map = mpc52xx_gpt_irq_map,
.xlate = mpc52xx_gpt_irq_xlate,
};
static void
mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
{
int cascade_virq;
unsigned long flags;
u32 mode;
cascade_virq = irq_of_parse_and_map(node, 0);
if (!cascade_virq)
return;
gpt->irqhost = irq_domain_add_linear(node, 1, &mpc52xx_gpt_irq_ops, gpt);
if (!gpt->irqhost) {
dev_err(gpt->dev, "irq_domain_add_linear() failed\n");
return;
}
irq_set_handler_data(cascade_virq, gpt);
irq_set_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade);
/* If the GPT is currently disabled, then change it to be in Input
* Capture mode. If the mode is non-zero, then the pin could be
* already in use for something. */
raw_spin_lock_irqsave(&gpt->lock, flags);
mode = in_be32(&gpt->regs->mode);
if ((mode & MPC52xx_GPT_MODE_MS_MASK) == 0)
out_be32(&gpt->regs->mode, mode | MPC52xx_GPT_MODE_MS_IC);
raw_spin_unlock_irqrestore(&gpt->lock, flags);
dev_dbg(gpt->dev, "%s() complete. virq=%i\n", __func__, cascade_virq);
}
/* ---------------------------------------------------------------------
* GPIOLIB hooks
*/
#if defined(CONFIG_GPIOLIB)
static int mpc52xx_gpt_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct mpc52xx_gpt_priv *gpt = gpiochip_get_data(gc);
return (in_be32(&gpt->regs->status) >> 8) & 1;
}
static void
mpc52xx_gpt_gpio_set(struct gpio_chip *gc, unsigned int gpio, int v)
{
struct mpc52xx_gpt_priv *gpt = gpiochip_get_data(gc);
unsigned long flags;
u32 r;
dev_dbg(gpt->dev, "%s: gpio:%d v:%d\n", __func__, gpio, v);
r = v ? MPC52xx_GPT_MODE_GPIO_OUT_HIGH : MPC52xx_GPT_MODE_GPIO_OUT_LOW;
raw_spin_lock_irqsave(&gpt->lock, flags);
clrsetbits_be32(&gpt->regs->mode, MPC52xx_GPT_MODE_GPIO_MASK, r);
raw_spin_unlock_irqrestore(&gpt->lock, flags);
}
static int mpc52xx_gpt_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
struct mpc52xx_gpt_priv *gpt = gpiochip_get_data(gc);
unsigned long flags;
dev_dbg(gpt->dev, "%s: gpio:%d\n", __func__, gpio);
raw_spin_lock_irqsave(&gpt->lock, flags);
clrbits32(&gpt->regs->mode, MPC52xx_GPT_MODE_GPIO_MASK);
raw_spin_unlock_irqrestore(&gpt->lock, flags);
return 0;
}
static int
mpc52xx_gpt_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
mpc52xx_gpt_gpio_set(gc, gpio, val);
return 0;
}
static void mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt)
{
int rc;
/* Only setup GPIO if the device claims the GPT is a GPIO controller */
if (!device_property_present(gpt->dev, "gpio-controller"))
return;
gpt->gc.label = kasprintf(GFP_KERNEL, "%pfw", dev_fwnode(gpt->dev));
if (!gpt->gc.label) {
dev_err(gpt->dev, "out of memory\n");
return;
}
gpt->gc.ngpio = 1;
gpt->gc.direction_input = mpc52xx_gpt_gpio_dir_in;
gpt->gc.direction_output = mpc52xx_gpt_gpio_dir_out;
gpt->gc.get = mpc52xx_gpt_gpio_get;
gpt->gc.set = mpc52xx_gpt_gpio_set;
gpt->gc.base = -1;
gpt->gc.parent = gpt->dev;
/* Setup external pin in GPIO mode */
clrsetbits_be32(&gpt->regs->mode, MPC52xx_GPT_MODE_MS_MASK,
MPC52xx_GPT_MODE_MS_GPIO);
rc = gpiochip_add_data(&gpt->gc, gpt);
if (rc)
dev_err(gpt->dev, "gpiochip_add_data() failed; rc=%i\n", rc);
dev_dbg(gpt->dev, "%s() complete.\n", __func__);
}
#else /* defined(CONFIG_GPIOLIB) */
static void mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt) { }
#endif /* defined(CONFIG_GPIOLIB) */
/***********************************************************************
* Timer API
*/
/**
* mpc52xx_gpt_from_irq - Return the GPT device associated with an IRQ number
* @irq: irq of timer.
*/
struct mpc52xx_gpt_priv *mpc52xx_gpt_from_irq(int irq)
{
struct mpc52xx_gpt_priv *gpt;
struct list_head *pos;
/* Iterate over the list of timers looking for a matching device */
mutex_lock(&mpc52xx_gpt_list_mutex);
list_for_each(pos, &mpc52xx_gpt_list) {
gpt = container_of(pos, struct mpc52xx_gpt_priv, list);
if (gpt->irqhost && irq == irq_linear_revmap(gpt->irqhost, 0)) {
mutex_unlock(&mpc52xx_gpt_list_mutex);
return gpt;
}
}
mutex_unlock(&mpc52xx_gpt_list_mutex);
return NULL;
}
EXPORT_SYMBOL(mpc52xx_gpt_from_irq);
static int mpc52xx_gpt_do_start(struct mpc52xx_gpt_priv *gpt, u64 period,
int continuous, int as_wdt)
{
u32 clear, set;
u64 clocks;
u32 prescale;
unsigned long flags;
clear = MPC52xx_GPT_MODE_MS_MASK | MPC52xx_GPT_MODE_CONTINUOUS;
set = MPC52xx_GPT_MODE_MS_GPIO | MPC52xx_GPT_MODE_COUNTER_ENABLE;
if (as_wdt) {
clear |= MPC52xx_GPT_MODE_IRQ_EN;
set |= MPC52xx_GPT_MODE_WDT_EN;
} else if (continuous)
set |= MPC52xx_GPT_MODE_CONTINUOUS;
/* Determine the number of clocks in the requested period. 64 bit
* arithmetic is done here to preserve the precision until the value
* is scaled back down into the u32 range. Period is in 'ns', bus
* frequency is in Hz. */
clocks = period * (u64)gpt->ipb_freq;
	do_div(clocks, 1000000000); /* convert the ns * Hz product into clock ticks */
/* This device cannot handle a clock count greater than 32 bits */
if (clocks > 0xffffffff)
return -EINVAL;
/* Calculate the prescaler and count values from the clocks value.
* 'clocks' is the number of clock ticks in the period. The timer
* has 16 bit precision and a 16 bit prescaler. Prescaler is
* calculated by integer dividing the clocks by 0x10000 (shifting
* down 16 bits) to obtain the smallest possible divisor for clocks
* to get a 16 bit count value.
*
* Note: the prescale register is '1' based, not '0' based. ie. a
* value of '1' means divide the clock by one. 0xffff divides the
* clock by 0xffff. '0x0000' does not divide by zero, but wraps
* around and divides by 0x10000. That is why prescale must be
* a u32 variable, not a u16, for this calculation. */
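	/* Worked example with illustrative numbers: for ipb_freq = 33 MHz and
	 * period = 1 s, clocks = 33000000; prescale = (33000000 >> 16) + 1 =
	 * 504 and the resulting count is 33000000 / 504 = 65476, which fits
	 * in the 16 bit counter. */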
prescale = (clocks >> 16) + 1;
do_div(clocks, prescale);
if (clocks > 0xffff) {
pr_err("calculation error; prescale:%x clocks:%llx\n",
prescale, clocks);
return -EINVAL;
}
/* Set and enable the timer, reject an attempt to use a wdt as gpt */
raw_spin_lock_irqsave(&gpt->lock, flags);
if (as_wdt)
gpt->wdt_mode |= MPC52xx_GPT_IS_WDT;
else if ((gpt->wdt_mode & MPC52xx_GPT_IS_WDT) != 0) {
raw_spin_unlock_irqrestore(&gpt->lock, flags);
return -EBUSY;
}
out_be32(&gpt->regs->count, prescale << 16 | clocks);
clrsetbits_be32(&gpt->regs->mode, clear, set);
raw_spin_unlock_irqrestore(&gpt->lock, flags);
return 0;
}
/**
* mpc52xx_gpt_start_timer - Set and enable the GPT timer
* @gpt: Pointer to gpt private data structure
* @period: period of timer in ns; max. ~130s @ 33MHz IPB clock
* @continuous: set to 1 to make timer continuous free running
*
* An interrupt will be generated every time the timer fires
*/
int mpc52xx_gpt_start_timer(struct mpc52xx_gpt_priv *gpt, u64 period,
int continuous)
{
return mpc52xx_gpt_do_start(gpt, period, continuous, 0);
}
EXPORT_SYMBOL(mpc52xx_gpt_start_timer);
/**
* mpc52xx_gpt_stop_timer - Stop a gpt
* @gpt: Pointer to gpt private data structure
*
* Returns an error if attempting to stop a wdt
*/
int mpc52xx_gpt_stop_timer(struct mpc52xx_gpt_priv *gpt)
{
unsigned long flags;
/* reject the operation if the timer is used as watchdog (gpt 0 only) */
raw_spin_lock_irqsave(&gpt->lock, flags);
if ((gpt->wdt_mode & MPC52xx_GPT_IS_WDT) != 0) {
raw_spin_unlock_irqrestore(&gpt->lock, flags);
return -EBUSY;
}
clrbits32(&gpt->regs->mode, MPC52xx_GPT_MODE_COUNTER_ENABLE);
raw_spin_unlock_irqrestore(&gpt->lock, flags);
return 0;
}
EXPORT_SYMBOL(mpc52xx_gpt_stop_timer);
/**
* mpc52xx_gpt_timer_period - Read the timer period
* @gpt: Pointer to gpt private data structure
*
* Returns the timer period in ns
*/
u64 mpc52xx_gpt_timer_period(struct mpc52xx_gpt_priv *gpt)
{
u64 period;
u64 prescale;
unsigned long flags;
raw_spin_lock_irqsave(&gpt->lock, flags);
period = in_be32(&gpt->regs->count);
raw_spin_unlock_irqrestore(&gpt->lock, flags);
prescale = period >> 16;
period &= 0xffff;
if (prescale == 0)
prescale = 0x10000;
period = period * prescale * 1000000000ULL;
do_div(period, gpt->ipb_freq);
return period;
}
EXPORT_SYMBOL(mpc52xx_gpt_timer_period);
#if defined(CONFIG_MPC5200_WDT)
/***********************************************************************
* Watchdog API for gpt0
*/
#define WDT_IDENTITY "mpc52xx watchdog on GPT0"
/* wdt_is_active stores whether or not the /dev/watchdog device is opened */
static unsigned long wdt_is_active;
/* wdt-capable gpt */
static struct mpc52xx_gpt_priv *mpc52xx_gpt_wdt;
/* low-level wdt functions */
static inline void mpc52xx_gpt_wdt_ping(struct mpc52xx_gpt_priv *gpt_wdt)
{
unsigned long flags;
raw_spin_lock_irqsave(&gpt_wdt->lock, flags);
out_8((u8 *) &gpt_wdt->regs->mode, MPC52xx_GPT_MODE_WDT_PING);
raw_spin_unlock_irqrestore(&gpt_wdt->lock, flags);
}
/* wdt misc device api */
static ssize_t mpc52xx_wdt_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
struct mpc52xx_gpt_priv *gpt_wdt = file->private_data;
mpc52xx_gpt_wdt_ping(gpt_wdt);
return 0;
}
static const struct watchdog_info mpc5200_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
.identity = WDT_IDENTITY,
};
static long mpc52xx_wdt_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct mpc52xx_gpt_priv *gpt_wdt = file->private_data;
int __user *data = (int __user *)arg;
int timeout;
u64 real_timeout;
int ret = 0;
switch (cmd) {
case WDIOC_GETSUPPORT:
ret = copy_to_user(data, &mpc5200_wdt_info,
sizeof(mpc5200_wdt_info));
if (ret)
ret = -EFAULT;
break;
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
ret = put_user(0, data);
break;
case WDIOC_KEEPALIVE:
mpc52xx_gpt_wdt_ping(gpt_wdt);
break;
case WDIOC_SETTIMEOUT:
ret = get_user(timeout, data);
if (ret)
break;
real_timeout = (u64) timeout * 1000000000ULL;
ret = mpc52xx_gpt_do_start(gpt_wdt, real_timeout, 0, 1);
if (ret)
break;
/* fall through and return the timeout */
fallthrough;
case WDIOC_GETTIMEOUT:
/* we need to round here as to avoid e.g. the following
* situation:
* - timeout requested is 1 second;
* - real timeout @33MHz is 999997090ns
* - the int divide by 10^9 will return 0.
*/
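		/* e.g. (999997090 + 500000000) / 10^9 = 1, as intended */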
real_timeout =
mpc52xx_gpt_timer_period(gpt_wdt) + 500000000ULL;
do_div(real_timeout, 1000000000ULL);
timeout = (int) real_timeout;
ret = put_user(timeout, data);
break;
default:
ret = -ENOTTY;
}
return ret;
}
static int mpc52xx_wdt_open(struct inode *inode, struct file *file)
{
int ret;
/* sanity check */
if (!mpc52xx_gpt_wdt)
return -ENODEV;
/* /dev/watchdog can only be opened once */
if (test_and_set_bit(0, &wdt_is_active))
return -EBUSY;
/* Set and activate the watchdog with 30 seconds timeout */
ret = mpc52xx_gpt_do_start(mpc52xx_gpt_wdt, 30ULL * 1000000000ULL,
0, 1);
if (ret) {
clear_bit(0, &wdt_is_active);
return ret;
}
file->private_data = mpc52xx_gpt_wdt;
return stream_open(inode, file);
}
static int mpc52xx_wdt_release(struct inode *inode, struct file *file)
{
/* note: releasing the wdt in NOWAYOUT-mode does not stop it */
#if !defined(CONFIG_WATCHDOG_NOWAYOUT)
struct mpc52xx_gpt_priv *gpt_wdt = file->private_data;
unsigned long flags;
raw_spin_lock_irqsave(&gpt_wdt->lock, flags);
clrbits32(&gpt_wdt->regs->mode,
MPC52xx_GPT_MODE_COUNTER_ENABLE | MPC52xx_GPT_MODE_WDT_EN);
gpt_wdt->wdt_mode &= ~MPC52xx_GPT_IS_WDT;
raw_spin_unlock_irqrestore(&gpt_wdt->lock, flags);
#endif
clear_bit(0, &wdt_is_active);
return 0;
}
static const struct file_operations mpc52xx_wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = mpc52xx_wdt_write,
.unlocked_ioctl = mpc52xx_wdt_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.open = mpc52xx_wdt_open,
.release = mpc52xx_wdt_release,
};
static struct miscdevice mpc52xx_wdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &mpc52xx_wdt_fops,
};
static int mpc52xx_gpt_wdt_init(void)
{
int err;
/* try to register the watchdog misc device */
err = misc_register(&mpc52xx_wdt_miscdev);
if (err)
pr_err("%s: cannot register watchdog device\n", WDT_IDENTITY);
else
pr_info("%s: watchdog device registered\n", WDT_IDENTITY);
return err;
}
static int mpc52xx_gpt_wdt_setup(struct mpc52xx_gpt_priv *gpt,
const u32 *period)
{
u64 real_timeout;
/* remember the gpt for the wdt operation */
mpc52xx_gpt_wdt = gpt;
/* configure the wdt if the device tree contained a timeout */
if (!period || *period == 0)
return 0;
real_timeout = (u64) *period * 1000000000ULL;
if (mpc52xx_gpt_do_start(gpt, real_timeout, 0, 1))
dev_warn(gpt->dev, "starting as wdt failed\n");
else
dev_info(gpt->dev, "watchdog set to %us timeout\n", *period);
return 0;
}
#else
static int mpc52xx_gpt_wdt_init(void)
{
return 0;
}
static inline int mpc52xx_gpt_wdt_setup(struct mpc52xx_gpt_priv *gpt,
const u32 *period)
{
return 0;
}
#endif /* CONFIG_MPC5200_WDT */
/* ---------------------------------------------------------------------
* of_platform bus binding code
*/
static int mpc52xx_gpt_probe(struct platform_device *ofdev)
{
struct mpc52xx_gpt_priv *gpt;
gpt = devm_kzalloc(&ofdev->dev, sizeof *gpt, GFP_KERNEL);
if (!gpt)
return -ENOMEM;
raw_spin_lock_init(&gpt->lock);
gpt->dev = &ofdev->dev;
gpt->ipb_freq = mpc5xxx_get_bus_frequency(&ofdev->dev);
gpt->regs = of_iomap(ofdev->dev.of_node, 0);
if (!gpt->regs)
return -ENOMEM;
dev_set_drvdata(&ofdev->dev, gpt);
mpc52xx_gpt_gpio_setup(gpt);
mpc52xx_gpt_irq_setup(gpt, ofdev->dev.of_node);
mutex_lock(&mpc52xx_gpt_list_mutex);
list_add(&gpt->list, &mpc52xx_gpt_list);
mutex_unlock(&mpc52xx_gpt_list_mutex);
/* check if this device could be a watchdog */
if (of_property_read_bool(ofdev->dev.of_node, "fsl,has-wdt") ||
of_property_read_bool(ofdev->dev.of_node, "has-wdt")) {
const u32 *on_boot_wdt;
gpt->wdt_mode = MPC52xx_GPT_CAN_WDT;
on_boot_wdt = of_get_property(ofdev->dev.of_node,
"fsl,wdt-on-boot", NULL);
if (on_boot_wdt) {
dev_info(gpt->dev, "used as watchdog\n");
gpt->wdt_mode |= MPC52xx_GPT_IS_WDT;
} else
dev_info(gpt->dev, "can function as watchdog\n");
mpc52xx_gpt_wdt_setup(gpt, on_boot_wdt);
}
return 0;
}
static const struct of_device_id mpc52xx_gpt_match[] = {
{ .compatible = "fsl,mpc5200-gpt", },
	/* Deprecated compatible values; don't use for new dts files */
{ .compatible = "fsl,mpc5200-gpt-gpio", },
{ .compatible = "mpc5200-gpt", },
{}
};
static struct platform_driver mpc52xx_gpt_driver = {
.driver = {
.name = "mpc52xx-gpt",
.suppress_bind_attrs = true,
.of_match_table = mpc52xx_gpt_match,
},
.probe = mpc52xx_gpt_probe,
};
static int __init mpc52xx_gpt_init(void)
{
return platform_driver_register(&mpc52xx_gpt_driver);
}
/* Make sure GPIOs and IRQs get set up before anyone tries to use them */
subsys_initcall(mpc52xx_gpt_init);
device_initcall(mpc52xx_gpt_wdt_init);
| linux-master | arch/powerpc/platforms/52xx/mpc52xx_gpt.c |
/*
*
* Utility functions for the Freescale MPC52xx.
*
* Copyright (C) 2006 Sylvain Munaut <[email protected]>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*
*/
#undef DEBUG
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/export.h>
#include <asm/io.h>
#include <asm/mpc52xx.h>
/* MPC5200 device tree match tables */
static const struct of_device_id mpc52xx_xlb_ids[] __initconst = {
{ .compatible = "fsl,mpc5200-xlb", },
{ .compatible = "mpc5200-xlb", },
{}
};
static const struct of_device_id mpc52xx_bus_ids[] __initconst = {
{ .compatible = "fsl,mpc5200-immr", },
{ .compatible = "fsl,mpc5200b-immr", },
{ .compatible = "simple-bus", },
	/* deprecated matches; shouldn't be used in new device trees */
{ .compatible = "fsl,lpb", },
{ .type = "builtin", .compatible = "mpc5200", }, /* efika */
{ .type = "soc", .compatible = "mpc5200", }, /* lite5200 */
{}
};
/*
 * This variable is mapped in mpc52xx_map_common_devices() and used in
 * mpc52xx_restart(). A permanent mapping is required because
 * mpc52xx_restart() can be called from interrupt context, where node
 * mapping (which calls ioremap()) cannot be used.
 */
static DEFINE_SPINLOCK(mpc52xx_lock);
static struct mpc52xx_gpt __iomem *mpc52xx_wdt;
static struct mpc52xx_cdm __iomem *mpc52xx_cdm;
/*
* Configure the XLB arbiter settings to match what Linux expects.
*/
void __init
mpc5200_setup_xlb_arbiter(void)
{
struct device_node *np;
struct mpc52xx_xlb __iomem *xlb;
np = of_find_matching_node(NULL, mpc52xx_xlb_ids);
xlb = of_iomap(np, 0);
of_node_put(np);
if (!xlb) {
printk(KERN_ERR __FILE__ ": "
"Error mapping XLB in mpc52xx_setup_cpu(). "
"Expect some abnormal behavior\n");
return;
}
/* Configure the XLB Arbiter priorities */
out_be32(&xlb->master_pri_enable, 0xff);
out_be32(&xlb->master_priority, 0x11111111);
/*
* Disable XLB pipelining
	 * (cf. errata 292. We could do this only just before an ATA PIO
	 * transaction and re-enable it afterwards ...)
* Not needed on MPC5200B.
*/
if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR)
out_be32(&xlb->config, in_be32(&xlb->config) | MPC52xx_XLB_CFG_PLDIS);
iounmap(xlb);
}
/*
* This variable is mapped in mpc52xx_map_common_devices and
* used in mpc5200_psc_ac97_gpio_reset().
*/
static DEFINE_SPINLOCK(gpio_lock);
struct mpc52xx_gpio __iomem *simple_gpio;
struct mpc52xx_gpio_wkup __iomem *wkup_gpio;
/**
* mpc52xx_declare_of_platform_devices: register internal devices and children
* of the localplus bus to the of_platform
* bus.
*/
void __init mpc52xx_declare_of_platform_devices(void)
{
/* Find all the 'platform' devices and register them. */
if (of_platform_populate(NULL, mpc52xx_bus_ids, NULL, NULL))
pr_err(__FILE__ ": Error while populating devices from DT\n");
}
/*
* match tables used by mpc52xx_map_common_devices()
*/
static const struct of_device_id mpc52xx_gpt_ids[] __initconst = {
{ .compatible = "fsl,mpc5200-gpt", },
{ .compatible = "mpc5200-gpt", }, /* old */
{}
};
static const struct of_device_id mpc52xx_cdm_ids[] __initconst = {
{ .compatible = "fsl,mpc5200-cdm", },
{ .compatible = "mpc5200-cdm", }, /* old */
{}
};
static const struct of_device_id mpc52xx_gpio_simple[] __initconst = {
{ .compatible = "fsl,mpc5200-gpio", },
{}
};
static const struct of_device_id mpc52xx_gpio_wkup[] __initconst = {
{ .compatible = "fsl,mpc5200-gpio-wkup", },
{}
};
/**
* mpc52xx_map_common_devices: iomap devices required by common code
*/
void __init
mpc52xx_map_common_devices(void)
{
struct device_node *np;
	/* mpc52xx_wdt is mapped here and used in mpc52xx_restart,
	 * possibly from an interrupt context. The wdt is only implemented
	 * on gpt0, so check the has-wdt property before mapping.
	 */
for_each_matching_node(np, mpc52xx_gpt_ids) {
if (of_property_read_bool(np, "fsl,has-wdt") ||
of_property_read_bool(np, "has-wdt")) {
mpc52xx_wdt = of_iomap(np, 0);
of_node_put(np);
break;
}
}
/* Clock Distribution Module, used by PSC clock setting function */
np = of_find_matching_node(NULL, mpc52xx_cdm_ids);
mpc52xx_cdm = of_iomap(np, 0);
of_node_put(np);
/* simple_gpio registers */
np = of_find_matching_node(NULL, mpc52xx_gpio_simple);
simple_gpio = of_iomap(np, 0);
of_node_put(np);
/* wkup_gpio registers */
np = of_find_matching_node(NULL, mpc52xx_gpio_wkup);
wkup_gpio = of_iomap(np, 0);
of_node_put(np);
}
/**
* mpc52xx_set_psc_clkdiv: Set clock divider in the CDM for PSC ports
*
* @psc_id: id of psc port; must be 1,2,3 or 6
* @clkdiv: clock divider value to put into CDM PSC register.
*/
int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv)
{
unsigned long flags;
u16 __iomem *reg;
u32 val;
u32 mask;
u32 mclken_div;
if (!mpc52xx_cdm)
return -ENODEV;
mclken_div = 0x8000 | (clkdiv & 0x1FF);
switch (psc_id) {
case 1: reg = &mpc52xx_cdm->mclken_div_psc1; mask = 0x20; break;
case 2: reg = &mpc52xx_cdm->mclken_div_psc2; mask = 0x40; break;
case 3: reg = &mpc52xx_cdm->mclken_div_psc3; mask = 0x80; break;
case 6: reg = &mpc52xx_cdm->mclken_div_psc6; mask = 0x10; break;
default:
return -ENODEV;
}
/* Set the rate and enable the clock */
spin_lock_irqsave(&mpc52xx_lock, flags);
out_be16(reg, mclken_div);
val = in_be32(&mpc52xx_cdm->clk_enables);
out_be32(&mpc52xx_cdm->clk_enables, val | mask);
spin_unlock_irqrestore(&mpc52xx_lock, flags);
return 0;
}
EXPORT_SYMBOL(mpc52xx_set_psc_clkdiv);
/**
* mpc52xx_restart: ppc_md->restart hook for mpc5200 using the watchdog timer
*/
void __noreturn mpc52xx_restart(char *cmd)
{
local_irq_disable();
/* Turn on the watchdog and wait for it to expire.
* It effectively does a reset. */
if (mpc52xx_wdt) {
out_be32(&mpc52xx_wdt->mode, 0x00000000);
out_be32(&mpc52xx_wdt->count, 0x000000ff);
out_be32(&mpc52xx_wdt->mode, 0x00009004);
} else
printk(KERN_ERR __FILE__ ": "
"mpc52xx_restart: Can't access wdt. "
"Restart impossible, system halted.\n");
while (1);
}
#define PSC1_RESET 0x1
#define PSC1_SYNC 0x4
#define PSC1_SDATA_OUT 0x1
#define PSC2_RESET 0x2
#define PSC2_SYNC (0x4<<4)
#define PSC2_SDATA_OUT (0x1<<4)
#define MPC52xx_GPIO_PSC1_MASK 0x7
#define MPC52xx_GPIO_PSC2_MASK (0x7<<4)
/**
* mpc5200_psc_ac97_gpio_reset: Use gpio pins to reset the ac97 bus
*
 * @psc_number: psc to reset; 0 selects PSC1, 1 selects PSC2 (only PSC1 and PSC2 support ac97)
*/
int mpc5200_psc_ac97_gpio_reset(int psc_number)
{
unsigned long flags;
u32 gpio;
u32 mux;
int out;
int reset;
int sync;
if ((!simple_gpio) || (!wkup_gpio))
return -ENODEV;
switch (psc_number) {
case 0:
reset = PSC1_RESET; /* AC97_1_RES */
sync = PSC1_SYNC; /* AC97_1_SYNC */
out = PSC1_SDATA_OUT; /* AC97_1_SDATA_OUT */
gpio = MPC52xx_GPIO_PSC1_MASK;
break;
case 1:
reset = PSC2_RESET; /* AC97_2_RES */
sync = PSC2_SYNC; /* AC97_2_SYNC */
out = PSC2_SDATA_OUT; /* AC97_2_SDATA_OUT */
gpio = MPC52xx_GPIO_PSC2_MASK;
break;
default:
pr_err(__FILE__ ": Unable to determine PSC, no ac97 "
"cold-reset will be performed\n");
return -ENODEV;
}
spin_lock_irqsave(&gpio_lock, flags);
/* Reconfigure pin-muxing to gpio */
mux = in_be32(&simple_gpio->port_config);
out_be32(&simple_gpio->port_config, mux & (~gpio));
/* enable gpio pins for output */
setbits8(&wkup_gpio->wkup_gpioe, reset);
setbits32(&simple_gpio->simple_gpioe, sync | out);
setbits8(&wkup_gpio->wkup_ddr, reset);
setbits32(&simple_gpio->simple_ddr, sync | out);
/* Assert cold reset */
clrbits32(&simple_gpio->simple_dvo, sync | out);
clrbits8(&wkup_gpio->wkup_dvo, reset);
/* wait for 1 us */
udelay(1);
/* Deassert reset */
setbits8(&wkup_gpio->wkup_dvo, reset);
/* wait at least 200ns */
/* 7 ~= (200ns * timebase) / ns2sec */
__delay(7);
/* Restore pin-muxing */
out_be32(&simple_gpio->port_config, mux);
spin_unlock_irqrestore(&gpio_lock, flags);
return 0;
}
EXPORT_SYMBOL(mpc5200_psc_ac97_gpio_reset);
| linux-master | arch/powerpc/platforms/52xx/mpc52xx_common.c |
/*
*
* Programmable Interrupt Controller functions for the Freescale MPC52xx.
*
* Copyright (C) 2008 Secret Lab Technologies Ltd.
* Copyright (C) 2006 bplan GmbH
* Copyright (C) 2004 Sylvain Munaut <[email protected]>
* Copyright (C) 2003 Montavista Software, Inc
*
* Based on the code from the 2.4 kernel by
* Dale Farnsworth <[email protected]> and Kent Borg.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*
*/
/*
* This is the device driver for the MPC5200 interrupt controller.
*
* hardware overview
* -----------------
 * The MPC5200 interrupt controller groups all of the interrupt sources into
 * three groups called 'critical', 'main', and 'peripheral'.  The critical
 * group has 3 irqs: External IRQ0, the slice timer 0 irq, and wake from deep
 * sleep.  The main group includes the other 3 external IRQs, slice timer 1,
 * the RTC, gpios, and the general purpose timers.  The peripheral group
 * contains the remaining irq sources from all of the on-chip peripherals
 * (PSCs, Ethernet, USB, DMA, etc).
*
* virqs
* -----
* The Linux IRQ subsystem requires that each irq source be assigned a
* system wide unique IRQ number starting at 1 (0 means no irq). Since
* systems can have multiple interrupt controllers, the virtual IRQ (virq)
 * infrastructure lets each interrupt controller define a local set
 * of IRQ numbers, and the virq infrastructure maps those numbers into
* a unique range of the global IRQ# space.
*
* To define a range of virq numbers for this controller, this driver first
* assigns a number to each of the irq groups (called the level 1 or L1
 * value).  Within each group, individual irq sources are also assigned a
 * number, as defined by the MPC5200 user guide, which is referred to as the
 * level 2 or L2 value.  The virq number is determined by shifting up the
* L1 value by MPC52xx_IRQ_L1_OFFSET and ORing it with the L2 value.
*
* For example, the TMR0 interrupt is irq 9 in the main group. The
* virq for TMR0 is calculated by ((1 << MPC52xx_IRQ_L1_OFFSET) | 9).
*
* The observant reader will also notice that this driver defines a 4th
* interrupt group called 'bestcomm'. The bestcomm group isn't physically
* part of the MPC5200 interrupt controller, but it is used here to assign
* a separate virq number for each bestcomm task (since any of the 16
* bestcomm tasks can cause the bestcomm interrupt to be raised). When a
* bestcomm interrupt occurs (peripheral group, irq 0) this driver determines
* which task needs servicing and returns the irq number for that task. This
* allows drivers which use bestcomm to define their own interrupt handlers.
*
* irq_chip structures
* -------------------
* For actually manipulating IRQs (masking, enabling, clearing, etc) this
* driver defines four separate 'irq_chip' structures, one for the main
* group, one for the peripherals group, one for the bestcomm group and one
* for external interrupts. The irq_chip structures provide the hooks needed
 * to manipulate each IRQ source, and since each group has a separate set
* of registers for controlling the irq, it makes sense to divide up the
* hooks along those lines.
*
* You'll notice that there is not an irq_chip for the critical group and
* you'll also notice that there is an irq_chip defined for external
* interrupts even though there is no external interrupt group. The reason
* for this is that the four external interrupts are all managed with the same
* register even though one of the external IRQs is in the critical group and
* the other three are in the main group. For this reason it makes sense for
* the 4 external irqs to be managed using a separate set of hooks. The
 * reason there is no crit irq_chip is that, of the 3 irqs in the critical
 * group, only the external interrupt is actually supported at this time by
 * this driver, and since the external interrupt is the only one used, it can
 * just be directed to make use of the external irq irq_chip.
*
* device tree bindings
* --------------------
* The device tree bindings for this controller reflect the two level
* organization of irqs in the device. #interrupt-cells = <3> where the
* first cell is the group number [0..3], the second cell is the irq
* number in the group, and the third cell is the sense type (level/edge).
* For reference, the following is a list of the interrupt property values
* associated with external interrupt sources on the MPC5200 (just because
* it is non-obvious to determine what the interrupts property should be
* when reading the mpc5200 manual and it is a frequently asked question).
*
* External interrupts:
* <0 0 n> external irq0, n is sense (n=0: level high,
* <1 1 n> external irq1, n is sense n=1: edge rising,
* <1 2 n> external irq2, n is sense n=2: edge falling,
* <1 3 n> external irq3, n is sense n=3: level low)
*/
#undef DEBUG
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/io.h>
#include <asm/mpc52xx.h>
/* HW IRQ mapping */
#define MPC52xx_IRQ_L1_CRIT (0)
#define MPC52xx_IRQ_L1_MAIN (1)
#define MPC52xx_IRQ_L1_PERP (2)
#define MPC52xx_IRQ_L1_SDMA (3)
#define MPC52xx_IRQ_L1_OFFSET (6)
#define MPC52xx_IRQ_L1_MASK (0x00c0)
#define MPC52xx_IRQ_L2_MASK (0x003f)
#define MPC52xx_IRQ_HIGHTESTHWIRQ (0xd0)
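/* A hwirq number packs the L1 group into bits 7:6 and the L2 index into
 * bits 5:0, e.g. TMR0 (main group, L2 = 9) encodes to (1 << 6) | 9 = 0x49. */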
/* MPC5200 device tree match tables */
static const struct of_device_id mpc52xx_pic_ids[] __initconst = {
{ .compatible = "fsl,mpc5200-pic", },
{ .compatible = "mpc5200-pic", },
{}
};
static const struct of_device_id mpc52xx_sdma_ids[] __initconst = {
{ .compatible = "fsl,mpc5200-bestcomm", },
{ .compatible = "mpc5200-bestcomm", },
{}
};
static struct mpc52xx_intr __iomem *intr;
static struct mpc52xx_sdma __iomem *sdma;
static struct irq_domain *mpc52xx_irqhost = NULL;
static unsigned char mpc52xx_map_senses[4] = {
IRQ_TYPE_LEVEL_HIGH,
IRQ_TYPE_EDGE_RISING,
IRQ_TYPE_EDGE_FALLING,
IRQ_TYPE_LEVEL_LOW,
};
/* Utility functions */
static inline void io_be_setbit(u32 __iomem *addr, int bitno)
{
out_be32(addr, in_be32(addr) | (1 << bitno));
}
static inline void io_be_clrbit(u32 __iomem *addr, int bitno)
{
out_be32(addr, in_be32(addr) & ~(1 << bitno));
}
/*
* IRQ[0-3] interrupt irq_chip
*/
static void mpc52xx_extirq_mask(struct irq_data *d)
{
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_clrbit(&intr->ctrl, 11 - l2irq);
}
static void mpc52xx_extirq_unmask(struct irq_data *d)
{
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_setbit(&intr->ctrl, 11 - l2irq);
}
static void mpc52xx_extirq_ack(struct irq_data *d)
{
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_setbit(&intr->ctrl, 27-l2irq);
}
static int mpc52xx_extirq_set_type(struct irq_data *d, unsigned int flow_type)
{
u32 ctrl_reg, type;
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
void *handler = handle_level_irq;
pr_debug("%s: irq=%x. l2=%d flow_type=%d\n", __func__,
(int) irqd_to_hwirq(d), l2irq, flow_type);
switch (flow_type) {
case IRQF_TRIGGER_HIGH: type = 0; break;
case IRQF_TRIGGER_RISING: type = 1; handler = handle_edge_irq; break;
case IRQF_TRIGGER_FALLING: type = 2; handler = handle_edge_irq; break;
case IRQF_TRIGGER_LOW: type = 3; break;
default:
type = 0;
}
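	/* Each external IRQ has a 2-bit sense field in the ctrl register:
	 * IRQ0 uses bits 23:22 and each following IRQ sits two bits lower. */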
ctrl_reg = in_be32(&intr->ctrl);
ctrl_reg &= ~(0x3 << (22 - (l2irq * 2)));
ctrl_reg |= (type << (22 - (l2irq * 2)));
out_be32(&intr->ctrl, ctrl_reg);
irq_set_handler_locked(d, handler);
return 0;
}
static struct irq_chip mpc52xx_extirq_irqchip = {
.name = "MPC52xx External",
.irq_mask = mpc52xx_extirq_mask,
.irq_unmask = mpc52xx_extirq_unmask,
.irq_ack = mpc52xx_extirq_ack,
.irq_set_type = mpc52xx_extirq_set_type,
};
/*
* Main interrupt irq_chip
*/
static int mpc52xx_null_set_type(struct irq_data *d, unsigned int flow_type)
{
return 0; /* Do nothing so that the sense mask will get updated */
}
static void mpc52xx_main_mask(struct irq_data *d)
{
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
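	/* In the main mask register, L2 irq 0 corresponds to bit 16 and
	 * higher-numbered irqs to lower bits; a set bit masks the source. */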
io_be_setbit(&intr->main_mask, 16 - l2irq);
}
static void mpc52xx_main_unmask(struct irq_data *d)
{
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_clrbit(&intr->main_mask, 16 - l2irq);
}
static struct irq_chip mpc52xx_main_irqchip = {
.name = "MPC52xx Main",
.irq_mask = mpc52xx_main_mask,
.irq_mask_ack = mpc52xx_main_mask,
.irq_unmask = mpc52xx_main_unmask,
.irq_set_type = mpc52xx_null_set_type,
};
/*
* Peripherals interrupt irq_chip
*/
static void mpc52xx_periph_mask(struct irq_data *d)
{
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_setbit(&intr->per_mask, 31 - l2irq);
}
static void mpc52xx_periph_unmask(struct irq_data *d)
{
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_clrbit(&intr->per_mask, 31 - l2irq);
}
static struct irq_chip mpc52xx_periph_irqchip = {
.name = "MPC52xx Peripherals",
.irq_mask = mpc52xx_periph_mask,
.irq_mask_ack = mpc52xx_periph_mask,
.irq_unmask = mpc52xx_periph_unmask,
.irq_set_type = mpc52xx_null_set_type,
};
/*
* SDMA interrupt irq_chip
*/
static void mpc52xx_sdma_mask(struct irq_data *d)
{
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_setbit(&sdma->IntMask, l2irq);
}
static void mpc52xx_sdma_unmask(struct irq_data *d)
{
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_clrbit(&sdma->IntMask, l2irq);
}
static void mpc52xx_sdma_ack(struct irq_data *d)
{
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
out_be32(&sdma->IntPend, 1 << l2irq);
}
static struct irq_chip mpc52xx_sdma_irqchip = {
.name = "MPC52xx SDMA",
.irq_mask = mpc52xx_sdma_mask,
.irq_unmask = mpc52xx_sdma_unmask,
.irq_ack = mpc52xx_sdma_ack,
.irq_set_type = mpc52xx_null_set_type,
};
/**
* mpc52xx_is_extirq - Returns true if hwirq number is for an external IRQ
*/
static int mpc52xx_is_extirq(int l1, int l2)
{
return ((l1 == 0) && (l2 == 0)) ||
((l1 == 1) && (l2 >= 1) && (l2 <= 3));
}
/**
* mpc52xx_irqhost_xlate - translate virq# from device tree interrupts property
*/
static int mpc52xx_irqhost_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
unsigned int *out_flags)
{
int intrvect_l1;
int intrvect_l2;
int intrvect_type;
int intrvect_linux;
if (intsize != 3)
return -1;
intrvect_l1 = (int)intspec[0];
intrvect_l2 = (int)intspec[1];
intrvect_type = (int)intspec[2] & 0x3;
intrvect_linux = (intrvect_l1 << MPC52xx_IRQ_L1_OFFSET) &
MPC52xx_IRQ_L1_MASK;
intrvect_linux |= intrvect_l2 & MPC52xx_IRQ_L2_MASK;
*out_hwirq = intrvect_linux;
*out_flags = IRQ_TYPE_LEVEL_LOW;
if (mpc52xx_is_extirq(intrvect_l1, intrvect_l2))
*out_flags = mpc52xx_map_senses[intrvect_type];
pr_debug("return %x, l1=%d, l2=%d\n", intrvect_linux, intrvect_l1,
intrvect_l2);
return 0;
}
/**
* mpc52xx_irqhost_map - Hook to map from virq to an irq_chip structure
*/
static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t irq)
{
int l1irq;
int l2irq;
struct irq_chip *irqchip;
void *hndlr;
int type;
u32 reg;
l1irq = (irq & MPC52xx_IRQ_L1_MASK) >> MPC52xx_IRQ_L1_OFFSET;
l2irq = irq & MPC52xx_IRQ_L2_MASK;
/*
* External IRQs are handled differently by the hardware so they are
* handled by a dedicated irq_chip structure.
*/
if (mpc52xx_is_extirq(l1irq, l2irq)) {
reg = in_be32(&intr->ctrl);
type = mpc52xx_map_senses[(reg >> (22 - l2irq * 2)) & 0x3];
if ((type == IRQ_TYPE_EDGE_FALLING) ||
(type == IRQ_TYPE_EDGE_RISING))
hndlr = handle_edge_irq;
else
hndlr = handle_level_irq;
irq_set_chip_and_handler(virq, &mpc52xx_extirq_irqchip, hndlr);
pr_debug("%s: External IRQ%i virq=%x, hw=%x. type=%x\n",
__func__, l2irq, virq, (int)irq, type);
return 0;
}
/* It is an internal SOC irq. Choose the correct irq_chip */
switch (l1irq) {
case MPC52xx_IRQ_L1_MAIN: irqchip = &mpc52xx_main_irqchip; break;
case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break;
case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break;
case MPC52xx_IRQ_L1_CRIT:
pr_warn("%s: Critical IRQ #%d is unsupported! Nopping it.\n",
__func__, l2irq);
irq_set_chip(virq, &no_irq_chip);
return 0;
}
irq_set_chip_and_handler(virq, irqchip, handle_level_irq);
pr_debug("%s: virq=%x, l1=%i, l2=%i\n", __func__, virq, l1irq, l2irq);
return 0;
}
static const struct irq_domain_ops mpc52xx_irqhost_ops = {
.xlate = mpc52xx_irqhost_xlate,
.map = mpc52xx_irqhost_map,
};
/**
* mpc52xx_init_irq - Initialize and register with the virq subsystem
*
* Hook for setting up IRQs on an mpc5200 system. A pointer to this function
* is to be put into the machine definition structure.
*
* This function searches the device tree for an MPC5200 interrupt controller,
* initializes it, and registers it with the virq subsystem.
*/
void __init mpc52xx_init_irq(void)
{
u32 intr_ctrl;
struct device_node *picnode;
struct device_node *np;
/* Remap the necessary zones */
picnode = of_find_matching_node(NULL, mpc52xx_pic_ids);
intr = of_iomap(picnode, 0);
if (!intr)
panic(__FILE__ ": find_and_map failed on 'mpc5200-pic'. "
"Check node !");
np = of_find_matching_node(NULL, mpc52xx_sdma_ids);
sdma = of_iomap(np, 0);
of_node_put(np);
if (!sdma)
panic(__FILE__ ": find_and_map failed on 'mpc5200-bestcomm'. "
"Check node !");
pr_debug("MPC5200 IRQ controller mapped to 0x%p\n", intr);
/* Disable all interrupt sources. */
out_be32(&sdma->IntPend, 0xffffffff); /* 1 means clear pending */
out_be32(&sdma->IntMask, 0xffffffff); /* 1 means disabled */
out_be32(&intr->per_mask, 0x7ffffc00); /* 1 means disabled */
out_be32(&intr->main_mask, 0x00010fff); /* 1 means disabled */
intr_ctrl = in_be32(&intr->ctrl);
intr_ctrl &= 0x00ff0000; /* Keeps IRQ[0-3] config */
intr_ctrl |= 0x0f000000 | /* clear IRQ 0-3 */
0x00001000 | /* MEE master external enable */
0x00000000 | /* 0 means disable IRQ 0-3 */
0x00000001; /* CEb route critical normally */
out_be32(&intr->ctrl, intr_ctrl);
/* Zero a bunch of the priority settings. */
out_be32(&intr->per_pri1, 0);
out_be32(&intr->per_pri2, 0);
out_be32(&intr->per_pri3, 0);
out_be32(&intr->main_pri1, 0);
out_be32(&intr->main_pri2, 0);
/*
	 * As a last step, add an irq host to translate the real
	 * hw irq information provided by the OF device tree into Linux virqs
*/
mpc52xx_irqhost = irq_domain_add_linear(picnode,
MPC52xx_IRQ_HIGHTESTHWIRQ,
&mpc52xx_irqhost_ops, NULL);
if (!mpc52xx_irqhost)
panic(__FILE__ ": Cannot allocate the IRQ host\n");
irq_set_default_host(mpc52xx_irqhost);
pr_info("MPC52xx PIC is up and running!\n");
}
/**
* mpc52xx_get_irq - Get pending interrupt number hook function
*
* Called by the interrupt handler to determine what IRQ handler needs to be
* executed.
*
* Status of pending interrupts is determined by reading the encoded status
* register. The encoded status register has three fields; one for each of the
* types of interrupts defined by the controller - 'critical', 'main' and
* 'peripheral'. This function reads the status register and returns the IRQ
* number associated with the highest priority pending interrupt. 'Critical'
* interrupts have the highest priority, followed by 'main' interrupts, and
* then 'peripheral'.
*
* The mpc5200 interrupt controller can be configured to boost the priority
* of individual 'peripheral' interrupts. If this is the case then a special
* value will appear in either the crit or main fields indicating a high
* or medium priority peripheral irq has occurred.
*
* This function checks each of the 3 irq request fields and returns the
* first pending interrupt that it finds.
*
 * This function also identifies a 4th type of interrupt: 'bestcomm'.  Each
 * bestcomm DMA task can raise the bestcomm peripheral interrupt.  When this
 * occurs, a task-specific IRQ# is decoded so that each task can have its
* own IRQ handler.
*/
unsigned int mpc52xx_get_irq(void)
{
u32 status;
int irq;
status = in_be32(&intr->enc_status);
if (status & 0x00000400) { /* critical */
irq = (status >> 8) & 0x3;
if (irq == 2) /* high priority peripheral */
goto peripheral;
irq |= (MPC52xx_IRQ_L1_CRIT << MPC52xx_IRQ_L1_OFFSET);
} else if (status & 0x00200000) { /* main */
irq = (status >> 16) & 0x1f;
if (irq == 4) /* low priority peripheral */
goto peripheral;
irq |= (MPC52xx_IRQ_L1_MAIN << MPC52xx_IRQ_L1_OFFSET);
} else if (status & 0x20000000) { /* peripheral */
peripheral:
irq = (status >> 24) & 0x1f;
if (irq == 0) { /* bestcomm */
status = in_be32(&sdma->IntPend);
irq = ffs(status) - 1;
irq |= (MPC52xx_IRQ_L1_SDMA << MPC52xx_IRQ_L1_OFFSET);
} else {
irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET);
}
} else {
return 0;
}
return irq_linear_revmap(mpc52xx_irqhost, irq);
}
| linux-master | arch/powerpc/platforms/52xx/mpc52xx_pic.c |
/*
* Efika 5K2 platform code
* Some code really inspired from the lite5200b platform.
*
* Copyright (C) 2006 bplan GmbH
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/init.h>
#include <generated/utsrelease.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <asm/dma.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/mpc52xx.h>
#define EFIKA_PLATFORM_NAME "Efika"
/* ------------------------------------------------------------------------ */
/* PCI accesses thru RTAS */
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PCI
/*
* Access functions for PCI config space using RTAS calls.
*/
static int rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
int len, u32 * val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
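	/* RTAS config-space address layout: byte 0 holds the register offset,
	 * byte 1 the devfn, byte 2 the bus number relative to this hose and
	 * byte 3 the PHB number. */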
unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
| (((bus->number - hose->first_busno) & 0xff) << 16)
| (hose->global_number << 24);
int ret = -1;
int rval;
rval = rtas_call(rtas_function_token(RTAS_FN_READ_PCI_CONFIG), 2, 2, &ret, addr, len);
*val = ret;
return rval ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}
static int rtas_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
| (((bus->number - hose->first_busno) & 0xff) << 16)
| (hose->global_number << 24);
int rval;
rval = rtas_call(rtas_function_token(RTAS_FN_WRITE_PCI_CONFIG), 3, 1, NULL,
addr, len, val);
return rval ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}
static struct pci_ops rtas_pci_ops = {
.read = rtas_read_config,
.write = rtas_write_config,
};
static void __init efika_pcisetup(void)
{
const int *bus_range;
int len;
struct pci_controller *hose;
struct device_node *root;
struct device_node *pcictrl;
root = of_find_node_by_path("/");
if (root == NULL) {
printk(KERN_WARNING EFIKA_PLATFORM_NAME
": Unable to find the root node\n");
return;
}
for_each_child_of_node(root, pcictrl)
if (of_node_name_eq(pcictrl, "pci"))
break;
of_node_put(root);
if (pcictrl == NULL) {
printk(KERN_WARNING EFIKA_PLATFORM_NAME
": Unable to find the PCI bridge node\n");
return;
}
bus_range = of_get_property(pcictrl, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
printk(KERN_WARNING EFIKA_PLATFORM_NAME
": Can't get bus-range for %pOF\n", pcictrl);
goto out_put;
}
if (bus_range[1] == bus_range[0])
printk(KERN_INFO EFIKA_PLATFORM_NAME ": PCI bus %d",
bus_range[0]);
else
printk(KERN_INFO EFIKA_PLATFORM_NAME ": PCI buses %d..%d",
bus_range[0], bus_range[1]);
printk(" controlled by %pOF\n", pcictrl);
printk("\n");
hose = pcibios_alloc_controller(pcictrl);
if (!hose) {
printk(KERN_WARNING EFIKA_PLATFORM_NAME
": Can't allocate PCI controller structure for %pOF\n",
pcictrl);
goto out_put;
}
hose->first_busno = bus_range[0];
hose->last_busno = bus_range[1];
hose->ops = &rtas_pci_ops;
pci_process_bridge_OF_ranges(hose, pcictrl, 0);
return;
out_put:
of_node_put(pcictrl);
}
#else
static void __init efika_pcisetup(void)
{}
#endif
/* ------------------------------------------------------------------------ */
/* Platform setup */
/* ------------------------------------------------------------------------ */
static void efika_show_cpuinfo(struct seq_file *m)
{
struct device_node *root;
const char *revision;
const char *codegendescription;
const char *codegenvendor;
root = of_find_node_by_path("/");
if (!root)
return;
revision = of_get_property(root, "revision", NULL);
codegendescription = of_get_property(root, "CODEGEN,description", NULL);
codegenvendor = of_get_property(root, "CODEGEN,vendor", NULL);
if (codegendescription)
seq_printf(m, "machine\t\t: %s\n", codegendescription);
else
seq_printf(m, "machine\t\t: Efika\n");
if (revision)
seq_printf(m, "revision\t: %s\n", revision);
if (codegenvendor)
seq_printf(m, "vendor\t\t: %s\n", codegenvendor);
of_node_put(root);
}
#ifdef CONFIG_PM
static void efika_suspend_prepare(void __iomem *mbar)
{
u8 pin = 4; /* GPIO_WKUP_4 (GPIO_PSC6_0 - IRDA_RX) */
u8 level = 1; /* wakeup on high level */
/* IOW. to wake it up, short pins 1 and 3 on IRDA connector */
mpc52xx_set_wakeup_gpio(pin, level);
}
#endif
static void __init efika_setup_arch(void)
{
rtas_initialize();
/* Map important registers from the internal memory map */
mpc52xx_map_common_devices();
#ifdef CONFIG_PM
mpc52xx_suspend.board_suspend_prepare = efika_suspend_prepare;
mpc52xx_pm_init();
#endif
if (ppc_md.progress)
ppc_md.progress("Linux/PPC " UTS_RELEASE " running on Efika ;-)\n", 0x0);
}
static int __init efika_probe(void)
{
const char *model = of_get_property(of_root, "model", NULL);
if (model == NULL)
return 0;
if (strcmp(model, "EFIKA5K2"))
return 0;
DMA_MODE_READ = 0x44;
DMA_MODE_WRITE = 0x48;
pm_power_off = rtas_power_off;
return 1;
}
define_machine(efika)
{
.name = EFIKA_PLATFORM_NAME,
.probe = efika_probe,
.setup_arch = efika_setup_arch,
.discover_phbs = efika_pcisetup,
.init = mpc52xx_declare_of_platform_devices,
.show_cpuinfo = efika_show_cpuinfo,
.init_IRQ = mpc52xx_init_irq,
.get_irq = mpc52xx_get_irq,
.restart = rtas_restart,
.halt = rtas_halt,
.set_rtc_time = rtas_set_rtc_time,
.get_rtc_time = rtas_get_rtc_time,
.progress = rtas_progress,
.get_boot_time = rtas_get_boot_time,
#ifdef CONFIG_PCI
.phys_mem_access_prot = pci_phys_mem_access_prot,
#endif
};
| linux-master | arch/powerpc/platforms/52xx/efika.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Freescale Lite5200 board support
*
* Written by: Grant Likely <[email protected]>
*
* Copyright (C) Secret Lab Technologies Ltd. 2006. All rights reserved.
* Copyright 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Description:
*/
#undef DEBUG
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/root_dev.h>
#include <linux/initrd.h>
#include <asm/time.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/mpc52xx.h>
/* ************************************************************************
*
* Setup the architecture
*
*/
/* mpc5200 device tree match tables */
static const struct of_device_id mpc5200_cdm_ids[] __initconst = {
{ .compatible = "fsl,mpc5200-cdm", },
{ .compatible = "mpc5200-cdm", },
{}
};
static const struct of_device_id mpc5200_gpio_ids[] __initconst = {
{ .compatible = "fsl,mpc5200-gpio", },
{ .compatible = "mpc5200-gpio", },
{}
};
/*
* Fix clock configuration.
*
* Firmware is supposed to be responsible for this. If you are creating a
* new board port, do *NOT* duplicate this code. Fix your boot firmware
* to set it correctly in the first place
*/
static void __init
lite5200_fix_clock_config(void)
{
struct device_node *np;
struct mpc52xx_cdm __iomem *cdm;
/* Map zones */
np = of_find_matching_node(NULL, mpc5200_cdm_ids);
cdm = of_iomap(np, 0);
of_node_put(np);
if (!cdm) {
printk(KERN_ERR "%s() failed; expect abnormal behaviour\n",
__func__);
return;
}
	/* Use internal 48 MHz */
out_8(&cdm->ext_48mhz_en, 0x00);
out_8(&cdm->fd_enable, 0x01);
	if (in_be32(&cdm->rstcfg) & 0x40)	/* Assumes 33MHz clock */
out_be16(&cdm->fd_counters, 0x0001);
else
out_be16(&cdm->fd_counters, 0x5555);
/* Unmap the regs */
iounmap(cdm);
}
/*
* Fix setting of port_config register.
*
* Firmware is supposed to be responsible for this. If you are creating a
* new board port, do *NOT* duplicate this code. Fix your boot firmware
* to set it correctly in the first place
*/
static void __init
lite5200_fix_port_config(void)
{
struct device_node *np;
struct mpc52xx_gpio __iomem *gpio;
u32 port_config;
np = of_find_matching_node(NULL, mpc5200_gpio_ids);
gpio = of_iomap(np, 0);
of_node_put(np);
if (!gpio) {
printk(KERN_ERR "%s() failed. expect abnormal behavior\n",
__func__);
return;
}
/* Set port config */
port_config = in_be32(&gpio->port_config);
	port_config &= ~0x00800000;	/* 48MHz internal, pin is GPIO */
port_config &= ~0x00007000; /* USB port : Differential mode */
port_config |= 0x00001000; /* USB 1 only */
port_config &= ~0x03000000; /* ATA CS is on csb_4/5 */
port_config |= 0x01000000;
pr_debug("port_config: old:%x new:%x\n",
in_be32(&gpio->port_config), port_config);
out_be32(&gpio->port_config, port_config);
/* Unmap zone */
iounmap(gpio);
}
#ifdef CONFIG_PM
static void lite5200_suspend_prepare(void __iomem *mbar)
{
u8 pin = 1; /* GPIO_WKUP_1 (GPIO_PSC2_4) */
u8 level = 0; /* wakeup on low level */
mpc52xx_set_wakeup_gpio(pin, level);
/*
* power down usb port
* this needs to be called before of-ohci suspend code
*/
/* set ports to "power switched" and "powered at the same time"
* USB Rh descriptor A: NPS = 0, PSM = 0 */
out_be32(mbar + 0x1048, in_be32(mbar + 0x1048) & ~0x300);
/* USB Rh status: LPS = 1 - turn off power */
out_be32(mbar + 0x1050, 0x00000001);
}
static void lite5200_resume_finish(void __iomem *mbar)
{
/* USB Rh status: LPSC = 1 - turn on power */
out_be32(mbar + 0x1050, 0x00010000);
}
#endif
static void __init lite5200_setup_arch(void)
{
if (ppc_md.progress)
ppc_md.progress("lite5200_setup_arch()", 0);
/* Map important registers from the internal memory map */
mpc52xx_map_common_devices();
/* Some mpc5200 & mpc5200b related configuration */
mpc5200_setup_xlb_arbiter();
/* Fix things that firmware should have done. */
lite5200_fix_clock_config();
lite5200_fix_port_config();
#ifdef CONFIG_PM
mpc52xx_suspend.board_suspend_prepare = lite5200_suspend_prepare;
mpc52xx_suspend.board_resume_finish = lite5200_resume_finish;
lite5200_pm_init();
#endif
}
static const char * const board[] __initconst = {
"fsl,lite5200",
"fsl,lite5200b",
NULL,
};
/*
* Called very early, MMU is off, device-tree isn't unflattened
*/
static int __init lite5200_probe(void)
{
return of_device_compatible_match(of_root, board);
}
define_machine(lite5200) {
.name = "lite5200",
.probe = lite5200_probe,
.setup_arch = lite5200_setup_arch,
.discover_phbs = mpc52xx_setup_pci,
.init = mpc52xx_declare_of_platform_devices,
.init_IRQ = mpc52xx_init_irq,
.get_irq = mpc52xx_get_irq,
.restart = mpc52xx_restart,
};
| linux-master | arch/powerpc/platforms/52xx/lite5200.c |
/*
* PCI code for the Freescale MPC52xx embedded CPU.
*
* Copyright (C) 2006 Secret Lab Technologies Ltd.
* Grant Likely <[email protected]>
* Copyright (C) 2004 Sylvain Munaut <[email protected]>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#undef DEBUG
#include <linux/pci.h>
#include <linux/of_address.h>
#include <asm/mpc52xx.h>
#include <asm/delay.h>
#include <asm/machdep.h>
#include <linux/kernel.h>
/* ======================================================================== */
/* Structures mapping & Defines for PCI Unit */
/* ======================================================================== */
#define MPC52xx_PCI_GSCR_BM 0x40000000
#define MPC52xx_PCI_GSCR_PE 0x20000000
#define MPC52xx_PCI_GSCR_SE 0x10000000
#define MPC52xx_PCI_GSCR_XLB2PCI_MASK 0x07000000
#define MPC52xx_PCI_GSCR_XLB2PCI_SHIFT 24
#define MPC52xx_PCI_GSCR_IPG2PCI_MASK 0x00070000
#define MPC52xx_PCI_GSCR_IPG2PCI_SHIFT 16
#define MPC52xx_PCI_GSCR_BME 0x00004000
#define MPC52xx_PCI_GSCR_PEE 0x00002000
#define MPC52xx_PCI_GSCR_SEE 0x00001000
#define MPC52xx_PCI_GSCR_PR 0x00000001
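/* An initiator window BTAR packs the top byte of the XLB base address into
 * bits 31:24, the top byte of (size - 1) into bits 23:16 and the top byte of
 * the PCI base address into bits 15:8. */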
#define MPC52xx_PCI_IWBTAR_TRANSLATION(proc_ad,pci_ad,size) \
( ( (proc_ad) & 0xff000000 ) | \
( (((size) - 1) >> 8) & 0x00ff0000 ) | \
( ((pci_ad) >> 16) & 0x0000ff00 ) )
#define MPC52xx_PCI_IWCR_PACK(win0,win1,win2) (((win0) << 24) | \
((win1) << 16) | \
((win2) << 8))
#define MPC52xx_PCI_IWCR_DISABLE 0x0
#define MPC52xx_PCI_IWCR_ENABLE 0x1
#define MPC52xx_PCI_IWCR_READ 0x0
#define MPC52xx_PCI_IWCR_READ_LINE 0x2
#define MPC52xx_PCI_IWCR_READ_MULTI 0x4
#define MPC52xx_PCI_IWCR_MEM 0x0
#define MPC52xx_PCI_IWCR_IO 0x8
#define MPC52xx_PCI_TCR_P 0x01000000
#define MPC52xx_PCI_TCR_LD 0x00010000
#define MPC52xx_PCI_TCR_WCT8 0x00000008
#define MPC52xx_PCI_TBATR_DISABLE 0x0
#define MPC52xx_PCI_TBATR_ENABLE 0x1
struct mpc52xx_pci {
u32 idr; /* PCI + 0x00 */
u32 scr; /* PCI + 0x04 */
u32 ccrir; /* PCI + 0x08 */
u32 cr1; /* PCI + 0x0C */
u32 bar0; /* PCI + 0x10 */
u32 bar1; /* PCI + 0x14 */
u8 reserved1[16]; /* PCI + 0x18 */
u32 ccpr; /* PCI + 0x28 */
u32 sid; /* PCI + 0x2C */
u32 erbar; /* PCI + 0x30 */
u32 cpr; /* PCI + 0x34 */
u8 reserved2[4]; /* PCI + 0x38 */
u32 cr2; /* PCI + 0x3C */
u8 reserved3[32]; /* PCI + 0x40 */
u32 gscr; /* PCI + 0x60 */
u32 tbatr0; /* PCI + 0x64 */
u32 tbatr1; /* PCI + 0x68 */
u32 tcr; /* PCI + 0x6C */
u32 iw0btar; /* PCI + 0x70 */
u32 iw1btar; /* PCI + 0x74 */
u32 iw2btar; /* PCI + 0x78 */
u8 reserved4[4]; /* PCI + 0x7C */
u32 iwcr; /* PCI + 0x80 */
u32 icr; /* PCI + 0x84 */
u32 isr; /* PCI + 0x88 */
u32 arb; /* PCI + 0x8C */
u8 reserved5[104]; /* PCI + 0x90 */
u32 car; /* PCI + 0xF8 */
u8 reserved6[4]; /* PCI + 0xFC */
};
/* MPC5200 device tree match tables */
const struct of_device_id mpc52xx_pci_ids[] __initconst = {
{ .type = "pci", .compatible = "fsl,mpc5200-pci", },
{ .type = "pci", .compatible = "mpc5200-pci", },
{}
};
/* ======================================================================== */
/* PCI configuration access */
/* ======================================================================== */
static int
mpc52xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
u32 value;
if (ppc_md.pci_exclude_device)
if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
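	/* Build the configuration address: bit 31 enables the cycle,
	 * bits 23:16 hold the bus number, bits 15:8 the devfn and
	 * bits 7:2 the dword-aligned register offset. */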
out_be32(hose->cfg_addr,
(1 << 31) |
(bus->number << 16) |
(devfn << 8) |
(offset & 0xfc));
mb();
#if defined(CONFIG_PPC_MPC5200_BUGFIX)
if (bus->number) {
		/* workaround for bug 435 of the MPC5200 (L25R);
		 * don't do 32-bit config accesses during type-1 cycles */
switch (len) {
case 1:
value = in_8(((u8 __iomem *)hose->cfg_data) +
(offset & 3));
break;
case 2:
value = in_le16(((u16 __iomem *)hose->cfg_data) +
((offset>>1) & 1));
break;
default:
value = in_le16((u16 __iomem *)hose->cfg_data) |
(in_le16(((u16 __iomem *)hose->cfg_data) + 1) << 16);
break;
}
}
else
#endif
{
value = in_le32(hose->cfg_data);
if (len != 4) {
value >>= ((offset & 0x3) << 3);
value &= 0xffffffff >> (32 - (len << 3));
}
}
*val = value;
out_be32(hose->cfg_addr, 0);
mb();
return PCIBIOS_SUCCESSFUL;
}
static int
mpc52xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
u32 value, mask;
if (ppc_md.pci_exclude_device)
if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
out_be32(hose->cfg_addr,
(1 << 31) |
(bus->number << 16) |
(devfn << 8) |
(offset & 0xfc));
mb();
#if defined(CONFIG_PPC_MPC5200_BUGFIX)
if (bus->number) {
		/* workaround for bug 435 of the MPC5200 (L25R);
		 * don't do 32-bit config accesses during type-1 cycles */
switch (len) {
case 1:
out_8(((u8 __iomem *)hose->cfg_data) +
(offset & 3), val);
break;
case 2:
out_le16(((u16 __iomem *)hose->cfg_data) +
((offset>>1) & 1), val);
break;
default:
out_le16((u16 __iomem *)hose->cfg_data,
(u16)val);
out_le16(((u16 __iomem *)hose->cfg_data) + 1,
(u16)(val>>16));
break;
}
}
else
#endif
{
if (len != 4) {
value = in_le32(hose->cfg_data);
offset = (offset & 0x3) << 3;
mask = (0xffffffff >> (32 - (len << 3)));
mask <<= offset;
value &= ~mask;
val = value | ((val << offset) & mask);
}
out_le32(hose->cfg_data, val);
}
mb();
out_be32(hose->cfg_addr, 0);
mb();
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops mpc52xx_pci_ops = {
.read = mpc52xx_pci_read_config,
.write = mpc52xx_pci_write_config
};
/* ======================================================================== */
/* PCI setup */
/* ======================================================================== */
static void __init
mpc52xx_pci_setup(struct pci_controller *hose,
struct mpc52xx_pci __iomem *pci_regs, phys_addr_t pci_phys)
{
struct resource *res;
u32 tmp;
int iwcr0 = 0, iwcr1 = 0, iwcr2 = 0;
pr_debug("%s(hose=%p, pci_regs=%p)\n", __func__, hose, pci_regs);
/* pci_process_bridge_OF_ranges() found all our addresses for us;
* now store them in the right places */
hose->cfg_addr = &pci_regs->car;
hose->cfg_data = hose->io_base_virt;
/* Control regs */
tmp = in_be32(&pci_regs->scr);
tmp |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
out_be32(&pci_regs->scr, tmp);
/* Memory windows */
res = &hose->mem_resources[0];
if (res->flags) {
pr_debug("mem_resource[0] = %pr\n", res);
out_be32(&pci_regs->iw0btar,
MPC52xx_PCI_IWBTAR_TRANSLATION(res->start, res->start,
resource_size(res)));
iwcr0 = MPC52xx_PCI_IWCR_ENABLE | MPC52xx_PCI_IWCR_MEM;
if (res->flags & IORESOURCE_PREFETCH)
iwcr0 |= MPC52xx_PCI_IWCR_READ_MULTI;
else
iwcr0 |= MPC52xx_PCI_IWCR_READ;
}
res = &hose->mem_resources[1];
if (res->flags) {
pr_debug("mem_resource[1] = %pr\n", res);
out_be32(&pci_regs->iw1btar,
MPC52xx_PCI_IWBTAR_TRANSLATION(res->start, res->start,
resource_size(res)));
iwcr1 = MPC52xx_PCI_IWCR_ENABLE | MPC52xx_PCI_IWCR_MEM;
if (res->flags & IORESOURCE_PREFETCH)
iwcr1 |= MPC52xx_PCI_IWCR_READ_MULTI;
else
iwcr1 |= MPC52xx_PCI_IWCR_READ;
}
/* IO resources */
res = &hose->io_resource;
if (!res) {
printk(KERN_ERR "%s: Didn't find IO resources\n", __FILE__);
return;
}
pr_debug(".io_resource = %pr .io_base_phys=0x%pa\n",
res, &hose->io_base_phys);
out_be32(&pci_regs->iw2btar,
MPC52xx_PCI_IWBTAR_TRANSLATION(hose->io_base_phys,
res->start,
resource_size(res)));
iwcr2 = MPC52xx_PCI_IWCR_ENABLE | MPC52xx_PCI_IWCR_IO;
/* Set all the IWCR fields at once; they're in the same reg */
out_be32(&pci_regs->iwcr, MPC52xx_PCI_IWCR_PACK(iwcr0, iwcr1, iwcr2));
/* Map IMMR onto PCI bus */
pci_phys &= 0xfffc0000; /* bar0 has only 14 significant bits */
out_be32(&pci_regs->tbatr0, MPC52xx_PCI_TBATR_ENABLE | pci_phys);
out_be32(&pci_regs->bar0, PCI_BASE_ADDRESS_MEM_PREFETCH | pci_phys);
/* Map memory onto PCI bus */
out_be32(&pci_regs->tbatr1, MPC52xx_PCI_TBATR_ENABLE);
out_be32(&pci_regs->bar1, PCI_BASE_ADDRESS_MEM_PREFETCH);
out_be32(&pci_regs->tcr, MPC52xx_PCI_TCR_LD | MPC52xx_PCI_TCR_WCT8);
tmp = in_be32(&pci_regs->gscr);
#if 0
	/* Reset the external bus (the internal PCI controller is NOT reset) */
	/* Not necessary, and can be a bad thing if for example the bootloader
	   is displaying a splash screen or ... Just left here for
	   documentation purposes if anyone needs it */
out_be32(&pci_regs->gscr, tmp | MPC52xx_PCI_GSCR_PR);
udelay(50);
#endif
/* Make sure the PCI bridge is out of reset */
out_be32(&pci_regs->gscr, tmp & ~MPC52xx_PCI_GSCR_PR);
}
static void
mpc52xx_pci_fixup_resources(struct pci_dev *dev)
{
struct resource *res;
pr_debug("%s() %.4x:%.4x\n", __func__, dev->vendor, dev->device);
	/* We don't rely on the boot loader's PCI setup, so reset all
	   devices */
pci_dev_for_each_resource(dev, res) {
if (res->end > res->start) { /* Only valid resources */
res->end -= res->start;
res->start = 0;
res->flags |= IORESOURCE_UNSET;
}
}
	/* The PCI host bridge of the MPC52xx has a prefetch memory resource
	   fixed at 1GB. It doesn't fit in the resource system, so we remove it */
if ( (dev->vendor == PCI_VENDOR_ID_MOTOROLA) &&
( dev->device == PCI_DEVICE_ID_MOTOROLA_MPC5200
|| dev->device == PCI_DEVICE_ID_MOTOROLA_MPC5200B) ) {
struct resource *res = &dev->resource[1];
res->start = res->end = res->flags = 0;
}
}
int __init
mpc52xx_add_bridge(struct device_node *node)
{
int len;
struct mpc52xx_pci __iomem *pci_regs;
struct pci_controller *hose;
const int *bus_range;
struct resource rsrc;
pr_debug("Adding MPC52xx PCI host bridge %pOF\n", node);
pci_add_flags(PCI_REASSIGN_ALL_BUS);
if (of_address_to_resource(node, 0, &rsrc) != 0) {
printk(KERN_ERR "Can't get %pOF resources\n", node);
return -EINVAL;
}
bus_range = of_get_property(node, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
printk(KERN_WARNING "Can't get %pOF bus-range, assume bus 0\n",
node);
bus_range = NULL;
}
/* There are some PCI quirks on the 52xx, register the hook to
* fix them. */
ppc_md.pcibios_fixup_resources = mpc52xx_pci_fixup_resources;
/* Alloc and initialize the pci controller. Values in the device
* tree are needed to configure the 52xx PCI controller. Rather
* than parse the tree here, let pci_process_bridge_OF_ranges()
* do it for us and extract the values after the fact */
hose = pcibios_alloc_controller(node);
if (!hose)
return -ENOMEM;
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
hose->ops = &mpc52xx_pci_ops;
pci_regs = ioremap(rsrc.start, resource_size(&rsrc));
if (!pci_regs)
return -ENOMEM;
pci_process_bridge_OF_ranges(hose, node, 1);
/* Finish setting up PCI using values obtained by
	 * pci_process_bridge_OF_ranges */
mpc52xx_pci_setup(hose, pci_regs, rsrc.start);
return 0;
}
void __init mpc52xx_setup_pci(void)
{
struct device_node *pci;
pci = of_find_matching_node(NULL, mpc52xx_pci_ids);
if (!pci)
return;
mpc52xx_add_bridge(pci);
of_node_put(pci);
}
| linux-master | arch/powerpc/platforms/52xx/mpc52xx_pci.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Support for 'media5200-platform' compatible boards.
*
* Copyright (C) 2008 Secret Lab Technologies Ltd.
*
* Description:
 * This code implements support for the Freescale Media5200 platform
* (built around the MPC5200 SoC).
*
* Notable characteristic of the Media5200 is the presence of an FPGA
* that has all external IRQ lines routed through it. This file implements
* a cascaded interrupt controller driver which attaches itself to the
* Virtual IRQ subsystem after the primary mpc5200 interrupt controller
* is initialized.
*/
#undef DEBUG
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/mpc52xx.h>
static const struct of_device_id mpc5200_gpio_ids[] __initconst = {
{ .compatible = "fsl,mpc5200-gpio", },
{ .compatible = "mpc5200-gpio", },
{}
};
/* FPGA register set */
#define MEDIA5200_IRQ_ENABLE (0x40c)
#define MEDIA5200_IRQ_STATUS (0x410)
#define MEDIA5200_NUM_IRQS (6)
#define MEDIA5200_IRQ_SHIFT (32 - MEDIA5200_NUM_IRQS)
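/* The FPGA routes its 6 IRQ lines through the top bits (31:26) of the
 * enable and status registers. */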
struct media5200_irq {
void __iomem *regs;
spinlock_t lock;
struct irq_domain *irqhost;
};
struct media5200_irq media5200_irq;
static void media5200_irq_unmask(struct irq_data *d)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&media5200_irq.lock, flags);
val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE);
val |= 1 << (MEDIA5200_IRQ_SHIFT + irqd_to_hwirq(d));
out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val);
spin_unlock_irqrestore(&media5200_irq.lock, flags);
}
static void media5200_irq_mask(struct irq_data *d)
{
unsigned long flags;
u32 val;
spin_lock_irqsave(&media5200_irq.lock, flags);
val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE);
val &= ~(1 << (MEDIA5200_IRQ_SHIFT + irqd_to_hwirq(d)));
out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val);
spin_unlock_irqrestore(&media5200_irq.lock, flags);
}
static struct irq_chip media5200_irq_chip = {
.name = "Media5200 FPGA",
.irq_unmask = media5200_irq_unmask,
.irq_mask = media5200_irq_mask,
.irq_mask_ack = media5200_irq_mask,
};
static void media5200_irq_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
int val;
u32 status, enable;
/* Mask off the cascaded IRQ */
raw_spin_lock(&desc->lock);
chip->irq_mask(&desc->irq_data);
raw_spin_unlock(&desc->lock);
/* Ask the FPGA for IRQ status. If 'val' is 0, then no irqs
* are pending. 'ffs()' is 1 based */
	status = in_be32(media5200_irq.regs + MEDIA5200_IRQ_STATUS);
	enable = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE);
val = ffs((status & enable) >> MEDIA5200_IRQ_SHIFT);
if (val) {
generic_handle_domain_irq(media5200_irq.irqhost, val - 1);
/* pr_debug("%s: virq=%i s=%.8x e=%.8x hwirq=%i\n",
* __func__, virq, status, enable, val - 1);
*/
}
/* Processing done; can reenable the cascade now */
raw_spin_lock(&desc->lock);
chip->irq_ack(&desc->irq_data);
if (!irqd_irq_disabled(&desc->irq_data))
chip->irq_unmask(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}
static int media5200_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw);
irq_set_chip_data(virq, &media5200_irq);
irq_set_chip_and_handler(virq, &media5200_irq_chip, handle_level_irq);
irq_set_status_flags(virq, IRQ_LEVEL);
return 0;
}
static int media5200_irq_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
unsigned int *out_flags)
{
if (intsize != 2)
return -1;
pr_debug("%s: bank=%i, number=%i\n", __func__, intspec[0], intspec[1]);
*out_hwirq = intspec[1];
*out_flags = IRQ_TYPE_NONE;
return 0;
}
static const struct irq_domain_ops media5200_irq_ops = {
.map = media5200_irq_map,
.xlate = media5200_irq_xlate,
};
/*
* Setup Media5200 IRQ mapping
*/
static void __init media5200_init_irq(void)
{
struct device_node *fpga_np;
int cascade_virq;
/* First setup the regular MPC5200 interrupt controller */
mpc52xx_init_irq();
/* Now find the FPGA IRQ */
fpga_np = of_find_compatible_node(NULL, NULL, "fsl,media5200-fpga");
if (!fpga_np)
goto out;
pr_debug("%s: found fpga node: %pOF\n", __func__, fpga_np);
media5200_irq.regs = of_iomap(fpga_np, 0);
if (!media5200_irq.regs)
goto out;
pr_debug("%s: mapped to %p\n", __func__, media5200_irq.regs);
cascade_virq = irq_of_parse_and_map(fpga_np, 0);
if (!cascade_virq)
goto out;
pr_debug("%s: cascaded on virq=%i\n", __func__, cascade_virq);
/* Disable all FPGA IRQs */
out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, 0);
spin_lock_init(&media5200_irq.lock);
media5200_irq.irqhost = irq_domain_add_linear(fpga_np,
MEDIA5200_NUM_IRQS, &media5200_irq_ops, &media5200_irq);
if (!media5200_irq.irqhost)
goto out;
pr_debug("%s: allocated irqhost\n", __func__);
of_node_put(fpga_np);
irq_set_handler_data(cascade_virq, &media5200_irq);
irq_set_chained_handler(cascade_virq, media5200_irq_cascade);
return;
out:
pr_err("Could not find Media5200 FPGA; PCI interrupts will not work\n");
of_node_put(fpga_np);
}
/*
* Setup the architecture
*/
static void __init media5200_setup_arch(void)
{
struct device_node *np;
struct mpc52xx_gpio __iomem *gpio;
u32 port_config;
if (ppc_md.progress)
ppc_md.progress("media5200_setup_arch()", 0);
/* Map important registers from the internal memory map */
mpc52xx_map_common_devices();
/* Some mpc5200 & mpc5200b related configuration */
mpc5200_setup_xlb_arbiter();
np = of_find_matching_node(NULL, mpc5200_gpio_ids);
gpio = of_iomap(np, 0);
of_node_put(np);
if (!gpio) {
printk(KERN_ERR "%s() failed. expect abnormal behavior\n",
__func__);
return;
}
/* Set port config */
port_config = in_be32(&gpio->port_config);
port_config &= ~0x03000000; /* ATA CS is on csb_4/5 */
port_config |= 0x01000000;
out_be32(&gpio->port_config, port_config);
/* Unmap zone */
iounmap(gpio);
}
define_machine(media5200_platform) {
.name = "media5200-platform",
.compatible = "fsl,media5200",
.setup_arch = media5200_setup_arch,
.discover_phbs = mpc52xx_setup_pci,
.init = mpc52xx_declare_of_platform_devices,
.init_IRQ = media5200_init_irq,
.get_irq = mpc52xx_get_irq,
.restart = mpc52xx_restart,
};
| linux-master | arch/powerpc/platforms/52xx/media5200.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2005 Sven Luther <[email protected]>
* Thanks to :
* Dale Farnsworth <[email protected]>
* Mark A. Greer <[email protected]>
* Nicolas DET <[email protected]>
* Benjamin Herrenschmidt <[email protected]>
* And anyone else who helped me on this.
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/mv643xx.h>
#include <linux/pci.h>
#define PEGASOS2_MARVELL_REGBASE (0xf1000000)
#define PEGASOS2_MARVELL_REGSIZE (0x00004000)
#define PEGASOS2_SRAM_BASE (0xf2000000)
#define PEGASOS2_SRAM_SIZE (256*1024)
#define PEGASOS2_SRAM_BASE_ETH_PORT0 (PEGASOS2_SRAM_BASE)
#define PEGASOS2_SRAM_BASE_ETH_PORT1 (PEGASOS2_SRAM_BASE_ETH_PORT0 + (PEGASOS2_SRAM_SIZE / 2) )
#define PEGASOS2_SRAM_RXRING_SIZE (PEGASOS2_SRAM_SIZE/4)
#define PEGASOS2_SRAM_TXRING_SIZE (PEGASOS2_SRAM_SIZE/4)
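/* The 256 KiB of on-chip Marvell SRAM is split evenly between the two
 * ethernet ports, and each half is split again between the TX and RX rings. */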
#undef BE_VERBOSE
static struct resource mv643xx_eth_shared_resources[] = {
[0] = {
.name = "ethernet shared base",
.start = 0xf1000000 + MV643XX_ETH_SHARED_REGS,
.end = 0xf1000000 + MV643XX_ETH_SHARED_REGS +
MV643XX_ETH_SHARED_REGS_SIZE - 1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device mv643xx_eth_shared_device = {
.name = MV643XX_ETH_SHARED_NAME,
.id = 0,
.num_resources = ARRAY_SIZE(mv643xx_eth_shared_resources),
.resource = mv643xx_eth_shared_resources,
};
/*
* The orion mdio driver only covers shared + 0x4 up to shared + 0x84 - 1
*/
static struct resource mv643xx_eth_mvmdio_resources[] = {
[0] = {
.name = "ethernet mdio base",
.start = 0xf1000000 + MV643XX_ETH_SHARED_REGS + 0x4,
.end = 0xf1000000 + MV643XX_ETH_SHARED_REGS + 0x83,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device mv643xx_eth_mvmdio_device = {
.name = "orion-mdio",
.id = -1,
.num_resources = ARRAY_SIZE(mv643xx_eth_mvmdio_resources),
.resource = mv643xx_eth_mvmdio_resources,
};
static struct resource mv643xx_eth_port1_resources[] = {
[0] = {
.name = "eth port1 irq",
.start = 9,
.end = 9,
.flags = IORESOURCE_IRQ,
},
};
static struct mv643xx_eth_platform_data eth_port1_pd = {
.shared = &mv643xx_eth_shared_device,
.port_number = 1,
.phy_addr = MV643XX_ETH_PHY_ADDR(7),
.tx_sram_addr = PEGASOS2_SRAM_BASE_ETH_PORT1,
.tx_sram_size = PEGASOS2_SRAM_TXRING_SIZE,
.tx_queue_size = PEGASOS2_SRAM_TXRING_SIZE/16,
.rx_sram_addr = PEGASOS2_SRAM_BASE_ETH_PORT1 + PEGASOS2_SRAM_TXRING_SIZE,
.rx_sram_size = PEGASOS2_SRAM_RXRING_SIZE,
.rx_queue_size = PEGASOS2_SRAM_RXRING_SIZE/16,
};
static struct platform_device eth_port1_device = {
.name = MV643XX_ETH_NAME,
.id = 1,
.num_resources = ARRAY_SIZE(mv643xx_eth_port1_resources),
.resource = mv643xx_eth_port1_resources,
.dev = {
.platform_data = ð_port1_pd,
},
};
static struct platform_device *mv643xx_eth_pd_devs[] __initdata = {
&mv643xx_eth_shared_device,
&mv643xx_eth_mvmdio_device,
ð_port1_device,
};
/***********/
/***********/
#define MV_READ(offset,val) { val = readl(mv643xx_reg_base + offset); }
#define MV_WRITE(offset,data) writel(data, mv643xx_reg_base + offset)
static void __iomem *mv643xx_reg_base;
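/*
 * Map the MV64361 system controller registers, point the integrated
 * SRAM at PEGASOS2_SRAM_BASE, open a matching window in the ethernet
 * unit (BAR 4) and unmap the registers again.  Returns 1 on success,
 * or -ENOMEM if the register block cannot be ioremap()ed.
 */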
static int __init Enable_SRAM(void)
{
u32 ALong;
if (mv643xx_reg_base == NULL)
mv643xx_reg_base = ioremap(PEGASOS2_MARVELL_REGBASE,
PEGASOS2_MARVELL_REGSIZE);
if (mv643xx_reg_base == NULL)
return -ENOMEM;
#ifdef BE_VERBOSE
printk("Pegasos II/Marvell MV64361: register remapped from %p to %p\n",
(void *)PEGASOS2_MARVELL_REGBASE, (void *)mv643xx_reg_base);
#endif
MV_WRITE(MV64340_SRAM_CONFIG, 0);
MV_WRITE(MV64340_INTEGRATED_SRAM_BASE_ADDR, PEGASOS2_SRAM_BASE >> 16);
MV_READ(MV64340_BASE_ADDR_ENABLE, ALong);
ALong &= ~(1 << 19);
MV_WRITE(MV64340_BASE_ADDR_ENABLE, ALong);
ALong = 0x02;
ALong |= PEGASOS2_SRAM_BASE & 0xffff0000;
MV_WRITE(MV643XX_ETH_BAR_4, ALong);
MV_WRITE(MV643XX_ETH_SIZE_REG_4, (PEGASOS2_SRAM_SIZE-1) & 0xffff0000);
MV_READ(MV643XX_ETH_BASE_ADDR_ENABLE_REG, ALong);
ALong &= ~(1 << 4);
MV_WRITE(MV643XX_ETH_BASE_ADDR_ENABLE_REG, ALong);
#ifdef BE_VERBOSE
printk("Pegasos II/Marvell MV64361: register unmapped\n");
printk("Pegasos II/Marvell MV64361: SRAM at %p, size=%x\n", (void*) PEGASOS2_SRAM_BASE, PEGASOS2_SRAM_SIZE);
#endif
iounmap(mv643xx_reg_base);
mv643xx_reg_base = NULL;
return 1;
}
/***********/
/***********/
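/*
 * Register the mv643xx ethernet platform devices, but only when the
 * Marvell MV64360/MV64361 bridge is actually present on the PCI bus.
 * If the on-chip SRAM cannot be enabled, fall back to ordinary memory
 * by clearing the SRAM ring addresses in the platform data.
 */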
static int __init mv643xx_eth_add_pds(void)
{
int ret = 0;
static struct pci_device_id pci_marvell_mv64360[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_MV64360) },
{ }
};
#ifdef BE_VERBOSE
printk("Pegasos II/Marvell MV64361: init\n");
#endif
if (pci_dev_present(pci_marvell_mv64360)) {
ret = platform_add_devices(mv643xx_eth_pd_devs,
ARRAY_SIZE(mv643xx_eth_pd_devs));
		if (Enable_SRAM() < 0) {
eth_port1_pd.tx_sram_addr = 0;
eth_port1_pd.tx_sram_size = 0;
eth_port1_pd.rx_sram_addr = 0;
eth_port1_pd.rx_sram_size = 0;
#ifdef BE_VERBOSE
printk("Pegasos II/Marvell MV64361: Can't enable the "
"SRAM\n");
#endif
}
}
#ifdef BE_VERBOSE
printk("Pegasos II/Marvell MV64361: init is over\n");
#endif
return ret;
}
device_initcall(mv643xx_eth_add_pds);
| linux-master | arch/powerpc/platforms/chrp/pegasos_eth.c |
// SPDX-License-Identifier: GPL-2.0
/*
* CHRP pci routines.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <linux/of_address.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/hydra.h>
#include <asm/machdep.h>
#include <asm/sections.h>
#include <asm/pci-bridge.h>
#include <asm/grackle.h>
#include <asm/rtas.h>
#include "chrp.h"
#include "gg2.h"
/* LongTrail */
void __iomem *gg2_pci_config_base;
/*
* The VLSI Golden Gate II has only 512K of PCI configuration space, so we
* limit the bus number to 3 bits
*/
static int gg2_read_config(struct pci_bus *bus, unsigned int devfn, int off,
int len, u32 *val)
{
volatile void __iomem *cfg_data;
struct pci_controller *hose = pci_bus_to_host(bus);
if (bus->number > 7)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* Note: the caller has already checked that off is
* suitably aligned and that len is 1, 2 or 4.
*/
cfg_data = hose->cfg_data + ((bus->number<<16) | (devfn<<8) | off);
switch (len) {
case 1:
*val = in_8(cfg_data);
break;
case 2:
*val = in_le16(cfg_data);
break;
default:
*val = in_le32(cfg_data);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int gg2_write_config(struct pci_bus *bus, unsigned int devfn, int off,
int len, u32 val)
{
volatile void __iomem *cfg_data;
struct pci_controller *hose = pci_bus_to_host(bus);
if (bus->number > 7)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* Note: the caller has already checked that off is
* suitably aligned and that len is 1, 2 or 4.
*/
cfg_data = hose->cfg_data + ((bus->number<<16) | (devfn<<8) | off);
switch (len) {
case 1:
out_8(cfg_data, val);
break;
case 2:
out_le16(cfg_data, val);
break;
default:
out_le32(cfg_data, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops gg2_pci_ops =
{
.read = gg2_read_config,
.write = gg2_write_config,
};
/*
* Access functions for PCI config space using RTAS calls.
*/
static int rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
int len, u32 *val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
| (((bus->number - hose->first_busno) & 0xff) << 16)
| (hose->global_number << 24);
int ret = -1;
int rval;
rval = rtas_call(rtas_function_token(RTAS_FN_READ_PCI_CONFIG), 2, 2, &ret, addr, len);
*val = ret;
return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL;
}
static int rtas_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
int len, u32 val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8)
| (((bus->number - hose->first_busno) & 0xff) << 16)
| (hose->global_number << 24);
int rval;
rval = rtas_call(rtas_function_token(RTAS_FN_WRITE_PCI_CONFIG), 3, 1, NULL,
addr, len, val);
return rval? PCIBIOS_DEVICE_NOT_FOUND: PCIBIOS_SUCCESSFUL;
}
static struct pci_ops rtas_pci_ops =
{
.read = rtas_read_config,
.write = rtas_write_config,
};
volatile struct Hydra __iomem *Hydra = NULL;
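/*
 * Map the Hydra Mac I/O controller described by the "mac-io" node and
 * enable its SCC, SCSI and MPIC cells (among other Feature_Control
 * bits).  Returns 1 if the controller was found and set up, 0 otherwise.
 */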
static int __init hydra_init(void)
{
struct device_node *np;
struct resource r;
np = of_find_node_by_name(NULL, "mac-io");
if (np == NULL || of_address_to_resource(np, 0, &r)) {
of_node_put(np);
return 0;
}
of_node_put(np);
Hydra = ioremap(r.start, resource_size(&r));
printk("Hydra Mac I/O at %llx\n", (unsigned long long)r.start);
printk("Hydra Feature_Control was %x",
in_le32(&Hydra->Feature_Control));
out_le32(&Hydra->Feature_Control, (HYDRA_FC_SCC_CELL_EN |
HYDRA_FC_SCSI_CELL_EN |
HYDRA_FC_SCCA_ENABLE |
HYDRA_FC_SCCB_ENABLE |
HYDRA_FC_ARB_BYPASS |
HYDRA_FC_MPIC_ENABLE |
HYDRA_FC_SLOW_SCC_PCLK |
HYDRA_FC_MPIC_IS_MASTER));
printk(", now %x\n", in_le32(&Hydra->Feature_Control));
return 1;
}
#define PRG_CL_RESET_VALID 0x00010000
static void __init
setup_python(struct pci_controller *hose, struct device_node *dev)
{
u32 __iomem *reg;
u32 val;
struct resource r;
if (of_address_to_resource(dev, 0, &r)) {
printk(KERN_ERR "No address for Python PCI controller\n");
return;
}
/* Clear the magic go-slow bit */
reg = ioremap(r.start + 0xf6000, 0x40);
BUG_ON(!reg);
val = in_be32(®[12]);
if (val & PRG_CL_RESET_VALID) {
out_be32(®[12], val & ~PRG_CL_RESET_VALID);
in_be32(®[12]);
}
iounmap(reg);
setup_indirect_pci(hose, r.start + 0xf8000, r.start + 0xf8010, 0);
}
/* Marvell Discovery II based Pegasos 2 */
static void __init setup_peg2(struct pci_controller *hose, struct device_node *dev)
{
struct device_node *root = of_find_node_by_path("/");
struct device_node *rtas;
	rtas = of_find_node_by_name(root, "rtas");
if (rtas) {
hose->ops = &rtas_pci_ops;
of_node_put(rtas);
} else {
printk ("RTAS supporting Pegasos OF not found, please upgrade"
" your firmware\n");
}
pci_add_flags(PCI_REASSIGN_ALL_BUS);
/* keep the reference to the root node */
}
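/*
 * Scan the top-level "pci" nodes, work out which kind of host bridge
 * each one is (Python, Grackle, GG2, Pegasos, CPC710 or generic RTAS)
 * and install the matching config space accessors, then parse the
 * bridge ranges and the dma-ranges used to set pci_dram_offset.
 */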
void __init
chrp_find_bridges(void)
{
struct device_node *dev;
const int *bus_range;
int len, index = -1;
struct pci_controller *hose;
const unsigned int *dma;
const char *model, *machine;
int is_longtrail = 0, is_mot = 0, is_pegasos = 0;
struct device_node *root = of_find_node_by_path("/");
struct resource r;
/*
* The PCI host bridge nodes on some machines don't have
* properties to adequately identify them, so we have to
* look at what sort of machine this is as well.
*/
machine = of_get_property(root, "model", NULL);
if (machine != NULL) {
is_longtrail = strncmp(machine, "IBM,LongTrail", 13) == 0;
is_mot = strncmp(machine, "MOT", 3) == 0;
if (strncmp(machine, "Pegasos2", 8) == 0)
is_pegasos = 2;
else if (strncmp(machine, "Pegasos", 7) == 0)
is_pegasos = 1;
}
for_each_child_of_node(root, dev) {
if (!of_node_is_type(dev, "pci"))
continue;
++index;
/* The GG2 bridge on the LongTrail doesn't have an address */
if (of_address_to_resource(dev, 0, &r) && !is_longtrail) {
printk(KERN_WARNING "Can't use %pOF: no address\n",
dev);
continue;
}
bus_range = of_get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
printk(KERN_WARNING "Can't get bus-range for %pOF\n",
dev);
continue;
}
if (bus_range[1] == bus_range[0])
printk(KERN_INFO "PCI bus %d", bus_range[0]);
else
printk(KERN_INFO "PCI buses %d..%d",
bus_range[0], bus_range[1]);
printk(" controlled by %pOF", dev);
if (!is_longtrail)
printk(" at %llx", (unsigned long long)r.start);
printk("\n");
hose = pcibios_alloc_controller(dev);
if (!hose) {
printk("Can't allocate PCI controller structure for %pOF\n",
dev);
continue;
}
hose->first_busno = hose->self_busno = bus_range[0];
hose->last_busno = bus_range[1];
model = of_get_property(dev, "model", NULL);
if (model == NULL)
model = "<none>";
if (strncmp(model, "IBM, Python", 11) == 0) {
setup_python(hose, dev);
} else if (is_mot
|| strncmp(model, "Motorola, Grackle", 17) == 0) {
setup_grackle(hose);
} else if (is_longtrail) {
void __iomem *p = ioremap(GG2_PCI_CONFIG_BASE, 0x80000);
hose->ops = &gg2_pci_ops;
hose->cfg_data = p;
gg2_pci_config_base = p;
} else if (is_pegasos == 1) {
setup_indirect_pci(hose, 0xfec00cf8, 0xfee00cfc, 0);
} else if (is_pegasos == 2) {
setup_peg2(hose, dev);
} else if (!strncmp(model, "IBM,CPC710", 10)) {
setup_indirect_pci(hose,
r.start + 0x000f8000,
r.start + 0x000f8010,
0);
if (index == 0) {
dma = of_get_property(dev, "system-dma-base",
&len);
if (dma && len >= sizeof(*dma)) {
dma = (unsigned int *)
(((unsigned long)dma) +
len - sizeof(*dma));
pci_dram_offset = *dma;
}
}
} else {
printk("No methods for %pOF (model %s), using RTAS\n",
dev, model);
hose->ops = &rtas_pci_ops;
}
pci_process_bridge_OF_ranges(hose, dev, index == 0);
/* check the first bridge for a property that we can
use to set pci_dram_offset */
dma = of_get_property(dev, "ibm,dma-ranges", &len);
if (index == 0 && dma != NULL && len >= 6 * sizeof(*dma)) {
pci_dram_offset = dma[2] - dma[3];
printk("pci_dram_offset = %lx\n", pci_dram_offset);
}
}
of_node_put(root);
/*
* "Temporary" fixes for PCI devices.
* -- Geert
*/
hydra_init(); /* Mac I/O */
pci_create_OF_bus_map();
}
/* SL82C105 IDE Control/Status Register */
#define SL82C105_IDECSR 0x40
/* Fixup for Winbond ATA quirk, required for briq mostly because the
* 8259 is configured for level sensitive IRQ 14 and so wants the
* ATA controller to be set to fully native mode or bad things
* will happen.
*/
static void chrp_pci_fixup_winbond_ata(struct pci_dev *sl82c105)
{
u8 progif;
/* If non-briq machines need that fixup too, please speak up */
if (!machine_is(chrp) || _chrp_type != _CHRP_briq)
return;
if ((sl82c105->class & 5) != 5) {
printk("W83C553: Switching SL82C105 IDE to PCI native mode\n");
/* Enable SL82C105 PCI native IDE mode */
pci_read_config_byte(sl82c105, PCI_CLASS_PROG, &progif);
pci_write_config_byte(sl82c105, PCI_CLASS_PROG, progif | 0x05);
sl82c105->class |= 0x05;
/* Disable SL82C105 second port */
pci_write_config_word(sl82c105, SL82C105_IDECSR, 0x0003);
/* Clear IO BARs, they will be reassigned */
pci_write_config_dword(sl82c105, PCI_BASE_ADDRESS_0, 0);
pci_write_config_dword(sl82c105, PCI_BASE_ADDRESS_1, 0);
pci_write_config_dword(sl82c105, PCI_BASE_ADDRESS_2, 0);
pci_write_config_dword(sl82c105, PCI_BASE_ADDRESS_3, 0);
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105,
chrp_pci_fixup_winbond_ata);
/* Pegasos2 firmware version 20040810 configures the built-in IDE controller
* in legacy mode, but sets the PCI registers to PCI native mode.
* The chip can only operate in legacy mode, so force the PCI class into legacy
* mode as well. The same fixup must be done to the class-code property in
* the IDE node /pci@80000000/ide@C,1
*/
static void chrp_pci_fixup_vt8231_ata(struct pci_dev *viaide)
{
u8 progif;
struct pci_dev *viaisa;
if (!machine_is(chrp) || _chrp_type != _CHRP_Pegasos)
return;
if (viaide->irq != 14)
return;
viaisa = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
if (!viaisa)
return;
dev_info(&viaide->dev, "Fixing VIA IDE, force legacy mode on\n");
pci_read_config_byte(viaide, PCI_CLASS_PROG, &progif);
pci_write_config_byte(viaide, PCI_CLASS_PROG, progif & ~0x5);
viaide->class &= ~0x5;
pci_dev_put(viaisa);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, chrp_pci_fixup_vt8231_ata);
| linux-master | arch/powerpc/platforms/chrp/pci.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1995 Linus Torvalds
* Adapted from 'alpha' version by Gary Thomas
* Modified by Cort Dougan ([email protected])
*/
/*
* bootup setup stuff..
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/major.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <generated/utsrelease.h>
#include <linux/adb.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/initrd.h>
#include <linux/timer.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/hydra.h>
#include <asm/sections.h>
#include <asm/time.h>
#include <asm/i8259.h>
#include <asm/mpic.h>
#include <asm/rtas.h>
#include <asm/xmon.h>
#include "chrp.h"
#include "gg2.h"
void rtas_indicator_progress(char *, unsigned short);
int _chrp_type;
EXPORT_SYMBOL(_chrp_type);
static struct mpic *chrp_mpic;
/* Used for doing CHRP event-scans */
DEFINE_PER_CPU(struct timer_list, heartbeat_timer);
unsigned long event_scan_interval;
extern unsigned long loops_per_jiffy;
/* To be replaced by RTAS when available */
static unsigned int __iomem *briq_SPOR;
#ifdef CONFIG_SMP
extern struct smp_ops_t chrp_smp_ops;
#endif
static const char *gg2_memtypes[4] = {
"FPM", "SDRAM", "EDO", "BEDO"
};
static const char *gg2_cachesizes[4] = {
"256 KB", "512 KB", "1 MB", "Reserved"
};
static const char *gg2_cachetypes[4] = {
"Asynchronous", "Reserved", "Flow-Through Synchronous",
"Pipelined Synchronous"
};
static const char *gg2_cachemodes[4] = {
"Disabled", "Write-Through", "Copy-Back", "Transparent Mode"
};
static const char *chrp_names[] = {
"Unknown",
"","","",
"Motorola",
"IBM or Longtrail",
"Genesi Pegasos",
"Total Impact Briq"
};
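/*
 * show_cpuinfo hook: print the machine model and, on the LongTrail,
 * decode the Golden Gate 2 DRAM bank and L2 cache configuration.
 */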
static void chrp_show_cpuinfo(struct seq_file *m)
{
int i, sdramen;
unsigned int t;
struct device_node *root;
const char *model = "";
root = of_find_node_by_path("/");
if (root)
model = of_get_property(root, "model", NULL);
seq_printf(m, "machine\t\t: CHRP %s\n", model);
/* longtrail (goldengate) stuff */
if (model && !strncmp(model, "IBM,LongTrail", 13)) {
/* VLSI VAS96011/12 `Golden Gate 2' */
/* Memory banks */
sdramen = (in_le32(gg2_pci_config_base + GG2_PCI_DRAM_CTRL)
>>31) & 1;
for (i = 0; i < (sdramen ? 4 : 6); i++) {
t = in_le32(gg2_pci_config_base+
GG2_PCI_DRAM_BANK0+
i*4);
if (!(t & 1))
continue;
switch ((t>>8) & 0x1f) {
case 0x1f:
model = "4 MB";
break;
case 0x1e:
model = "8 MB";
break;
case 0x1c:
model = "16 MB";
break;
case 0x18:
model = "32 MB";
break;
case 0x10:
model = "64 MB";
break;
case 0x00:
model = "128 MB";
break;
default:
model = "Reserved";
break;
}
seq_printf(m, "memory bank %d\t: %s %s\n", i, model,
gg2_memtypes[sdramen ? 1 : ((t>>1) & 3)]);
}
/* L2 cache */
t = in_le32(gg2_pci_config_base+GG2_PCI_CC_CTRL);
seq_printf(m, "board l2\t: %s %s (%s)\n",
gg2_cachesizes[(t>>7) & 3],
gg2_cachetypes[(t>>2) & 3],
gg2_cachemodes[t & 3]);
}
of_node_put(root);
}
/*
* Fixes for the National Semiconductor PC78308VUL SuperI/O
*
* Some versions of Open Firmware incorrectly initialize the IRQ settings
* for keyboard and mouse
*/
static inline void __init sio_write(u8 val, u8 index)
{
outb(index, 0x15c);
outb(val, 0x15d);
}
static inline u8 __init sio_read(u8 index)
{
outb(index, 0x15c);
return inb(0x15d);
}
static void __init sio_fixup_irq(const char *name, u8 device, u8 level,
u8 type)
{
u8 level0, type0, active;
/* select logical device */
sio_write(device, 0x07);
active = sio_read(0x30);
level0 = sio_read(0x70);
type0 = sio_read(0x71);
if (level0 != level || type0 != type || !active) {
printk(KERN_WARNING "sio: %s irq level %d, type %d, %sactive: "
"remapping to level %d, type %d, active\n",
name, level0, type0, !active ? "in" : "", level, type);
sio_write(0x01, 0x30);
sio_write(level, 0x70);
sio_write(type, 0x71);
}
}
static void __init sio_init(void)
{
struct device_node *root;
const char *model;
root = of_find_node_by_path("/");
if (!root)
return;
model = of_get_property(root, "model", NULL);
if (model && !strncmp(model, "IBM,LongTrail", 13)) {
/* logical device 0 (KBC/Keyboard) */
sio_fixup_irq("keyboard", 0, 1, 2);
/* select logical device 1 (KBC/Mouse) */
sio_fixup_irq("mouse", 1, 12, 2);
}
of_node_put(root);
}
static void __init pegasos_set_l2cr(void)
{
struct device_node *np;
/* On Pegasos, enable the l2 cache if needed, as the OF forgets it */
if (_chrp_type != _CHRP_Pegasos)
return;
/* Enable L2 cache if needed */
np = of_find_node_by_type(NULL, "cpu");
if (np != NULL) {
const unsigned int *l2cr = of_get_property(np, "l2cr", NULL);
if (l2cr == NULL) {
printk ("Pegasos l2cr : no cpu l2cr property found\n");
goto out;
}
if (!((*l2cr) & 0x80000000)) {
printk ("Pegasos l2cr : L2 cache was not active, "
"activating\n");
_set_L2CR(0);
_set_L2CR((*l2cr) | 0x80000000);
}
}
out:
of_node_put(np);
}
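/* Restart hook for the briq: write the SPOR register to trigger a reset and spin */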
static void __noreturn briq_restart(char *cmd)
{
local_irq_disable();
if (briq_SPOR)
out_be32(briq_SPOR, 0);
for(;;);
}
/*
 * By default, the input/output device points to the keyboard/screen.
 * If no card is installed, the built-in serial port is used as a fallback.
 * But unfortunately, the firmware does not connect /chosen/{stdin,stdout}
 * to the built-in serial node. Instead, a /failsafe node is created.
 */
static __init void chrp_init(void)
{
struct device_node *node;
const char *property;
if (strstr(boot_command_line, "console="))
return;
/* find the boot console from /chosen/stdout */
if (!of_chosen)
return;
node = of_find_node_by_path("/");
if (!node)
return;
property = of_get_property(node, "model", NULL);
if (!property)
goto out_put;
if (strcmp(property, "Pegasos2"))
goto out_put;
/* this is a Pegasos2 */
property = of_get_property(of_chosen, "linux,stdout-path", NULL);
if (!property)
goto out_put;
of_node_put(node);
node = of_find_node_by_path(property);
if (!node)
return;
if (!of_node_is_type(node, "serial"))
goto out_put;
	/*
	 * The 9pin connector is either /failsafe
	 * or /pci@80000000/isa@C/serial@i2F8.
	 * The optional graphics card also has type 'serial' in VGA mode.
	 */
if (of_node_name_eq(node, "failsafe") || of_node_name_eq(node, "serial"))
add_preferred_console("ttyS", 0, NULL);
out_put:
of_node_put(node);
}
static void __init chrp_setup_arch(void)
{
struct device_node *root = of_find_node_by_path("/");
const char *machine = NULL;
/* init to some ~sane value until calibrate_delay() runs */
loops_per_jiffy = 50000000/HZ;
if (root)
machine = of_get_property(root, "model", NULL);
if (machine && strncmp(machine, "Pegasos", 7) == 0) {
_chrp_type = _CHRP_Pegasos;
} else if (machine && strncmp(machine, "IBM", 3) == 0) {
_chrp_type = _CHRP_IBM;
} else if (machine && strncmp(machine, "MOT", 3) == 0) {
_chrp_type = _CHRP_Motorola;
} else if (machine && strncmp(machine, "TotalImpact,BRIQ-1", 18) == 0) {
_chrp_type = _CHRP_briq;
/* Map the SPOR register on briq and change the restart hook */
briq_SPOR = ioremap(0xff0000e8, 4);
ppc_md.restart = briq_restart;
} else {
/* Let's assume it is an IBM chrp if all else fails */
_chrp_type = _CHRP_IBM;
}
of_node_put(root);
printk("chrp type = %x [%s]\n", _chrp_type, chrp_names[_chrp_type]);
rtas_initialize();
if (rtas_function_token(RTAS_FN_DISPLAY_CHARACTER) >= 0)
ppc_md.progress = rtas_progress;
/* use RTAS time-of-day routines if available */
if (rtas_function_token(RTAS_FN_GET_TIME_OF_DAY) != RTAS_UNKNOWN_SERVICE) {
ppc_md.get_boot_time = rtas_get_boot_time;
ppc_md.get_rtc_time = rtas_get_rtc_time;
ppc_md.set_rtc_time = rtas_set_rtc_time;
}
/* On pegasos, enable the L2 cache if not already done by OF */
pegasos_set_l2cr();
/*
* Fix the Super I/O configuration
*/
sio_init();
/*
* Print the banner, then scroll down so boot progress
* can be printed. -- Cort
*/
if (ppc_md.progress) ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0x0);
}
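/*
 * Cascade handler: ask the 8259 which legacy interrupt is pending,
 * forward it, then EOI the cascade interrupt on its parent controller.
 */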
static void chrp_8259_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
if (cascade_irq)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
}
/*
* Finds the open-pic node and sets up the mpic driver.
*/
static void __init chrp_find_openpic(void)
{
struct device_node *np, *root;
int len, i, j;
int isu_size;
const unsigned int *iranges, *opprop = NULL;
int oplen = 0;
unsigned long opaddr;
int na = 1;
np = of_find_node_by_type(NULL, "open-pic");
if (np == NULL)
return;
root = of_find_node_by_path("/");
if (root) {
opprop = of_get_property(root, "platform-open-pic", &oplen);
na = of_n_addr_cells(root);
}
if (opprop && oplen >= na * sizeof(unsigned int)) {
opaddr = opprop[na-1]; /* assume 32-bit */
oplen /= na * sizeof(unsigned int);
} else {
struct resource r;
if (of_address_to_resource(np, 0, &r)) {
goto bail;
}
opaddr = r.start;
oplen = 0;
}
printk(KERN_INFO "OpenPIC at %lx\n", opaddr);
iranges = of_get_property(np, "interrupt-ranges", &len);
if (iranges == NULL)
len = 0; /* non-distributed mpic */
else
len /= 2 * sizeof(unsigned int);
/*
* The first pair of cells in interrupt-ranges refers to the
* IDU; subsequent pairs refer to the ISUs.
*/
if (oplen < len) {
printk(KERN_ERR "Insufficient addresses for distributed"
" OpenPIC (%d < %d)\n", oplen, len);
len = oplen;
}
isu_size = 0;
if (len > 0 && iranges[1] != 0) {
printk(KERN_INFO "OpenPIC irqs %d..%d in IDU\n",
iranges[0], iranges[0] + iranges[1] - 1);
}
if (len > 1)
isu_size = iranges[3];
chrp_mpic = mpic_alloc(np, opaddr, MPIC_NO_RESET,
isu_size, 0, " MPIC ");
if (chrp_mpic == NULL) {
printk(KERN_ERR "Failed to allocate MPIC structure\n");
goto bail;
}
j = na - 1;
for (i = 1; i < len; ++i) {
iranges += 2;
j += na;
printk(KERN_INFO "OpenPIC irqs %d..%d in ISU at %x\n",
iranges[0], iranges[0] + iranges[1] - 1,
opprop[j]);
mpic_assign_isu(chrp_mpic, i - 1, opprop[j]);
}
mpic_init(chrp_mpic);
ppc_md.get_irq = mpic_get_irq;
bail:
of_node_put(root);
of_node_put(np);
}
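/*
 * Find the legacy 8259: look for a "chrp,iic" interrupt controller and
 * a PCI interrupt acknowledge address, initialize the 8259 and either
 * make it the primary PIC or chain it off the MPIC cascade interrupt.
 */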
static void __init chrp_find_8259(void)
{
struct device_node *np, *pic = NULL;
unsigned long chrp_int_ack = 0;
unsigned int cascade_irq;
/* Look for cascade */
for_each_node_by_type(np, "interrupt-controller")
if (of_device_is_compatible(np, "chrp,iic")) {
pic = np;
break;
}
/* Ok, 8259 wasn't found. We need to handle the case where
* we have a pegasos that claims to be chrp but doesn't have
* a proper interrupt tree
*/
if (pic == NULL && chrp_mpic != NULL) {
printk(KERN_ERR "i8259: Not found in device-tree"
" assuming no legacy interrupts\n");
return;
}
/* Look for intack. In a perfect world, we would look for it on
* the ISA bus that holds the 8259 but heh... Works that way. If
* we ever see a problem, we can try to re-use the pSeries code here.
* Also, Pegasos-type platforms don't have a proper node to start
* from anyway
*/
for_each_node_by_name(np, "pci") {
const unsigned int *addrp = of_get_property(np,
"8259-interrupt-acknowledge", NULL);
if (addrp == NULL)
continue;
chrp_int_ack = addrp[of_n_addr_cells(np)-1];
break;
}
of_node_put(np);
if (np == NULL)
printk(KERN_WARNING "Cannot find PCI interrupt acknowledge"
" address, polling\n");
i8259_init(pic, chrp_int_ack);
if (ppc_md.get_irq == NULL) {
ppc_md.get_irq = i8259_irq;
irq_set_default_host(i8259_get_host());
}
if (chrp_mpic != NULL) {
cascade_irq = irq_of_parse_and_map(pic, 0);
if (!cascade_irq)
printk(KERN_ERR "i8259: failed to map cascade irq\n");
else
irq_set_chained_handler(cascade_irq,
chrp_8259_cascade);
}
}
static void __init chrp_init_IRQ(void)
{
#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_XMON)
struct device_node *kbd;
#endif
chrp_find_openpic();
chrp_find_8259();
#ifdef CONFIG_SMP
/* Pegasos has no MPIC, those ops would make it crash. It might be an
* option to move setting them to after we probe the PIC though
*/
if (chrp_mpic != NULL)
smp_ops = &chrp_smp_ops;
#endif /* CONFIG_SMP */
if (_chrp_type == _CHRP_Pegasos)
ppc_md.get_irq = i8259_irq;
#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_XMON)
/* see if there is a keyboard in the device tree
with a parent of type "adb" */
for_each_node_by_name(kbd, "keyboard")
if (of_node_is_type(kbd->parent, "adb"))
break;
of_node_put(kbd);
if (kbd) {
if (request_irq(HYDRA_INT_ADB_NMI, xmon_irq, 0, "XMON break",
NULL))
pr_err("Failed to register XMON break interrupt\n");
}
#endif
}
static void __init
chrp_init2(void)
{
#if IS_ENABLED(CONFIG_NVRAM)
chrp_nvram_init();
#endif
request_region(0x20,0x20,"pic1");
request_region(0xa0,0x20,"pic2");
request_region(0x00,0x20,"dma1");
request_region(0x40,0x20,"timer");
request_region(0x80,0x10,"dma page reg");
request_region(0xc0,0x20,"dma2");
if (ppc_md.progress)
ppc_md.progress(" Have fun! ", 0x7777);
}
static int __init chrp_probe(void)
{
const char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(),
"device_type", NULL);
if (dtype == NULL)
return 0;
if (strcmp(dtype, "chrp"))
return 0;
DMA_MODE_READ = 0x44;
DMA_MODE_WRITE = 0x48;
pm_power_off = rtas_power_off;
chrp_init();
return 1;
}
define_machine(chrp) {
.name = "CHRP",
.probe = chrp_probe,
.setup_arch = chrp_setup_arch,
.discover_phbs = chrp_find_bridges,
.init = chrp_init2,
.show_cpuinfo = chrp_show_cpuinfo,
.init_IRQ = chrp_init_IRQ,
.restart = rtas_restart,
.halt = rtas_halt,
.time_init = chrp_time_init,
.set_rtc_time = chrp_set_rtc_time,
.get_rtc_time = chrp_get_rtc_time,
.phys_mem_access_prot = pci_phys_mem_access_prot,
};
| linux-master | arch/powerpc/platforms/chrp/setup.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
*
* Adapted for PowerPC (PReP) by Gary Thomas
* Modified by Cort Dougan ([email protected]).
* Copied and modified from arch/i386/kernel/time.c
*
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/init.h>
#include <linux/bcd.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
#include <asm/io.h>
#include <asm/nvram.h>
#include <asm/sections.h>
#include <asm/time.h>
#include <platforms/chrp/chrp.h>
#define NVRAM_AS0 0x74
#define NVRAM_AS1 0x75
#define NVRAM_DATA 0x77
static int nvram_as1 = NVRAM_AS1;
static int nvram_as0 = NVRAM_AS0;
static int nvram_data = NVRAM_DATA;
long __init chrp_time_init(void)
{
struct device_node *rtcs;
struct resource r;
int base;
rtcs = of_find_compatible_node(NULL, "rtc", "pnpPNP,b00");
if (rtcs == NULL)
rtcs = of_find_compatible_node(NULL, "rtc", "ds1385-rtc");
if (rtcs == NULL)
return 0;
if (of_address_to_resource(rtcs, 0, &r)) {
of_node_put(rtcs);
return 0;
}
of_node_put(rtcs);
base = r.start;
nvram_as1 = 0;
nvram_as0 = base;
nvram_data = base + 1;
return 0;
}
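/*
 * Byte accessors for the CMOS RTC/NVRAM through the index/data ports
 * discovered (or defaulted) in chrp_time_init().
 */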
static int chrp_cmos_clock_read(int addr)
{
if (nvram_as1 != 0)
outb(addr>>8, nvram_as1);
outb(addr, nvram_as0);
return (inb(nvram_data));
}
static void chrp_cmos_clock_write(unsigned long val, int addr)
{
if (nvram_as1 != 0)
outb(addr>>8, nvram_as1);
outb(addr, nvram_as0);
outb(val, nvram_data);
return;
}
/*
* Set the hardware clock. -- Cort
*/
int chrp_set_rtc_time(struct rtc_time *tmarg)
{
unsigned char save_control, save_freq_select;
struct rtc_time tm = *tmarg;
spin_lock(&rtc_lock);
save_control = chrp_cmos_clock_read(RTC_CONTROL); /* tell the clock it's being set */
chrp_cmos_clock_write((save_control|RTC_SET), RTC_CONTROL);
save_freq_select = chrp_cmos_clock_read(RTC_FREQ_SELECT); /* stop and reset prescaler */
chrp_cmos_clock_write((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
tm.tm_sec = bin2bcd(tm.tm_sec);
tm.tm_min = bin2bcd(tm.tm_min);
tm.tm_hour = bin2bcd(tm.tm_hour);
tm.tm_mon = bin2bcd(tm.tm_mon);
tm.tm_mday = bin2bcd(tm.tm_mday);
tm.tm_year = bin2bcd(tm.tm_year);
}
chrp_cmos_clock_write(tm.tm_sec,RTC_SECONDS);
chrp_cmos_clock_write(tm.tm_min,RTC_MINUTES);
chrp_cmos_clock_write(tm.tm_hour,RTC_HOURS);
chrp_cmos_clock_write(tm.tm_mon,RTC_MONTH);
chrp_cmos_clock_write(tm.tm_mday,RTC_DAY_OF_MONTH);
chrp_cmos_clock_write(tm.tm_year,RTC_YEAR);
/* The following flags have to be released exactly in this order,
* otherwise the DS12887 (popular MC146818A clone with integrated
* battery and quartz) will not reset the oscillator and will not
* update precisely 500 ms later. You won't find this mentioned in
* the Dallas Semiconductor data sheets, but who believes data
* sheets anyway ... -- Markus Kuhn
*/
chrp_cmos_clock_write(save_control, RTC_CONTROL);
chrp_cmos_clock_write(save_freq_select, RTC_FREQ_SELECT);
spin_unlock(&rtc_lock);
return 0;
}
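/*
 * Read the RTC, re-reading until the seconds register is stable, and
 * convert from BCD when the clock is not in binary mode.
 */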
void chrp_get_rtc_time(struct rtc_time *tm)
{
unsigned int year, mon, day, hour, min, sec;
do {
sec = chrp_cmos_clock_read(RTC_SECONDS);
min = chrp_cmos_clock_read(RTC_MINUTES);
hour = chrp_cmos_clock_read(RTC_HOURS);
day = chrp_cmos_clock_read(RTC_DAY_OF_MONTH);
mon = chrp_cmos_clock_read(RTC_MONTH);
year = chrp_cmos_clock_read(RTC_YEAR);
} while (sec != chrp_cmos_clock_read(RTC_SECONDS));
if (!(chrp_cmos_clock_read(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
sec = bcd2bin(sec);
min = bcd2bin(min);
hour = bcd2bin(hour);
day = bcd2bin(day);
mon = bcd2bin(mon);
year = bcd2bin(year);
}
if (year < 70)
year += 100;
tm->tm_sec = sec;
tm->tm_min = min;
tm->tm_hour = hour;
tm->tm_mday = day;
tm->tm_mon = mon;
tm->tm_year = year;
}
| linux-master | arch/powerpc/platforms/chrp/time.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* c 2001 PPC 64 Team, IBM Corp
*
* /dev/nvram driver for PPC
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include "chrp.h"
static unsigned int nvram_size;
static unsigned char nvram_buf[4];
static DEFINE_SPINLOCK(nvram_lock);
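/*
 * Single-byte NVRAM accessors: each access goes through the RTAS
 * nvram-fetch/nvram-store calls and a small bounce buffer, serialized
 * by nvram_lock.
 */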
static unsigned char chrp_nvram_read_val(int addr)
{
unsigned int done;
unsigned long flags;
unsigned char ret;
if (addr >= nvram_size) {
printk(KERN_DEBUG "%s: read addr %d > nvram_size %u\n",
current->comm, addr, nvram_size);
return 0xff;
}
spin_lock_irqsave(&nvram_lock, flags);
if ((rtas_call(rtas_function_token(RTAS_FN_NVRAM_FETCH), 3, 2, &done, addr,
__pa(nvram_buf), 1) != 0) || 1 != done)
ret = 0xff;
else
ret = nvram_buf[0];
spin_unlock_irqrestore(&nvram_lock, flags);
return ret;
}
static void chrp_nvram_write_val(int addr, unsigned char val)
{
unsigned int done;
unsigned long flags;
if (addr >= nvram_size) {
printk(KERN_DEBUG "%s: write addr %d > nvram_size %u\n",
current->comm, addr, nvram_size);
return;
}
spin_lock_irqsave(&nvram_lock, flags);
nvram_buf[0] = val;
if ((rtas_call(rtas_function_token(RTAS_FN_NVRAM_STORE), 3, 2, &done, addr,
__pa(nvram_buf), 1) != 0) || 1 != done)
printk(KERN_DEBUG "rtas IO error storing 0x%02x at %d", val, addr);
spin_unlock_irqrestore(&nvram_lock, flags);
}
static ssize_t chrp_nvram_size(void)
{
return nvram_size;
}
void __init chrp_nvram_init(void)
{
struct device_node *nvram;
const __be32 *nbytes_p;
unsigned int proplen;
nvram = of_find_node_by_type(NULL, "nvram");
if (nvram == NULL)
return;
nbytes_p = of_get_property(nvram, "#bytes", &proplen);
if (nbytes_p == NULL || proplen != sizeof(unsigned int)) {
of_node_put(nvram);
return;
}
nvram_size = be32_to_cpup(nbytes_p);
printk(KERN_INFO "CHRP nvram contains %u bytes\n", nvram_size);
of_node_put(nvram);
ppc_md.nvram_read_val = chrp_nvram_read_val;
ppc_md.nvram_write_val = chrp_nvram_write_val;
ppc_md.nvram_size = chrp_nvram_size;
return;
}
MODULE_LICENSE("GPL v2");
| linux-master | arch/powerpc/platforms/chrp/nvram.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Smp support for CHRP machines.
*
* Written by Cort Dougan ([email protected]) borrowing a great
* deal of code from the sparc and intel versions.
*
* Copyright (C) 1999 Cort Dougan <[email protected]>
*
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/mpic.h>
#include <asm/rtas.h>
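/*
 * Release a secondary CPU: the secondary is presumably spinning on the
 * word at KERNELBASE, so store its number there and flush the cache
 * line to make it visible.
 */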
static int smp_chrp_kick_cpu(int nr)
{
*(unsigned long *)KERNELBASE = nr;
asm volatile("dcbf 0,%0"::"r"(KERNELBASE):"memory");
return 0;
}
static void smp_chrp_setup_cpu(int cpu_nr)
{
mpic_setup_this_cpu();
}
/* CHRP with openpic */
struct smp_ops_t chrp_smp_ops = {
.cause_nmi_ipi = NULL,
.message_pass = smp_mpic_message_pass,
.probe = smp_mpic_probe,
.kick_cpu = smp_chrp_kick_cpu,
.setup_cpu = smp_chrp_setup_cpu,
.give_timebase = rtas_give_timebase,
.take_timebase = rtas_take_timebase,
};
| linux-master | arch/powerpc/platforms/chrp/smp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PowerNV system parameter code
*
* Copyright (C) 2013 IBM
*/
#include <linux/kobject.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/gfp.h>
#include <linux/stat.h>
#include <asm/opal.h>
#define MAX_PARAM_DATA_LEN 64
static DEFINE_MUTEX(opal_sysparam_mutex);
static struct kobject *sysparam_kobj;
static void *param_data_buf;
struct param_attr {
struct list_head list;
u32 param_id;
u32 param_size;
struct kobj_attribute kobj_attr;
};
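/*
 * Fetch a system parameter from OPAL: the call is asynchronous, so get
 * a token, fire the request and wait for the completion message before
 * translating the OPAL return code.
 */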
static ssize_t opal_get_sys_param(u32 param_id, u32 length, void *buffer)
{
struct opal_msg msg;
ssize_t ret;
int token;
token = opal_async_get_token_interruptible();
if (token < 0) {
if (token != -ERESTARTSYS)
pr_err("%s: Couldn't get the token, returning\n",
__func__);
ret = token;
goto out;
}
ret = opal_get_param(token, param_id, (u64)buffer, length);
if (ret != OPAL_ASYNC_COMPLETION) {
ret = opal_error_code(ret);
goto out_token;
}
ret = opal_async_wait_response(token, &msg);
if (ret) {
pr_err("%s: Failed to wait for the async response, %zd\n",
__func__, ret);
goto out_token;
}
ret = opal_error_code(opal_get_async_rc(msg));
out_token:
opal_async_release_token(token);
out:
return ret;
}
static int opal_set_sys_param(u32 param_id, u32 length, void *buffer)
{
struct opal_msg msg;
int ret, token;
token = opal_async_get_token_interruptible();
if (token < 0) {
if (token != -ERESTARTSYS)
pr_err("%s: Couldn't get the token, returning\n",
__func__);
ret = token;
goto out;
}
ret = opal_set_param(token, param_id, (u64)buffer, length);
if (ret != OPAL_ASYNC_COMPLETION) {
ret = opal_error_code(ret);
goto out_token;
}
ret = opal_async_wait_response(token, &msg);
if (ret) {
pr_err("%s: Failed to wait for the async response, %d\n",
__func__, ret);
goto out_token;
}
ret = opal_error_code(opal_get_async_rc(msg));
out_token:
opal_async_release_token(token);
out:
return ret;
}
static ssize_t sys_param_show(struct kobject *kobj,
struct kobj_attribute *kobj_attr, char *buf)
{
struct param_attr *attr = container_of(kobj_attr, struct param_attr,
kobj_attr);
ssize_t ret;
mutex_lock(&opal_sysparam_mutex);
ret = opal_get_sys_param(attr->param_id, attr->param_size,
param_data_buf);
if (ret)
goto out;
memcpy(buf, param_data_buf, attr->param_size);
ret = attr->param_size;
out:
mutex_unlock(&opal_sysparam_mutex);
return ret;
}
static ssize_t sys_param_store(struct kobject *kobj,
struct kobj_attribute *kobj_attr, const char *buf, size_t count)
{
struct param_attr *attr = container_of(kobj_attr, struct param_attr,
kobj_attr);
ssize_t ret;
/* MAX_PARAM_DATA_LEN is sizeof(param_data_buf) */
if (count > MAX_PARAM_DATA_LEN)
count = MAX_PARAM_DATA_LEN;
mutex_lock(&opal_sysparam_mutex);
memcpy(param_data_buf, buf, count);
ret = opal_set_sys_param(attr->param_id, attr->param_size,
param_data_buf);
mutex_unlock(&opal_sysparam_mutex);
if (!ret)
ret = count;
return ret;
}
void __init opal_sys_param_init(void)
{
struct device_node *sysparam;
struct param_attr *attr;
u32 *id, *size;
int count, i;
u8 *perm;
if (!opal_kobj) {
pr_warn("SYSPARAM: opal kobject is not available\n");
goto out;
}
/* Some systems do not use sysparams; this is not an error */
sysparam = of_find_node_by_path("/ibm,opal/sysparams");
if (!sysparam)
goto out;
if (!of_device_is_compatible(sysparam, "ibm,opal-sysparams")) {
pr_err("SYSPARAM: Opal sysparam node not compatible\n");
goto out_node_put;
}
sysparam_kobj = kobject_create_and_add("sysparams", opal_kobj);
if (!sysparam_kobj) {
pr_err("SYSPARAM: Failed to create sysparam kobject\n");
goto out_node_put;
}
/* Allocate big enough buffer for any get/set transactions */
param_data_buf = kzalloc(MAX_PARAM_DATA_LEN, GFP_KERNEL);
if (!param_data_buf) {
pr_err("SYSPARAM: Failed to allocate memory for param data "
"buf\n");
goto out_kobj_put;
}
/* Number of parameters exposed through DT */
count = of_property_count_strings(sysparam, "param-name");
if (count < 0) {
pr_err("SYSPARAM: No string found of property param-name in "
"the node %pOFn\n", sysparam);
goto out_param_buf;
}
id = kcalloc(count, sizeof(*id), GFP_KERNEL);
if (!id) {
pr_err("SYSPARAM: Failed to allocate memory to read parameter "
"id\n");
goto out_param_buf;
}
size = kcalloc(count, sizeof(*size), GFP_KERNEL);
if (!size) {
pr_err("SYSPARAM: Failed to allocate memory to read parameter "
"size\n");
goto out_free_id;
}
perm = kcalloc(count, sizeof(*perm), GFP_KERNEL);
if (!perm) {
pr_err("SYSPARAM: Failed to allocate memory to read supported "
"action on the parameter");
goto out_free_size;
}
if (of_property_read_u32_array(sysparam, "param-id", id, count)) {
pr_err("SYSPARAM: Missing property param-id in the DT\n");
goto out_free_perm;
}
if (of_property_read_u32_array(sysparam, "param-len", size, count)) {
pr_err("SYSPARAM: Missing property param-len in the DT\n");
goto out_free_perm;
}
if (of_property_read_u8_array(sysparam, "param-perm", perm, count)) {
pr_err("SYSPARAM: Missing property param-perm in the DT\n");
goto out_free_perm;
}
attr = kcalloc(count, sizeof(*attr), GFP_KERNEL);
if (!attr) {
pr_err("SYSPARAM: Failed to allocate memory for parameter "
"attributes\n");
goto out_free_perm;
}
/* For each of the parameters, populate the parameter attributes */
for (i = 0; i < count; i++) {
if (size[i] > MAX_PARAM_DATA_LEN) {
pr_warn("SYSPARAM: Not creating parameter %d as size "
"exceeds buffer length\n", i);
continue;
}
sysfs_attr_init(&attr[i].kobj_attr.attr);
attr[i].param_id = id[i];
attr[i].param_size = size[i];
if (of_property_read_string_index(sysparam, "param-name", i,
&attr[i].kobj_attr.attr.name))
continue;
/* If the parameter is read-only or read-write */
switch (perm[i] & 3) {
case OPAL_SYSPARAM_READ:
attr[i].kobj_attr.attr.mode = 0444;
break;
case OPAL_SYSPARAM_WRITE:
attr[i].kobj_attr.attr.mode = 0200;
break;
case OPAL_SYSPARAM_RW:
attr[i].kobj_attr.attr.mode = 0644;
break;
default:
break;
}
attr[i].kobj_attr.show = sys_param_show;
attr[i].kobj_attr.store = sys_param_store;
if (sysfs_create_file(sysparam_kobj, &attr[i].kobj_attr.attr)) {
pr_err("SYSPARAM: Failed to create sysfs file %s\n",
attr[i].kobj_attr.attr.name);
goto out_free_attr;
}
}
kfree(perm);
kfree(size);
kfree(id);
of_node_put(sysparam);
return;
out_free_attr:
kfree(attr);
out_free_perm:
kfree(perm);
out_free_size:
kfree(size);
out_free_id:
kfree(id);
out_param_buf:
kfree(param_data_buf);
out_kobj_put:
kobject_put(sysparam_kobj);
out_node_put:
of_node_put(sysparam);
out:
return;
}
| linux-master | arch/powerpc/platforms/powernv/opal-sysparam.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PowerNV OPAL Dump Interface
*
* Copyright 2013,2014 IBM Corp.
*/
#include <linux/kobject.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <asm/opal.h>
#define DUMP_TYPE_FSP 0x01
struct dump_obj {
struct kobject kobj;
struct bin_attribute dump_attr;
uint32_t id; /* becomes object name */
uint32_t type;
uint32_t size;
char *buffer;
};
#define to_dump_obj(x) container_of(x, struct dump_obj, kobj)
struct dump_attribute {
struct attribute attr;
ssize_t (*show)(struct dump_obj *dump, struct dump_attribute *attr,
char *buf);
ssize_t (*store)(struct dump_obj *dump, struct dump_attribute *attr,
const char *buf, size_t count);
};
#define to_dump_attr(x) container_of(x, struct dump_attribute, attr)
static ssize_t dump_id_show(struct dump_obj *dump_obj,
struct dump_attribute *attr,
char *buf)
{
return sprintf(buf, "0x%x\n", dump_obj->id);
}
static const char* dump_type_to_string(uint32_t type)
{
switch (type) {
case 0x01: return "SP Dump";
case 0x02: return "System/Platform Dump";
case 0x03: return "SMA Dump";
default: return "unknown";
}
}
static ssize_t dump_type_show(struct dump_obj *dump_obj,
struct dump_attribute *attr,
char *buf)
{
return sprintf(buf, "0x%x %s\n", dump_obj->type,
dump_type_to_string(dump_obj->type));
}
static ssize_t dump_ack_show(struct dump_obj *dump_obj,
struct dump_attribute *attr,
char *buf)
{
return sprintf(buf, "ack - acknowledge dump\n");
}
/*
* Send acknowledgement to OPAL
*/
static int64_t dump_send_ack(uint32_t dump_id)
{
int rc;
rc = opal_dump_ack(dump_id);
if (rc)
pr_warn("%s: Failed to send ack to Dump ID 0x%x (%d)\n",
__func__, dump_id, rc);
return rc;
}
static ssize_t dump_ack_store(struct dump_obj *dump_obj,
struct dump_attribute *attr,
const char *buf,
size_t count)
{
/*
* Try to self remove this attribute. If we are successful,
* delete the kobject itself.
*/
if (sysfs_remove_file_self(&dump_obj->kobj, &attr->attr)) {
dump_send_ack(dump_obj->id);
kobject_put(&dump_obj->kobj);
}
return count;
}
/* Attributes of a dump
* The binary attribute of the dump itself is dynamic
* due to the dynamic size of the dump
*/
static struct dump_attribute id_attribute =
__ATTR(id, 0444, dump_id_show, NULL);
static struct dump_attribute type_attribute =
__ATTR(type, 0444, dump_type_show, NULL);
static struct dump_attribute ack_attribute =
__ATTR(acknowledge, 0660, dump_ack_show, dump_ack_store);
static ssize_t init_dump_show(struct dump_obj *dump_obj,
struct dump_attribute *attr,
char *buf)
{
return sprintf(buf, "1 - initiate Service Processor(FSP) dump\n");
}
static int64_t dump_fips_init(uint8_t type)
{
int rc;
rc = opal_dump_init(type);
if (rc)
pr_warn("%s: Failed to initiate FSP dump (%d)\n",
__func__, rc);
return rc;
}
static ssize_t init_dump_store(struct dump_obj *dump_obj,
struct dump_attribute *attr,
const char *buf,
size_t count)
{
int rc;
rc = dump_fips_init(DUMP_TYPE_FSP);
if (rc == OPAL_SUCCESS)
pr_info("%s: Initiated FSP dump\n", __func__);
return count;
}
static struct dump_attribute initiate_attribute =
__ATTR(initiate_dump, 0600, init_dump_show, init_dump_store);
static struct attribute *initiate_attrs[] = {
&initiate_attribute.attr,
NULL,
};
static const struct attribute_group initiate_attr_group = {
.attrs = initiate_attrs,
};
static struct kset *dump_kset;
static ssize_t dump_attr_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
{
struct dump_attribute *attribute;
struct dump_obj *dump;
attribute = to_dump_attr(attr);
dump = to_dump_obj(kobj);
if (!attribute->show)
return -EIO;
return attribute->show(dump, attribute, buf);
}
static ssize_t dump_attr_store(struct kobject *kobj,
struct attribute *attr,
const char *buf, size_t len)
{
struct dump_attribute *attribute;
struct dump_obj *dump;
attribute = to_dump_attr(attr);
dump = to_dump_obj(kobj);
if (!attribute->store)
return -EIO;
return attribute->store(dump, attribute, buf, len);
}
static const struct sysfs_ops dump_sysfs_ops = {
.show = dump_attr_show,
.store = dump_attr_store,
};
static void dump_release(struct kobject *kobj)
{
struct dump_obj *dump;
dump = to_dump_obj(kobj);
vfree(dump->buffer);
kfree(dump);
}
static struct attribute *dump_default_attrs[] = {
&id_attribute.attr,
&type_attribute.attr,
&ack_attribute.attr,
NULL,
};
ATTRIBUTE_GROUPS(dump_default);
static struct kobj_type dump_ktype = {
.sysfs_ops = &dump_sysfs_ops,
.release = &dump_release,
.default_groups = dump_default_groups,
};
static int64_t dump_read_info(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type)
{
__be32 id, size, type;
int rc;
type = cpu_to_be32(0xffffffff);
rc = opal_dump_info2(&id, &size, &type);
if (rc == OPAL_PARAMETER)
rc = opal_dump_info(&id, &size);
if (rc) {
pr_warn("%s: Failed to get dump info (%d)\n",
__func__, rc);
return rc;
}
*dump_id = be32_to_cpu(id);
*dump_size = be32_to_cpu(size);
*dump_type = be32_to_cpu(type);
return rc;
}
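/*
 * Pull the dump contents from OPAL: allocate a buffer, describe it with
 * an OPAL scatter-gather list and retry opal_dump_read() for as long as
 * the firmware reports it is busy.
 */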
static int64_t dump_read_data(struct dump_obj *dump)
{
struct opal_sg_list *list;
uint64_t addr;
int64_t rc;
/* Allocate memory */
dump->buffer = vzalloc(PAGE_ALIGN(dump->size));
if (!dump->buffer) {
pr_err("%s : Failed to allocate memory\n", __func__);
rc = -ENOMEM;
goto out;
}
/* Generate SG list */
list = opal_vmalloc_to_sg_list(dump->buffer, dump->size);
if (!list) {
rc = -ENOMEM;
goto out;
}
/* First entry address */
addr = __pa(list);
/* Fetch data */
rc = OPAL_BUSY_EVENT;
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_dump_read(dump->id, addr);
if (rc == OPAL_BUSY_EVENT) {
opal_poll_events(NULL);
msleep(20);
}
}
if (rc != OPAL_SUCCESS && rc != OPAL_PARTIAL)
pr_warn("%s: Extract dump failed for ID 0x%x\n",
__func__, dump->id);
/* Free SG list */
opal_free_sg_list(list);
out:
return rc;
}
static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
ssize_t rc;
struct dump_obj *dump = to_dump_obj(kobj);
if (!dump->buffer) {
rc = dump_read_data(dump);
if (rc != OPAL_SUCCESS && rc != OPAL_PARTIAL) {
vfree(dump->buffer);
dump->buffer = NULL;
return -EIO;
}
if (rc == OPAL_PARTIAL) {
/* On a partial read, we just return EIO
* and rely on userspace to ask us to try
* again.
*/
pr_info("%s: Platform dump partially read. ID = 0x%x\n",
__func__, dump->id);
return -EIO;
}
}
memcpy(buffer, dump->buffer + pos, count);
/* You may think we could free the dump buffer now and retrieve
* it again later if needed, but due to current firmware limitation,
* that's not the case. So, once read into userspace once,
* we keep the dump around until it's acknowledged by userspace.
*/
return count;
}
static void create_dump_obj(uint32_t id, size_t size, uint32_t type)
{
struct dump_obj *dump;
int rc;
dump = kzalloc(sizeof(*dump), GFP_KERNEL);
if (!dump)
return;
dump->kobj.kset = dump_kset;
kobject_init(&dump->kobj, &dump_ktype);
sysfs_bin_attr_init(&dump->dump_attr);
dump->dump_attr.attr.name = "dump";
dump->dump_attr.attr.mode = 0400;
dump->dump_attr.size = size;
dump->dump_attr.read = dump_attr_read;
dump->id = id;
dump->size = size;
dump->type = type;
rc = kobject_add(&dump->kobj, NULL, "0x%x-0x%x", type, id);
if (rc) {
kobject_put(&dump->kobj);
return;
}
/*
* As soon as the sysfs file for this dump is created/activated there is
* a chance the opal_errd daemon (or any userspace) might read and
* acknowledge the dump before kobject_uevent() is called. If that
* happens then there is a potential race between
* dump_ack_store->kobject_put() and kobject_uevent() which leads to a
* use-after-free of a kernfs object resulting in a kernel crash.
*
* To avoid that, we need to take a reference on behalf of the bin file,
* so that our reference remains valid while we call kobject_uevent().
* We then drop our reference before exiting the function, leaving the
* bin file to drop the last reference (if it hasn't already).
*/
/* Take a reference for the bin file */
kobject_get(&dump->kobj);
rc = sysfs_create_bin_file(&dump->kobj, &dump->dump_attr);
if (rc == 0) {
kobject_uevent(&dump->kobj, KOBJ_ADD);
pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
__func__, dump->id, dump->size);
} else {
/* Drop reference count taken for bin file */
kobject_put(&dump->kobj);
}
/* Drop our reference */
kobject_put(&dump->kobj);
return;
}
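/*
 * Threaded handler for the OPAL "dump available" event: query the dump
 * id/size/type and create a sysfs object for it unless one already
 * exists (the notification may arrive more than once).
 */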
static irqreturn_t process_dump(int irq, void *data)
{
int rc;
uint32_t dump_id, dump_size, dump_type;
char name[22];
struct kobject *kobj;
rc = dump_read_info(&dump_id, &dump_size, &dump_type);
if (rc != OPAL_SUCCESS)
return IRQ_HANDLED;
sprintf(name, "0x%x-0x%x", dump_type, dump_id);
/* we may get notified twice, let's handle
* that gracefully and not create two conflicting
* entries.
*/
kobj = kset_find_obj(dump_kset, name);
if (kobj) {
/* Drop reference added by kset_find_obj() */
kobject_put(kobj);
return IRQ_HANDLED;
}
create_dump_obj(dump_id, dump_size, dump_type);
return IRQ_HANDLED;
}
void __init opal_platform_dump_init(void)
{
int rc;
int dump_irq;
/* Dump not supported by firmware */
if (!opal_check_token(OPAL_DUMP_READ))
return;
dump_kset = kset_create_and_add("dump", NULL, opal_kobj);
if (!dump_kset) {
pr_warn("%s: Failed to create dump kset\n", __func__);
return;
}
rc = sysfs_create_group(&dump_kset->kobj, &initiate_attr_group);
if (rc) {
pr_warn("%s: Failed to create initiate dump attr group\n",
__func__);
kobject_put(&dump_kset->kobj);
return;
}
dump_irq = opal_event_request(ilog2(OPAL_EVENT_DUMP_AVAIL));
if (!dump_irq) {
pr_err("%s: Can't register OPAL event irq (%d)\n",
__func__, dump_irq);
return;
}
rc = request_threaded_irq(dump_irq, NULL, process_dump,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"opal-dump", NULL);
if (rc) {
pr_err("%s: Can't request OPAL event irq (%d)\n",
__func__, rc);
return;
}
if (opal_check_token(OPAL_DUMP_RESEND))
opal_dump_resend_notification();
}
| linux-master | arch/powerpc/platforms/powernv/opal-dump.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016-17 IBM Corp.
*/
#define pr_fmt(fmt) "vas: " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <asm/prom.h>
#include <asm/xive.h>
#include "vas.h"
DEFINE_MUTEX(vas_mutex);
static LIST_HEAD(vas_instances);
static DEFINE_PER_CPU(int, cpu_vas_id);
static int vas_irq_fault_window_setup(struct vas_instance *vinst)
{
int rc = 0;
rc = request_threaded_irq(vinst->virq, vas_fault_handler,
vas_fault_thread_fn, 0, vinst->name, vinst);
if (rc) {
pr_err("VAS[%d]: Request IRQ(%d) failed with %d\n",
vinst->vas_id, vinst->virq, rc);
goto out;
}
rc = vas_setup_fault_window(vinst);
if (rc)
free_irq(vinst->virq, vinst);
out:
return rc;
}
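/*
 * Set up one VAS instance from its device tree node: read the vas/chip
 * ids and the four address resources, allocate and map an XIVE irq for
 * fault notifications, publish the instance on vas_instances and wire
 * up the fault window when the irq is usable.
 */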
static int init_vas_instance(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
struct vas_instance *vinst;
struct xive_irq_data *xd;
uint32_t chipid, hwirq;
struct resource *res;
int rc, cpu, vasid;
rc = of_property_read_u32(dn, "ibm,vas-id", &vasid);
if (rc) {
pr_err("No ibm,vas-id property for %s?\n", pdev->name);
return -ENODEV;
}
rc = of_property_read_u32(dn, "ibm,chip-id", &chipid);
if (rc) {
pr_err("No ibm,chip-id property for %s?\n", pdev->name);
return -ENODEV;
}
if (pdev->num_resources != 4) {
pr_err("Unexpected DT configuration for [%s, %d]\n",
pdev->name, vasid);
return -ENODEV;
}
vinst = kzalloc(sizeof(*vinst), GFP_KERNEL);
if (!vinst)
return -ENOMEM;
vinst->name = kasprintf(GFP_KERNEL, "vas-%d", vasid);
if (!vinst->name) {
kfree(vinst);
return -ENOMEM;
}
INIT_LIST_HEAD(&vinst->node);
ida_init(&vinst->ida);
mutex_init(&vinst->mutex);
vinst->vas_id = vasid;
vinst->pdev = pdev;
res = &pdev->resource[0];
vinst->hvwc_bar_start = res->start;
res = &pdev->resource[1];
vinst->uwc_bar_start = res->start;
res = &pdev->resource[2];
vinst->paste_base_addr = res->start;
res = &pdev->resource[3];
	if (res->end > 62) {
		pr_err("Bad 'paste_win_id_shift' in DT, %llx\n", res->end);
		rc = -ENODEV;
		goto free_vinst;
	}
vinst->paste_win_id_shift = 63 - res->end;
	hwirq = xive_native_alloc_irq_on_chip(chipid);
	if (!hwirq) {
		pr_err("Inst%d: Unable to allocate global irq for chip %d\n",
				vinst->vas_id, chipid);
		rc = -ENOENT;
		goto free_vinst;
	}
	vinst->virq = irq_create_mapping(NULL, hwirq);
	if (!vinst->virq) {
		pr_err("Inst%d: Unable to map global irq %d\n",
				vinst->vas_id, hwirq);
		rc = -EINVAL;
		goto free_vinst;
	}
	xd = irq_get_handler_data(vinst->virq);
	if (!xd) {
		pr_err("Inst%d: Invalid virq %d\n",
				vinst->vas_id, vinst->virq);
		rc = -EINVAL;
		goto free_vinst;
	}
vinst->irq_port = xd->trig_page;
pr_devel("Initialized instance [%s, %d] paste_base 0x%llx paste_win_id_shift 0x%llx IRQ %d Port 0x%llx\n",
pdev->name, vasid, vinst->paste_base_addr,
vinst->paste_win_id_shift, vinst->virq,
vinst->irq_port);
for_each_possible_cpu(cpu) {
if (cpu_to_chip_id(cpu) == of_get_ibm_chip_id(dn))
per_cpu(cpu_vas_id, cpu) = vasid;
}
mutex_lock(&vas_mutex);
list_add(&vinst->node, &vas_instances);
mutex_unlock(&vas_mutex);
spin_lock_init(&vinst->fault_lock);
/*
* IRQ and fault handling setup is needed only for user space
* send windows.
*/
if (vinst->virq) {
rc = vas_irq_fault_window_setup(vinst);
/*
* Fault window is used only for user space send windows.
* So if vinst->virq is NULL, tx_win_open returns -ENODEV
* for user space.
*/
if (rc)
vinst->virq = 0;
}
vas_instance_init_dbgdir(vinst);
dev_set_drvdata(&pdev->dev, vinst);
return 0;
free_vinst:
	/* All error paths above set rc before jumping here */
	kfree(vinst->name);
	kfree(vinst);
	return rc;
}
/*
* Although this is read/used multiple times, it is written to only
* during initialization.
*/
struct vas_instance *find_vas_instance(int vasid)
{
struct list_head *ent;
struct vas_instance *vinst;
mutex_lock(&vas_mutex);
if (vasid == -1)
vasid = per_cpu(cpu_vas_id, smp_processor_id());
list_for_each(ent, &vas_instances) {
vinst = list_entry(ent, struct vas_instance, node);
if (vinst->vas_id == vasid) {
mutex_unlock(&vas_mutex);
return vinst;
}
}
mutex_unlock(&vas_mutex);
pr_devel("Instance %d not found\n", vasid);
return NULL;
}
int chip_to_vas_id(int chipid)
{
int cpu;
for_each_possible_cpu(cpu) {
if (cpu_to_chip_id(cpu) == chipid)
return per_cpu(cpu_vas_id, cpu);
}
return -1;
}
EXPORT_SYMBOL(chip_to_vas_id);
static int vas_probe(struct platform_device *pdev)
{
return init_vas_instance(pdev);
}
static const struct of_device_id powernv_vas_match[] = {
{ .compatible = "ibm,vas",},
{},
};
static struct platform_driver vas_driver = {
.driver = {
.name = "vas",
.of_match_table = powernv_vas_match,
},
.probe = vas_probe,
};
static int __init vas_init(void)
{
int found = 0;
struct device_node *dn;
platform_driver_register(&vas_driver);
for_each_compatible_node(dn, NULL, "ibm,vas") {
of_platform_device_create(dn, NULL, NULL);
found++;
}
if (!found) {
platform_driver_unregister(&vas_driver);
return -ENODEV;
}
pr_devel("Found %d instances\n", found);
return 0;
}
device_initcall(vas_init);
| linux-master | arch/powerpc/platforms/powernv/vas.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) IBM Corporation, 2014, 2017
* Anton Blanchard, Rashmica Gupta.
*/
#define pr_fmt(fmt) "memtrace: " fmt
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/numa.h>
#include <asm/machdep.h>
#include <asm/cacheflush.h>
/* This enables us to keep track of the memory removed from each node. */
struct memtrace_entry {
void *mem;
u64 start;
u64 size;
u32 nid;
struct dentry *dir;
char name[16];
};
static DEFINE_MUTEX(memtrace_mutex);
static u64 memtrace_size;
static struct memtrace_entry *memtrace_array;
static unsigned int memtrace_array_nr;
static ssize_t memtrace_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
struct memtrace_entry *ent = filp->private_data;
return simple_read_from_buffer(ubuf, count, ppos, ent->mem, ent->size);
}
static int memtrace_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct memtrace_entry *ent = filp->private_data;
if (ent->size < vma->vm_end - vma->vm_start)
return -EINVAL;
if (vma->vm_pgoff << PAGE_SHIFT >= ent->size)
return -EINVAL;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
return remap_pfn_range(vma, vma->vm_start, PHYS_PFN(ent->start) + vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
static const struct file_operations memtrace_fops = {
.llseek = default_llseek,
.read = memtrace_read,
.open = simple_open,
.mmap = memtrace_mmap,
};
#define FLUSH_CHUNK_SIZE SZ_1G
/**
* flush_dcache_range_chunked(): Write any modified data cache blocks out to
* memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE
* Does not invalidate the corresponding instruction cache blocks.
*
* @start: the start address
* @stop: the stop address (exclusive)
* @chunk: the max size of the chunks
*/
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
unsigned long chunk)
{
unsigned long i;
for (i = start; i < stop; i += chunk) {
flush_dcache_range(i, min(stop, i + chunk));
cond_resched();
}
}
static void memtrace_clear_range(unsigned long start_pfn,
unsigned long nr_pages)
{
unsigned long pfn;
/* As HIGHMEM does not apply, use clear_page() directly. */
for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
if (IS_ALIGNED(pfn, PAGES_PER_SECTION))
cond_resched();
clear_page(__va(PFN_PHYS(pfn)));
}
/*
* Before we go ahead and use this range as a cache-inhibited range,
* flush the cache.
*/
flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),
(unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
FLUSH_CHUNK_SIZE);
}
static u64 memtrace_alloc_node(u32 nid, u64 size)
{
const unsigned long nr_pages = PHYS_PFN(size);
unsigned long pfn, start_pfn;
struct page *page;
/*
* Trace memory needs to be aligned to the size, which is guaranteed
* by alloc_contig_pages().
*/
page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE |
__GFP_NOWARN, nid, NULL);
if (!page)
return 0;
start_pfn = page_to_pfn(page);
/*
* Clear the range while we still have a linear mapping.
*
* TODO: use __GFP_ZERO with alloc_contig_pages() once supported.
*/
memtrace_clear_range(start_pfn, nr_pages);
/*
* Set pages PageOffline(), to indicate that nobody (e.g., hibernation,
* dumping, ...) should be touching these pages.
*/
for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
__SetPageOffline(pfn_to_page(pfn));
arch_remove_linear_mapping(PFN_PHYS(start_pfn), size);
return PFN_PHYS(start_pfn);
}
static int memtrace_init_regions_runtime(u64 size)
{
u32 nid;
u64 m;
memtrace_array = kcalloc(num_online_nodes(),
sizeof(struct memtrace_entry), GFP_KERNEL);
if (!memtrace_array) {
pr_err("Failed to allocate memtrace_array\n");
return -EINVAL;
}
for_each_online_node(nid) {
m = memtrace_alloc_node(nid, size);
/*
* A node might not have any local memory, so warn but
* continue on.
*/
if (!m) {
pr_err("Failed to allocate trace memory on node %d\n", nid);
continue;
}
pr_info("Allocated trace memory on node %d at 0x%016llx\n", nid, m);
memtrace_array[memtrace_array_nr].start = m;
memtrace_array[memtrace_array_nr].size = size;
memtrace_array[memtrace_array_nr].nid = nid;
memtrace_array_nr++;
}
return 0;
}
static struct dentry *memtrace_debugfs_dir;
static int memtrace_init_debugfs(void)
{
int ret = 0;
int i;
for (i = 0; i < memtrace_array_nr; i++) {
struct dentry *dir;
struct memtrace_entry *ent = &memtrace_array[i];
ent->mem = ioremap(ent->start, ent->size);
/* Warn but continue on */
if (!ent->mem) {
pr_err("Failed to map trace memory at 0x%llx\n",
ent->start);
ret = -1;
continue;
}
snprintf(ent->name, 16, "%08x", ent->nid);
dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir);
ent->dir = dir;
debugfs_create_file_unsafe("trace", 0600, dir, ent, &memtrace_fops);
debugfs_create_x64("start", 0400, dir, &ent->start);
debugfs_create_x64("size", 0400, dir, &ent->size);
}
return ret;
}
static int memtrace_free(int nid, u64 start, u64 size)
{
struct mhp_params params = { .pgprot = PAGE_KERNEL };
const unsigned long nr_pages = PHYS_PFN(size);
const unsigned long start_pfn = PHYS_PFN(start);
unsigned long pfn;
int ret;
ret = arch_create_linear_mapping(nid, start, size, ¶ms);
if (ret)
return ret;
for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
__ClearPageOffline(pfn_to_page(pfn));
free_contig_range(start_pfn, nr_pages);
return 0;
}
/*
* Iterate through the chunks of memory we allocated and attempt to expose
* them back to the kernel.
*/
static int memtrace_free_regions(void)
{
int i, ret = 0;
struct memtrace_entry *ent;
for (i = memtrace_array_nr - 1; i >= 0; i--) {
ent = &memtrace_array[i];
/* We have freed this chunk previously */
if (ent->nid == NUMA_NO_NODE)
continue;
/* Remove from io mappings */
if (ent->mem) {
iounmap(ent->mem);
ent->mem = 0;
}
if (memtrace_free(ent->nid, ent->start, ent->size)) {
pr_err("Failed to free trace memory on node %d\n",
ent->nid);
ret += 1;
continue;
}
/*
* Memory was freed successfully, so clean up references to it here;
* that way, on reentry we can tell that this chunk was already freed.
*/
debugfs_remove_recursive(ent->dir);
pr_info("Freed trace memory back on node %d\n", ent->nid);
ent->size = ent->start = ent->nid = NUMA_NO_NODE;
}
if (ret)
return ret;
/* If all chunks of memory were freed successfully, reset globals */
kfree(memtrace_array);
memtrace_array = NULL;
memtrace_size = 0;
memtrace_array_nr = 0;
return 0;
}
static int memtrace_enable_set(void *data, u64 val)
{
int rc = -EAGAIN;
u64 bytes;
/*
* Don't attempt to do anything if size isn't aligned to a memory
* block or equal to zero.
*/
bytes = memory_block_size_bytes();
if (val & (bytes - 1)) {
pr_err("Value must be aligned with 0x%llx\n", bytes);
return -EINVAL;
}
mutex_lock(&memtrace_mutex);
/* Free all previously allocated memory. */
if (memtrace_size && memtrace_free_regions())
goto out_unlock;
if (!val) {
rc = 0;
goto out_unlock;
}
/* Allocate memory. */
if (memtrace_init_regions_runtime(val))
goto out_unlock;
if (memtrace_init_debugfs())
goto out_unlock;
memtrace_size = val;
rc = 0;
out_unlock:
mutex_unlock(&memtrace_mutex);
return rc;
}
static int memtrace_enable_get(void *data, u64 *val)
{
*val = memtrace_size;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(memtrace_init_fops, memtrace_enable_get,
memtrace_enable_set, "0x%016llx\n");
static int memtrace_init(void)
{
memtrace_debugfs_dir = debugfs_create_dir("memtrace",
arch_debugfs_dir);
debugfs_create_file("enable", 0600, memtrace_debugfs_dir,
NULL, &memtrace_init_fops);
return 0;
}
machine_device_initcall(powernv, memtrace_init);
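/*
 * Illustrative userspace sketch, not part of the original file: one way a
 * tool might drive the debugfs interface created above. It assumes debugfs
 * is mounted at /sys/kernel/debug, that arch_debugfs_dir resolves to the
 * "powerpc" directory as on powernv, and that node 0 got an allocation
 * (directory name is "%08x" of the nid, i.e. "00000000"). Build it as a
 * separate program; the requested size must be a multiple of the memory
 * block size, as enforced by memtrace_enable_set().
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	const char *enable = "/sys/kernel/debug/powerpc/memtrace/enable";
	const char *trace  = "/sys/kernel/debug/powerpc/memtrace/00000000/trace";
	size_t size = 1UL << 30;	/* 1 GiB per online node */
	char buf[32];
	void *p;
	int fd;

	/* Ask the kernel to carve out trace memory on every online node */
	fd = open(enable, O_WRONLY);
	if (fd < 0)
		return 1;
	snprintf(buf, sizeof(buf), "0x%zx", size);
	if (write(fd, buf, strlen(buf)) < 0)
		return 1;
	close(fd);

	/* Map node 0's chunk; memtrace_mmap() rejects oversized mappings */
	fd = open(trace, O_RDWR);
	if (fd < 0)
		return 1;
	p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	/* ... hand 'p' to the hardware trace tooling ... */
	munmap(p, size);
	close(fd);
	return 0;
}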
| linux-master | arch/powerpc/platforms/powernv/memtrace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Interface for exporting the OPAL ELF core.
* Heavily inspired from fs/proc/vmcore.c
*
* Copyright 2019, Hari Bathini, IBM Corporation.
*/
#define pr_fmt(fmt) "opal core: " fmt
#include <linux/memblock.h>
#include <linux/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/crash_core.h>
#include <linux/of.h>
#include <asm/page.h>
#include <asm/opal.h>
#include <asm/fadump-internal.h>
#include "opal-fadump.h"
#define MAX_PT_LOAD_CNT 8
/* NT_AUXV note related info */
#define AUXV_CNT 1
#define AUXV_DESC_SZ (((2 * AUXV_CNT) + 1) * sizeof(Elf64_Off))
struct opalcore_config {
u32 num_cpus;
/* PIR value of crashing CPU */
u32 crashing_cpu;
/* CPU state data info from F/W */
u64 cpu_state_destination_vaddr;
u64 cpu_state_data_size;
u64 cpu_state_entry_size;
/* OPAL memory to be exported as PT_LOAD segments */
u64 ptload_addr[MAX_PT_LOAD_CNT];
u64 ptload_size[MAX_PT_LOAD_CNT];
u64 ptload_cnt;
/* Pointer to the first PT_LOAD in the ELF core file */
Elf64_Phdr *ptload_phdr;
/* Total size of opalcore file. */
size_t opalcore_size;
/* Buffer for all the ELF core headers and the PT_NOTE */
size_t opalcorebuf_sz;
char *opalcorebuf;
/* NT_AUXV buffer */
char auxv_buf[AUXV_DESC_SZ];
};
struct opalcore {
struct list_head list;
u64 paddr;
size_t size;
loff_t offset;
};
static LIST_HEAD(opalcore_list);
static struct opalcore_config *oc_conf;
static const struct opal_mpipl_fadump *opalc_metadata;
static const struct opal_mpipl_fadump *opalc_cpu_metadata;
static struct kobject *mpipl_kobj;
/*
* Set crashing CPU's signal to SIGUSR1 if the crash is initiated
* by the kernel, SIGTERM otherwise.
*/
bool kernel_initiated;
static struct opalcore * __init get_new_element(void)
{
return kzalloc(sizeof(struct opalcore), GFP_KERNEL);
}
static inline int is_opalcore_usable(void)
{
return (oc_conf && oc_conf->opalcorebuf != NULL) ? 1 : 0;
}
static Elf64_Word *__init append_elf64_note(Elf64_Word *buf, char *name,
u32 type, void *data,
size_t data_len)
{
Elf64_Nhdr *note = (Elf64_Nhdr *)buf;
Elf64_Word namesz = strlen(name) + 1;
note->n_namesz = cpu_to_be32(namesz);
note->n_descsz = cpu_to_be32(data_len);
note->n_type = cpu_to_be32(type);
buf += DIV_ROUND_UP(sizeof(*note), sizeof(Elf64_Word));
memcpy(buf, name, namesz);
buf += DIV_ROUND_UP(namesz, sizeof(Elf64_Word));
memcpy(buf, data, data_len);
buf += DIV_ROUND_UP(data_len, sizeof(Elf64_Word));
return buf;
}
static void __init fill_prstatus(struct elf_prstatus *prstatus, int pir,
struct pt_regs *regs)
{
memset(prstatus, 0, sizeof(struct elf_prstatus));
elf_core_copy_regs(&(prstatus->pr_reg), regs);
/*
* Overload PID with PIR value.
* As a PIR value could also be '0', add an offset of '100'
* to every PIR to avoid misinterpretations in GDB.
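* (For example, a thread with PIR 0x8 is reported to GDB as pid 108.)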
*/
prstatus->common.pr_pid = cpu_to_be32(100 + pir);
prstatus->common.pr_ppid = cpu_to_be32(1);
/*
* Indicate SIGUSR1 for crash initiated from kernel.
* SIGTERM otherwise.
*/
if (pir == oc_conf->crashing_cpu) {
short sig;
sig = kernel_initiated ? SIGUSR1 : SIGTERM;
prstatus->common.pr_cursig = cpu_to_be16(sig);
}
}
static Elf64_Word *__init auxv_to_elf64_notes(Elf64_Word *buf,
u64 opal_boot_entry)
{
Elf64_Off *bufp = (Elf64_Off *)oc_conf->auxv_buf;
int idx = 0;
memset(bufp, 0, AUXV_DESC_SZ);
/* Entry point of OPAL */
bufp[idx++] = cpu_to_be64(AT_ENTRY);
bufp[idx++] = cpu_to_be64(opal_boot_entry);
/* end of vector */
bufp[idx++] = cpu_to_be64(AT_NULL);
buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME, NT_AUXV,
oc_conf->auxv_buf, AUXV_DESC_SZ);
return buf;
}
/*
* Read from the ELF header and then the crash dump.
* Returns number of bytes read on success, -errno on failure.
*/
static ssize_t read_opalcore(struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr, char *to,
loff_t pos, size_t count)
{
struct opalcore *m;
ssize_t tsz, avail;
loff_t tpos = pos;
if (pos >= oc_conf->opalcore_size)
return 0;
/* Adjust count if it goes beyond opalcore size */
avail = oc_conf->opalcore_size - pos;
if (count > avail)
count = avail;
if (count == 0)
return 0;
/* Read ELF core header and/or PT_NOTE segment */
if (tpos < oc_conf->opalcorebuf_sz) {
tsz = min_t(size_t, oc_conf->opalcorebuf_sz - tpos, count);
memcpy(to, oc_conf->opalcorebuf + tpos, tsz);
to += tsz;
tpos += tsz;
count -= tsz;
}
list_for_each_entry(m, &opalcore_list, list) {
/* nothing more to read here */
if (count == 0)
break;
if (tpos < m->offset + m->size) {
void *addr;
tsz = min_t(size_t, m->offset + m->size - tpos, count);
addr = (void *)(m->paddr + tpos - m->offset);
memcpy(to, __va(addr), tsz);
to += tsz;
tpos += tsz;
count -= tsz;
}
}
return (tpos - pos);
}
static struct bin_attribute opal_core_attr = {
.attr = {.name = "core", .mode = 0400},
.read = read_opalcore
};
/*
* Read CPU state dump data and convert it into ELF notes.
*
* Each register entry is 16 bytes: a numerical identifier along with
* a GPR/SPR flag in the first 8 bytes, and the register value in the
* next 8 bytes. For more details, refer to the F/W documentation.
*/
static Elf64_Word * __init opalcore_append_cpu_notes(Elf64_Word *buf)
{
u32 thread_pir, size_per_thread, regs_offset, regs_cnt, reg_esize;
struct hdat_fadump_thread_hdr *thdr;
struct elf_prstatus prstatus;
Elf64_Word *first_cpu_note;
struct pt_regs regs;
char *bufp;
int i;
size_per_thread = oc_conf->cpu_state_entry_size;
bufp = __va(oc_conf->cpu_state_destination_vaddr);
/*
* Offset for register entries, entry size and registers count is
* duplicated in every thread header in keeping with HDAT format.
* Use these values from the first thread header.
*/
thdr = (struct hdat_fadump_thread_hdr *)bufp;
regs_offset = (offsetof(struct hdat_fadump_thread_hdr, offset) +
be32_to_cpu(thdr->offset));
reg_esize = be32_to_cpu(thdr->esize);
regs_cnt = be32_to_cpu(thdr->ecnt);
pr_debug("--------CPU State Data------------\n");
pr_debug("NumCpus : %u\n", oc_conf->num_cpus);
pr_debug("\tOffset: %u, Entry size: %u, Cnt: %u\n",
regs_offset, reg_esize, regs_cnt);
/*
* Skip past the first CPU note. Fill this note with the
* crashing CPU's prstatus.
*/
first_cpu_note = buf;
buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS,
&prstatus, sizeof(prstatus));
for (i = 0; i < oc_conf->num_cpus; i++, bufp += size_per_thread) {
thdr = (struct hdat_fadump_thread_hdr *)bufp;
thread_pir = be32_to_cpu(thdr->pir);
pr_debug("[%04d] PIR: 0x%x, core state: 0x%02x\n",
i, thread_pir, thdr->core_state);
/*
* Register state data of MAX cores is provided by firmware,
* but some of these cores may not be active. So, while
* processing register state data, check core state and
* skip threads that belong to inactive cores.
*/
if (thdr->core_state == HDAT_FADUMP_CORE_INACTIVE)
continue;
opal_fadump_read_regs((bufp + regs_offset), regs_cnt,
reg_esize, false, ®s);
pr_debug("PIR 0x%x - R1 : 0x%llx, NIP : 0x%llx\n", thread_pir,
be64_to_cpu(regs.gpr[1]), be64_to_cpu(regs.nip));
fill_prstatus(&prstatus, thread_pir, ®s);
if (thread_pir != oc_conf->crashing_cpu) {
buf = append_elf64_note(buf, CRASH_CORE_NOTE_NAME,
NT_PRSTATUS, &prstatus,
sizeof(prstatus));
} else {
/*
* Add crashing CPU as the first NT_PRSTATUS note for
* GDB to process the core file appropriately.
*/
append_elf64_note(first_cpu_note, CRASH_CORE_NOTE_NAME,
NT_PRSTATUS, &prstatus,
sizeof(prstatus));
}
}
return buf;
}
static int __init create_opalcore(void)
{
u64 opal_boot_entry, opal_base_addr, paddr;
u32 hdr_size, cpu_notes_size, count;
struct device_node *dn;
struct opalcore *new;
loff_t opalcore_off;
struct page *page;
Elf64_Phdr *phdr;
Elf64_Ehdr *elf;
int i, ret;
char *bufp;
/* Get size of header & CPU notes for OPAL core */
hdr_size = (sizeof(Elf64_Ehdr) +
((oc_conf->ptload_cnt + 1) * sizeof(Elf64_Phdr)));
cpu_notes_size = ((oc_conf->num_cpus * (CRASH_CORE_NOTE_HEAD_BYTES +
CRASH_CORE_NOTE_NAME_BYTES +
CRASH_CORE_NOTE_DESC_BYTES)) +
(CRASH_CORE_NOTE_HEAD_BYTES +
CRASH_CORE_NOTE_NAME_BYTES + AUXV_DESC_SZ));
/* Allocate buffer to setup OPAL core */
oc_conf->opalcorebuf_sz = PAGE_ALIGN(hdr_size + cpu_notes_size);
oc_conf->opalcorebuf = alloc_pages_exact(oc_conf->opalcorebuf_sz,
GFP_KERNEL | __GFP_ZERO);
if (!oc_conf->opalcorebuf) {
pr_err("Not enough memory to setup OPAL core (size: %lu)\n",
oc_conf->opalcorebuf_sz);
oc_conf->opalcorebuf_sz = 0;
return -ENOMEM;
}
count = oc_conf->opalcorebuf_sz / PAGE_SIZE;
page = virt_to_page(oc_conf->opalcorebuf);
for (i = 0; i < count; i++)
mark_page_reserved(page + i);
pr_debug("opalcorebuf = 0x%llx\n", (u64)oc_conf->opalcorebuf);
/* Read OPAL related device-tree entries */
dn = of_find_node_by_name(NULL, "ibm,opal");
if (dn) {
ret = of_property_read_u64(dn, "opal-base-address",
&opal_base_addr);
pr_debug("opal-base-address: %llx\n", opal_base_addr);
ret |= of_property_read_u64(dn, "opal-boot-address",
&opal_boot_entry);
pr_debug("opal-boot-address: %llx\n", opal_boot_entry);
}
if (!dn || ret)
pr_warn("WARNING: Failed to read OPAL base & entry values\n");
of_node_put(dn);
/* Use count to keep track of the program headers */
count = 0;
bufp = oc_conf->opalcorebuf;
elf = (Elf64_Ehdr *)bufp;
bufp += sizeof(Elf64_Ehdr);
memcpy(elf->e_ident, ELFMAG, SELFMAG);
elf->e_ident[EI_CLASS] = ELF_CLASS;
elf->e_ident[EI_DATA] = ELFDATA2MSB;
elf->e_ident[EI_VERSION] = EV_CURRENT;
elf->e_ident[EI_OSABI] = ELF_OSABI;
memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
elf->e_type = cpu_to_be16(ET_CORE);
elf->e_machine = cpu_to_be16(ELF_ARCH);
elf->e_version = cpu_to_be32(EV_CURRENT);
elf->e_entry = 0;
elf->e_phoff = cpu_to_be64(sizeof(Elf64_Ehdr));
elf->e_shoff = 0;
elf->e_flags = 0;
elf->e_ehsize = cpu_to_be16(sizeof(Elf64_Ehdr));
elf->e_phentsize = cpu_to_be16(sizeof(Elf64_Phdr));
elf->e_phnum = 0;
elf->e_shentsize = 0;
elf->e_shnum = 0;
elf->e_shstrndx = 0;
phdr = (Elf64_Phdr *)bufp;
bufp += sizeof(Elf64_Phdr);
phdr->p_type = cpu_to_be32(PT_NOTE);
phdr->p_flags = 0;
phdr->p_align = 0;
phdr->p_paddr = phdr->p_vaddr = 0;
phdr->p_offset = cpu_to_be64(hdr_size);
phdr->p_filesz = phdr->p_memsz = cpu_to_be64(cpu_notes_size);
count++;
opalcore_off = oc_conf->opalcorebuf_sz;
oc_conf->ptload_phdr = (Elf64_Phdr *)bufp;
paddr = 0;
for (i = 0; i < oc_conf->ptload_cnt; i++) {
phdr = (Elf64_Phdr *)bufp;
bufp += sizeof(Elf64_Phdr);
phdr->p_type = cpu_to_be32(PT_LOAD);
phdr->p_flags = cpu_to_be32(PF_R|PF_W|PF_X);
phdr->p_align = 0;
new = get_new_element();
if (!new)
return -ENOMEM;
new->paddr = oc_conf->ptload_addr[i];
new->size = oc_conf->ptload_size[i];
new->offset = opalcore_off;
list_add_tail(&new->list, &opalcore_list);
phdr->p_paddr = cpu_to_be64(paddr);
phdr->p_vaddr = cpu_to_be64(opal_base_addr + paddr);
phdr->p_filesz = phdr->p_memsz =
cpu_to_be64(oc_conf->ptload_size[i]);
phdr->p_offset = cpu_to_be64(opalcore_off);
count++;
opalcore_off += oc_conf->ptload_size[i];
paddr += oc_conf->ptload_size[i];
}
elf->e_phnum = cpu_to_be16(count);
bufp = (char *)opalcore_append_cpu_notes((Elf64_Word *)bufp);
bufp = (char *)auxv_to_elf64_notes((Elf64_Word *)bufp, opal_boot_entry);
oc_conf->opalcore_size = opalcore_off;
return 0;
}
static void opalcore_cleanup(void)
{
if (oc_conf == NULL)
return;
/* Remove OPAL core sysfs file */
sysfs_remove_bin_file(mpipl_kobj, &opal_core_attr);
oc_conf->ptload_phdr = NULL;
oc_conf->ptload_cnt = 0;
/* free the buffer used for setting up OPAL core */
if (oc_conf->opalcorebuf) {
void *end = (void *)((u64)oc_conf->opalcorebuf +
oc_conf->opalcorebuf_sz);
free_reserved_area(oc_conf->opalcorebuf, end, -1, NULL);
oc_conf->opalcorebuf = NULL;
oc_conf->opalcorebuf_sz = 0;
}
kfree(oc_conf);
oc_conf = NULL;
}
__exitcall(opalcore_cleanup);
static void __init opalcore_config_init(void)
{
u32 idx, cpu_data_version;
struct device_node *np;
const __be32 *prop;
u64 addr = 0;
int i, ret;
np = of_find_node_by_path("/ibm,opal/dump");
if (np == NULL)
return;
if (!of_device_is_compatible(np, "ibm,opal-dump")) {
pr_warn("Support missing for this f/w version!\n");
return;
}
/* Check if dump has been initiated on last reboot */
prop = of_get_property(np, "mpipl-boot", NULL);
if (!prop) {
of_node_put(np);
return;
}
/* Get OPAL metadata */
ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_OPAL, &addr);
if ((ret != OPAL_SUCCESS) || !addr) {
pr_err("Failed to get OPAL metadata (%d)\n", ret);
goto error_out;
}
addr = be64_to_cpu(addr);
pr_debug("OPAL metadata addr: %llx\n", addr);
opalc_metadata = __va(addr);
/* Get OPAL CPU metadata */
ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &addr);
if ((ret != OPAL_SUCCESS) || !addr) {
pr_err("Failed to get OPAL CPU metadata (%d)\n", ret);
goto error_out;
}
addr = be64_to_cpu(addr);
pr_debug("CPU metadata addr: %llx\n", addr);
opalc_cpu_metadata = __va(addr);
/* Allocate memory for config buffer */
oc_conf = kzalloc(sizeof(struct opalcore_config), GFP_KERNEL);
if (oc_conf == NULL)
goto error_out;
/* Parse OPAL metadata */
if (opalc_metadata->version != OPAL_MPIPL_VERSION) {
pr_warn("Supported OPAL metadata version: %u, found: %u!\n",
OPAL_MPIPL_VERSION, opalc_metadata->version);
pr_warn("WARNING: F/W using newer OPAL metadata format!!\n");
}
oc_conf->ptload_cnt = 0;
idx = be32_to_cpu(opalc_metadata->region_cnt);
if (idx > MAX_PT_LOAD_CNT) {
pr_warn("WARNING: OPAL regions count (%d) adjusted to limit (%d)",
idx, MAX_PT_LOAD_CNT);
idx = MAX_PT_LOAD_CNT;
}
for (i = 0; i < idx; i++) {
oc_conf->ptload_addr[oc_conf->ptload_cnt] =
be64_to_cpu(opalc_metadata->region[i].dest);
oc_conf->ptload_size[oc_conf->ptload_cnt++] =
be64_to_cpu(opalc_metadata->region[i].size);
}
oc_conf->ptload_cnt = i;
oc_conf->crashing_cpu = be32_to_cpu(opalc_metadata->crashing_pir);
if (!oc_conf->ptload_cnt) {
pr_err("OPAL memory regions not found\n");
goto error_out;
}
/* Parse OPAL CPU metadata */
cpu_data_version = be32_to_cpu(opalc_cpu_metadata->cpu_data_version);
if (cpu_data_version != HDAT_FADUMP_CPU_DATA_VER) {
pr_warn("Supported CPU data version: %u, found: %u!\n",
HDAT_FADUMP_CPU_DATA_VER, cpu_data_version);
pr_warn("WARNING: F/W using newer CPU state data format!!\n");
}
addr = be64_to_cpu(opalc_cpu_metadata->region[0].dest);
if (!addr) {
pr_err("CPU state data not found!\n");
goto error_out;
}
oc_conf->cpu_state_destination_vaddr = (u64)__va(addr);
oc_conf->cpu_state_data_size =
be64_to_cpu(opalc_cpu_metadata->region[0].size);
oc_conf->cpu_state_entry_size =
be32_to_cpu(opalc_cpu_metadata->cpu_data_size);
if ((oc_conf->cpu_state_entry_size == 0) ||
(oc_conf->cpu_state_entry_size > oc_conf->cpu_state_data_size)) {
pr_err("CPU state data is invalid.\n");
goto error_out;
}
oc_conf->num_cpus = (oc_conf->cpu_state_data_size /
oc_conf->cpu_state_entry_size);
of_node_put(np);
return;
error_out:
pr_err("Could not export /sys/firmware/opal/core\n");
opalcore_cleanup();
of_node_put(np);
}
static ssize_t release_core_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
int input = -1;
if (kstrtoint(buf, 0, &input))
return -EINVAL;
if (input == 1) {
if (oc_conf == NULL) {
pr_err("'/sys/firmware/opal/core' file not accessible!\n");
return -EPERM;
}
/*
* Take away '/sys/firmware/opal/core' and release all memory
* used for exporting this file.
*/
opalcore_cleanup();
} else
return -EINVAL;
return count;
}
static struct kobj_attribute opalcore_rel_attr = __ATTR_WO(release_core);
static struct attribute *mpipl_attr[] = {
&opalcore_rel_attr.attr,
NULL,
};
static struct bin_attribute *mpipl_bin_attr[] = {
&opal_core_attr,
NULL,
};
static const struct attribute_group mpipl_group = {
.attrs = mpipl_attr,
.bin_attrs = mpipl_bin_attr,
};
static int __init opalcore_init(void)
{
int rc = -1;
opalcore_config_init();
if (oc_conf == NULL)
return rc;
create_opalcore();
/*
* The dump can be exported only if oc_conf->opalcorebuf was set up
* successfully by create_opalcore() in this (post-MPIPL) kernel.
*/
if (!(is_opalcore_usable())) {
pr_err("Failed to export /sys/firmware/opal/mpipl/core\n");
opalcore_cleanup();
return rc;
}
/* Set OPAL core file size */
opal_core_attr.size = oc_conf->opalcore_size;
mpipl_kobj = kobject_create_and_add("mpipl", opal_kobj);
if (!mpipl_kobj) {
pr_err("unable to create mpipl kobject\n");
return -ENOMEM;
}
/* Export OPAL core sysfs file */
rc = sysfs_create_group(mpipl_kobj, &mpipl_group);
if (rc) {
pr_err("mpipl sysfs group creation failed (%d)", rc);
opalcore_cleanup();
return rc;
}
/* The /sys/firmware/opal/core is moved to /sys/firmware/opal/mpipl/
* directory, need to create symlink at old location to maintain
* backward compatibility.
*/
rc = compat_only_sysfs_link_entry_to_kobj(opal_kobj, mpipl_kobj,
"core", NULL);
if (rc) {
pr_err("unable to create core symlink (%d)\n", rc);
return rc;
}
return 0;
}
fs_initcall(opalcore_init);
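/*
 * Illustrative userspace sketch, not part of the original file: save the
 * exported ELF core and then release the memory backing it. It assumes the
 * sysfs paths created above: /sys/firmware/opal/mpipl/core (read-only core
 * file, also symlinked at /sys/firmware/opal/core) and
 * /sys/firmware/opal/mpipl/release_core (write '1' to free the memory).
 * Build it as a separate program.
 */
#include <stdio.h>

int main(void)
{
	FILE *in = fopen("/sys/firmware/opal/mpipl/core", "rb");
	FILE *out = fopen("opalcore", "wb");
	FILE *rel;
	char buf[4096];
	size_t n;

	if (!in || !out)
		return 1;

	/* Copy the core so it can be inspected with gdb or eu-readelf */
	while ((n = fread(buf, 1, sizeof(buf), in)) > 0)
		fwrite(buf, 1, n, out);
	fclose(in);
	fclose(out);

	/* Once saved, hand the memory back (this also removes the sysfs file) */
	rel = fopen("/sys/firmware/opal/mpipl/release_core", "w");
	if (!rel)
		return 1;
	fputs("1", rel);
	fclose(rel);
	return 0;
}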
| linux-master | arch/powerpc/platforms/powernv/opal-core.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PowerNV SCOM bus debugfs interface
*
* Copyright 2010 Benjamin Herrenschmidt, IBM Corp
* <[email protected]>
* and David Gibson, IBM Corporation.
* Copyright 2013 IBM Corp.
*/
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/bug.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/opal.h>
#include <asm/prom.h>
static u64 opal_scom_unmangle(u64 addr)
{
u64 tmp;
/*
* XSCOM addresses use the top nibble to set indirect mode and
* its form. Bits 4-11 are always 0.
*
* Because the debugfs interface uses signed offsets and shifts
* the address left by 3, we basically cannot use the top 4 bits
* of the 64-bit address, and thus cannot use the indirect bit.
*
* To deal with that, this API accepts the indirect bits in
* bits 4-7 (IBM notation) instead of bits 0-3, and we do the
* conversion here.
*
* For in-kernel use, we don't need to do this mangling: in-kernel
* addresses won't have bits 4-7 set.
*
* So:
* debugfs will always set 0-3 = 0 and clear 4-7
* kernel will always clear 0-3 = 0 and set 4-7
*/
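/*
 * Worked example (illustrative): a debugfs user wanting the indirect
 * form passes 0x0800000012345678, which the code below converts to
 * 0x8000000012345678 before it reaches OPAL; an in-kernel caller
 * passes the latter form directly and it is returned unchanged.
 */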
tmp = addr;
tmp &= 0x0f00000000000000;
addr &= 0xf0ffffffffffffff;
addr |= tmp << 4;
return addr;
}
static int opal_scom_read(uint32_t chip, uint64_t addr, u64 reg, u64 *value)
{
int64_t rc;
__be64 v;
reg = opal_scom_unmangle(addr + reg);
rc = opal_xscom_read(chip, reg, (__be64 *)__pa(&v));
if (rc) {
*value = 0xfffffffffffffffful;
return -EIO;
}
*value = be64_to_cpu(v);
return 0;
}
static int opal_scom_write(uint32_t chip, uint64_t addr, u64 reg, u64 value)
{
int64_t rc;
reg = opal_scom_unmangle(addr + reg);
rc = opal_xscom_write(chip, reg, value);
if (rc)
return -EIO;
return 0;
}
struct scom_debug_entry {
u32 chip;
struct debugfs_blob_wrapper path;
char name[16];
};
static ssize_t scom_debug_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
struct scom_debug_entry *ent = filp->private_data;
u64 __user *ubuf64 = (u64 __user *)ubuf;
loff_t off = *ppos;
ssize_t done = 0;
u64 reg, reg_base, reg_cnt, val;
int rc;
if (off < 0 || (off & 7) || (count & 7))
return -EINVAL;
reg_base = off >> 3;
reg_cnt = count >> 3;
for (reg = 0; reg < reg_cnt; reg++) {
rc = opal_scom_read(ent->chip, reg_base, reg, &val);
if (!rc)
rc = put_user(val, ubuf64);
if (rc) {
if (!done)
done = rc;
break;
}
ubuf64++;
*ppos += 8;
done += 8;
}
return done;
}
static ssize_t scom_debug_write(struct file *filp, const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct scom_debug_entry *ent = filp->private_data;
u64 __user *ubuf64 = (u64 __user *)ubuf;
loff_t off = *ppos;
ssize_t done = 0;
u64 reg, reg_base, reg_cnt, val;
int rc;
if (off < 0 || (off & 7) || (count & 7))
return -EINVAL;
reg_base = off >> 3;
reg_cnt = count >> 3;
for (reg = 0; reg < reg_cnt; reg++) {
rc = get_user(val, ubuf64);
if (!rc)
rc = opal_scom_write(ent->chip, reg_base, reg, val);
if (rc) {
if (!done)
done = rc;
break;
}
ubuf64++;
done += 8;
}
return done;
}
static const struct file_operations scom_debug_fops = {
.read = scom_debug_read,
.write = scom_debug_write,
.open = simple_open,
.llseek = default_llseek,
};
static int scom_debug_init_one(struct dentry *root, struct device_node *dn,
int chip)
{
struct scom_debug_entry *ent;
struct dentry *dir;
ent = kzalloc(sizeof(*ent), GFP_KERNEL);
if (!ent)
return -ENOMEM;
ent->chip = chip;
snprintf(ent->name, 16, "%08x", chip);
ent->path.data = (void *)kasprintf(GFP_KERNEL, "%pOF", dn);
ent->path.size = strlen((char *)ent->path.data);
dir = debugfs_create_dir(ent->name, root);
if (IS_ERR(dir)) {
kfree(ent->path.data);
kfree(ent);
return -1;
}
debugfs_create_blob("devspec", 0400, dir, &ent->path);
debugfs_create_file("access", 0600, dir, ent, &scom_debug_fops);
return 0;
}
static int scom_debug_init(void)
{
struct device_node *dn;
struct dentry *root;
int chip, rc;
if (!firmware_has_feature(FW_FEATURE_OPAL))
return 0;
root = debugfs_create_dir("scom", arch_debugfs_dir);
if (IS_ERR(root))
return -1;
rc = 0;
for_each_node_with_property(dn, "scom-controller") {
chip = of_get_ibm_chip_id(dn);
WARN_ON(chip == -1);
rc |= scom_debug_init_one(root, dn, chip);
}
return rc;
}
device_initcall(scom_debug_init);
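/*
 * Illustrative userspace sketch, not part of the original file: read one
 * 64-bit SCOM register through the "access" file created above. It assumes
 * debugfs is mounted at /sys/kernel/debug, that arch_debugfs_dir resolves
 * to "powerpc", and a chip id of 0 (directory name is "%08x" of the chip
 * id). The register number 0xf000f is only an example. Offsets and counts
 * must be 8-byte aligned, as enforced by scom_debug_read().
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/powerpc/scom/00000000/access";
	uint64_t reg = 0xf000f;		/* example SCOM register number */
	uint64_t val;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 1;
	/* Register 'reg' lives at file offset reg << 3 (8 bytes per register) */
	if (pread(fd, &val, sizeof(val), (off_t)(reg << 3)) != sizeof(val))
		return 1;
	printf("scom 0x%llx = 0x%016llx\n",
	       (unsigned long long)reg, (unsigned long long)val);
	close(fd);
	return 0;
}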
| linux-master | arch/powerpc/platforms/powernv/opal-xscom.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PowerNV OPAL in-memory console interface
*
* Copyright 2014 IBM Corp.
*/
#include <asm/io.h>
#include <asm/opal.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include "powernv.h"
/* OPAL in-memory console. Defined in OPAL source at core/console.c */
struct memcons {
__be64 magic;
#define MEMCONS_MAGIC 0x6630696567726173L
__be64 obuf_phys;
__be64 ibuf_phys;
__be32 obuf_size;
__be32 ibuf_size;
__be32 out_pos;
#define MEMCONS_OUT_POS_WRAP 0x80000000u
#define MEMCONS_OUT_POS_MASK 0x00ffffffu
__be32 in_prod;
__be32 in_cons;
};
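/*
 * Illustrative reading of out_pos, not in the original source: a value of
 * 0x80001234 means the output buffer has wrapped at least once and the next
 * byte will be written at offset 0x1234, so memcons_copy() below reads from
 * offset 0x1234 to the end of the buffer and then from the start up to
 * 0x1234.
 */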
static struct memcons *opal_memcons = NULL;
ssize_t memcons_copy(struct memcons *mc, char *to, loff_t pos, size_t count)
{
const char *conbuf;
ssize_t ret;
size_t first_read = 0;
uint32_t out_pos, avail;
if (!mc)
return -ENODEV;
out_pos = be32_to_cpu(READ_ONCE(mc->out_pos));
/* Now we've read out_pos, put a barrier in before reading the new
* data it points to in conbuf. */
smp_rmb();
conbuf = phys_to_virt(be64_to_cpu(mc->obuf_phys));
/* When the buffer has wrapped, read from the out_pos marker to the end
* of the buffer, and then read the remaining data as in the un-wrapped
* case. */
if (out_pos & MEMCONS_OUT_POS_WRAP) {
out_pos &= MEMCONS_OUT_POS_MASK;
avail = be32_to_cpu(mc->obuf_size) - out_pos;
ret = memory_read_from_buffer(to, count, &pos,
conbuf + out_pos, avail);
if (ret < 0)
goto out;
first_read = ret;
to += first_read;
count -= first_read;
pos -= avail;
if (count <= 0)
goto out;
}
/* Sanity check. The firmware should not do this to us. */
if (out_pos > be32_to_cpu(mc->obuf_size)) {
pr_err("OPAL: memory console corruption. Aborting read.\n");
return -EINVAL;
}
ret = memory_read_from_buffer(to, count, &pos, conbuf, out_pos);
if (ret < 0)
goto out;
ret += first_read;
out:
return ret;
}
ssize_t opal_msglog_copy(char *to, loff_t pos, size_t count)
{
return memcons_copy(opal_memcons, to, pos, count);
}
static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj,
struct bin_attribute *bin_attr, char *to,
loff_t pos, size_t count)
{
return opal_msglog_copy(to, pos, count);
}
static struct bin_attribute opal_msglog_attr = {
.attr = {.name = "msglog", .mode = 0400},
.read = opal_msglog_read
};
struct memcons *__init memcons_init(struct device_node *node, const char *mc_prop_name)
{
u64 mcaddr;
struct memcons *mc;
if (of_property_read_u64(node, mc_prop_name, &mcaddr)) {
pr_warn("%s property not found, no message log\n",
mc_prop_name);
goto out_err;
}
mc = phys_to_virt(mcaddr);
if (!mc) {
pr_warn("memory console address is invalid\n");
goto out_err;
}
if (be64_to_cpu(mc->magic) != MEMCONS_MAGIC) {
pr_warn("memory console version is invalid\n");
goto out_err;
}
return mc;
out_err:
return NULL;
}
u32 __init memcons_get_size(struct memcons *mc)
{
return be32_to_cpu(mc->ibuf_size) + be32_to_cpu(mc->obuf_size);
}
void __init opal_msglog_init(void)
{
opal_memcons = memcons_init(opal_node, "ibm,opal-memcons");
if (!opal_memcons) {
pr_warn("OPAL: memcons failed to load from ibm,opal-memcons\n");
return;
}
opal_msglog_attr.size = memcons_get_size(opal_memcons);
}
void __init opal_msglog_sysfs_init(void)
{
if (!opal_memcons) {
pr_warn("OPAL: message log initialisation failed, not creating sysfs entry\n");
return;
}
if (sysfs_create_bin_file(opal_kobj, &opal_msglog_attr) != 0)
pr_warn("OPAL: sysfs file creation failed\n");
}
| linux-master | arch/powerpc/platforms/powernv/opal-msglog.c |