// SPDX-License-Identifier: GPL-2.0-or-later
/*
* BSC913xRDB Board Setup
*
* Author: Priyanka Jain <[email protected]>
*
* Copyright 2011-2012 Freescale Semiconductor Inc.
*/
#include <linux/of.h>
#include <linux/pci.h>
#include <asm/mpic.h>
#include <sysdev/fsl_soc.h>
#include <asm/udbg.h>
#include "mpc85xx.h"
void __init bsc913x_rdb_pic_init(void)
{
struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
if (!mpic)
pr_err("bsc913x: Failed to allocate MPIC structure\n");
else
mpic_init(mpic);
}
/*
* Setup the architecture
*/
static void __init bsc913x_rdb_setup_arch(void)
{
if (ppc_md.progress)
ppc_md.progress("bsc913x_rdb_setup_arch()", 0);
pr_info("bsc913x board from Freescale Semiconductor\n");
}
machine_device_initcall(bsc9131_rdb, mpc85xx_common_publish_devices);
define_machine(bsc9131_rdb) {
.name = "BSC9131 RDB",
.compatible = "fsl,bsc9131rdb",
.setup_arch = bsc913x_rdb_setup_arch,
.init_IRQ = bsc913x_rdb_pic_init,
.get_irq = mpic_get_irq,
.progress = udbg_progress,
};
/* ===== end of file: arch/powerpc/platforms/85xx/bsc913x_rdb.c (linux-master) ===== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2008 Ilya Yanok, Emcraft Systems
*/
#include <linux/irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/io.h>
/*
* The FPGA supports 9 interrupt sources, which can be routed to 3
* interrupt request lines of the MPIC. The line to be used can be
* specified through the third cell of FDT property "interrupts".
*/
#define SOCRATES_FPGA_NUM_IRQS 9
#define FPGA_PIC_IRQCFG (0x0)
#define FPGA_PIC_IRQMASK(n) (0x4 + 0x4 * (n))
#define SOCRATES_FPGA_IRQ_MASK ((1 << SOCRATES_FPGA_NUM_IRQS) - 1)
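/*
* Note on the register layout (inferred from the handlers below rather than
* from board documentation): each FPGA_PIC_IRQMASK(n) register appears to
* carry the enable mask for the nine sources in its low bits and the
* matching status/ack bits shifted up by 16, which is why the ack/eoi paths
* set "1 << (hwirq + 16)" while mask/unmask toggle "1 << hwirq".
*/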
struct socrates_fpga_irq_info {
unsigned int irq_line;
int type;
};
/*
* Interrupt routing and type table
*
* IRQ_TYPE_NONE means the interrupt type is configurable,
* otherwise it's fixed to the specified value.
*/
static struct socrates_fpga_irq_info fpga_irqs[SOCRATES_FPGA_NUM_IRQS] = {
[0] = {0, IRQ_TYPE_NONE},
[1] = {0, IRQ_TYPE_LEVEL_HIGH},
[2] = {0, IRQ_TYPE_LEVEL_LOW},
[3] = {0, IRQ_TYPE_NONE},
[4] = {0, IRQ_TYPE_NONE},
[5] = {0, IRQ_TYPE_NONE},
[6] = {0, IRQ_TYPE_NONE},
[7] = {0, IRQ_TYPE_NONE},
[8] = {0, IRQ_TYPE_LEVEL_HIGH},
};
static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock);
static void __iomem *socrates_fpga_pic_iobase;
static struct irq_domain *socrates_fpga_pic_irq_host;
static unsigned int socrates_fpga_irqs[3];
static inline uint32_t socrates_fpga_pic_read(int reg)
{
return in_be32(socrates_fpga_pic_iobase + reg);
}
static inline void socrates_fpga_pic_write(int reg, uint32_t val)
{
out_be32(socrates_fpga_pic_iobase + reg, val);
}
static inline unsigned int socrates_fpga_pic_get_irq(unsigned int irq)
{
uint32_t cause;
unsigned long flags;
int i;
/* Check irq line routed to the MPIC */
for (i = 0; i < 3; i++) {
if (irq == socrates_fpga_irqs[i])
break;
}
if (i == 3)
return 0;
raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
cause = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(i));
raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
for (i = SOCRATES_FPGA_NUM_IRQS - 1; i >= 0; i--) {
if (cause >> (i + 16))
break;
}
return irq_linear_revmap(socrates_fpga_pic_irq_host,
(irq_hw_number_t)i);
}
static void socrates_fpga_pic_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int irq = irq_desc_get_irq(desc);
unsigned int cascade_irq;
/*
* See if we actually have an interrupt, call generic handling code if
* we do.
*/
cascade_irq = socrates_fpga_pic_get_irq(irq);
if (cascade_irq)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
}
static void socrates_fpga_pic_ack(struct irq_data *d)
{
unsigned long flags;
unsigned int irq_line, hwirq = irqd_to_hwirq(d);
uint32_t mask;
irq_line = fpga_irqs[hwirq].irq_line;
raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
& SOCRATES_FPGA_IRQ_MASK;
mask |= (1 << (hwirq + 16));
socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask);
raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
}
static void socrates_fpga_pic_mask(struct irq_data *d)
{
unsigned long flags;
unsigned int hwirq = irqd_to_hwirq(d);
int irq_line;
u32 mask;
irq_line = fpga_irqs[hwirq].irq_line;
raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
& SOCRATES_FPGA_IRQ_MASK;
mask &= ~(1 << hwirq);
socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask);
raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
}
static void socrates_fpga_pic_mask_ack(struct irq_data *d)
{
unsigned long flags;
unsigned int hwirq = irqd_to_hwirq(d);
int irq_line;
u32 mask;
irq_line = fpga_irqs[hwirq].irq_line;
raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
& SOCRATES_FPGA_IRQ_MASK;
mask &= ~(1 << hwirq);
mask |= (1 << (hwirq + 16));
socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask);
raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
}
static void socrates_fpga_pic_unmask(struct irq_data *d)
{
unsigned long flags;
unsigned int hwirq = irqd_to_hwirq(d);
int irq_line;
u32 mask;
irq_line = fpga_irqs[hwirq].irq_line;
raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
& SOCRATES_FPGA_IRQ_MASK;
mask |= (1 << hwirq);
socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask);
raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
}
static void socrates_fpga_pic_eoi(struct irq_data *d)
{
unsigned long flags;
unsigned int hwirq = irqd_to_hwirq(d);
int irq_line;
u32 mask;
irq_line = fpga_irqs[hwirq].irq_line;
raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line))
& SOCRATES_FPGA_IRQ_MASK;
mask |= (1 << (hwirq + 16));
socrates_fpga_pic_write(FPGA_PIC_IRQMASK(irq_line), mask);
raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
}
static int socrates_fpga_pic_set_type(struct irq_data *d,
unsigned int flow_type)
{
unsigned long flags;
unsigned int hwirq = irqd_to_hwirq(d);
int polarity;
u32 mask;
if (fpga_irqs[hwirq].type != IRQ_TYPE_NONE)
return -EINVAL;
switch (flow_type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_LEVEL_HIGH:
polarity = 1;
break;
case IRQ_TYPE_LEVEL_LOW:
polarity = 0;
break;
default:
return -EINVAL;
}
raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
mask = socrates_fpga_pic_read(FPGA_PIC_IRQCFG);
if (polarity)
mask |= (1 << hwirq);
else
mask &= ~(1 << hwirq);
socrates_fpga_pic_write(FPGA_PIC_IRQCFG, mask);
raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
return 0;
}
static struct irq_chip socrates_fpga_pic_chip = {
.name = "FPGA-PIC",
.irq_ack = socrates_fpga_pic_ack,
.irq_mask = socrates_fpga_pic_mask,
.irq_mask_ack = socrates_fpga_pic_mask_ack,
.irq_unmask = socrates_fpga_pic_unmask,
.irq_eoi = socrates_fpga_pic_eoi,
.irq_set_type = socrates_fpga_pic_set_type,
};
static int socrates_fpga_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hwirq)
{
/* All interrupts are LEVEL sensitive */
irq_set_status_flags(virq, IRQ_LEVEL);
irq_set_chip_and_handler(virq, &socrates_fpga_pic_chip,
handle_fasteoi_irq);
return 0;
}
static int socrates_fpga_pic_host_xlate(struct irq_domain *h,
struct device_node *ct, const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
struct socrates_fpga_irq_info *fpga_irq = &fpga_irqs[intspec[0]];
*out_hwirq = intspec[0];
if (fpga_irq->type == IRQ_TYPE_NONE) {
/* type is configurable */
if (intspec[1] != IRQ_TYPE_LEVEL_LOW &&
intspec[1] != IRQ_TYPE_LEVEL_HIGH) {
pr_warn("FPGA PIC: invalid irq type, setting default active low\n");
*out_flags = IRQ_TYPE_LEVEL_LOW;
} else {
*out_flags = intspec[1];
}
} else {
/* type is fixed */
*out_flags = fpga_irq->type;
}
/* Use specified interrupt routing */
if (intspec[2] <= 2)
fpga_irq->irq_line = intspec[2];
else
pr_warn("FPGA PIC: invalid irq routing\n");
return 0;
}
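/*
* Illustrative device-tree usage for the translation above (an assumed
* example, not copied from a real socrates .dts): a property such as
*
* interrupts = <3 8 1>;
*
* would describe FPGA source 3 (type configurable per the table above),
* programmed active low (8 == IRQ_TYPE_LEVEL_LOW) and routed to MPIC
* request line 1.
*/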
static const struct irq_domain_ops socrates_fpga_pic_host_ops = {
.map = socrates_fpga_pic_host_map,
.xlate = socrates_fpga_pic_host_xlate,
};
void __init socrates_fpga_pic_init(struct device_node *pic)
{
unsigned long flags;
int i;
/* Setup an irq_domain structure */
socrates_fpga_pic_irq_host = irq_domain_add_linear(pic,
SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops, NULL);
if (socrates_fpga_pic_irq_host == NULL) {
pr_err("FPGA PIC: Unable to allocate host\n");
return;
}
for (i = 0; i < 3; i++) {
socrates_fpga_irqs[i] = irq_of_parse_and_map(pic, i);
if (!socrates_fpga_irqs[i]) {
pr_warn("FPGA PIC: can't get irq%d\n", i);
continue;
}
irq_set_chained_handler(socrates_fpga_irqs[i],
socrates_fpga_pic_cascade);
}
socrates_fpga_pic_iobase = of_iomap(pic, 0);
raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags);
socrates_fpga_pic_write(FPGA_PIC_IRQMASK(0),
SOCRATES_FPGA_IRQ_MASK << 16);
socrates_fpga_pic_write(FPGA_PIC_IRQMASK(1),
SOCRATES_FPGA_IRQ_MASK << 16);
socrates_fpga_pic_write(FPGA_PIC_IRQMASK(2),
SOCRATES_FPGA_IRQ_MASK << 16);
raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags);
pr_info("FPGA PIC: Setting up Socrates FPGA PIC\n");
}
/* ===== end of file: arch/powerpc/platforms/85xx/socrates_fpga_pic.c (linux-master) ===== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2007, Olof Johansson, PA Semi
*
* Based on arch/powerpc/sysdev/mpic_u3msi.c:
*
* Copyright 2006, Segher Boessenkool, IBM Corporation.
* Copyright 2006-2007, Michael Ellerman, IBM Corporation.
*/
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <asm/mpic.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/msi_bitmap.h>
#include <sysdev/mpic.h>
/* Allocate 16 interrupts per device, to give an alignment of 16,
* since that's the size of the grouping w.r.t. affinity. If someone
* needs more than 32 MSI's down the road we'll have to rethink this,
* but it should be OK for now.
*/
#define ALLOC_CHUNK 16
#define PASEMI_MSI_ADDR 0xfc080000
/* A bit ugly, can we get this from the pci_dev somehow? */
static struct mpic *msi_mpic;
static void mpic_pasemi_msi_mask_irq(struct irq_data *data)
{
pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq);
pci_msi_mask_irq(data);
mpic_mask_irq(data);
}
static void mpic_pasemi_msi_unmask_irq(struct irq_data *data)
{
pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq);
mpic_unmask_irq(data);
pci_msi_unmask_irq(data);
}
static struct irq_chip mpic_pasemi_msi_chip = {
.irq_shutdown = mpic_pasemi_msi_mask_irq,
.irq_mask = mpic_pasemi_msi_mask_irq,
.irq_unmask = mpic_pasemi_msi_unmask_irq,
.irq_eoi = mpic_end_irq,
.irq_set_type = mpic_set_irq_type,
.irq_set_affinity = mpic_set_affinity,
.name = "PASEMI-MSI",
};
static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
irq_hw_number_t hwirq;
pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);
msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
irq_dispose_mapping(entry->irq);
entry->irq = 0;
msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, ALLOC_CHUNK);
}
}
static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
unsigned int virq;
struct msi_desc *entry;
struct msi_msg msg;
int hwirq;
if (type == PCI_CAP_ID_MSIX)
pr_debug("pasemi_msi: MSI-X untested, trying anyway\n");
pr_debug("pasemi_msi_setup_msi_irqs, pdev %p nvec %d type %d\n",
pdev, nvec, type);
msg.address_hi = 0;
msg.address_lo = PASEMI_MSI_ADDR;
msi_for_each_desc(entry, &pdev->dev, MSI_DESC_NOTASSOCIATED) {
/* Allocate 16 interrupts for now, since that's the grouping for
* affinity. This can be changed later if it turns out 32 is too
* few MSIs for someone, but restrictions will apply to how the
* sources can be changed independently.
*/
hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap,
ALLOC_CHUNK);
if (hwirq < 0) {
pr_debug("pasemi_msi: failed allocating hwirq\n");
return hwirq;
}
virq = irq_create_mapping(msi_mpic->irqhost, hwirq);
if (!virq) {
pr_debug("pasemi_msi: failed mapping hwirq 0x%x\n",
hwirq);
msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq,
ALLOC_CHUNK);
return -ENOSPC;
}
/* Vector on MSI is really an offset, the hardware adds
* it to the value written at the magic address. So set
* it to 0 to remain sane.
*/
mpic_set_vector(virq, 0);
irq_set_msi_desc(virq, entry);
irq_set_chip(virq, &mpic_pasemi_msi_chip);
irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
pr_debug("pasemi_msi: allocated virq 0x%x (hw 0x%x) " \
"addr 0x%x\n", virq, hwirq, msg.address_lo);
/* Likewise, the device writes [0...511] into the target
* register to generate MSI [512...1023]
*/
msg.data = hwirq-0x200;
pci_write_msi_msg(virq, &msg);
}
return 0;
}
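/*
* Worked example of the mapping above, purely illustrative: if the bitmap
* hands out hwirq 0x210, msg.data becomes 0x210 - 0x200 = 0x10, so a device
* write of 0x10 to PASEMI_MSI_ADDR raises MSI source 0x210 on the MPIC,
* matching the [0...511] -> [512...1023] relationship described in the
* comment above.
*/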
int __init mpic_pasemi_msi_init(struct mpic *mpic)
{
int rc;
struct pci_controller *phb;
struct device_node *of_node;
of_node = irq_domain_get_of_node(mpic->irqhost);
if (!of_node ||
!of_device_is_compatible(of_node,
"pasemi,pwrficient-openpic"))
return -ENODEV;
rc = mpic_msi_init_allocator(mpic);
if (rc) {
pr_debug("pasemi_msi: Error allocating bitmap!\n");
return rc;
}
pr_debug("pasemi_msi: Registering PA Semi MPIC MSI callbacks\n");
msi_mpic = mpic;
list_for_each_entry(phb, &hose_list, list_node) {
WARN_ON(phb->controller_ops.setup_msi_irqs);
phb->controller_ops.setup_msi_irqs = pasemi_msi_setup_msi_irqs;
phb->controller_ops.teardown_msi_irqs = pasemi_msi_teardown_msi_irqs;
}
return 0;
}
/* ===== end of file: arch/powerpc/platforms/pasemi/msi.c (linux-master) ===== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006-2007 PA Semi, Inc
*
* Common functions for DMA access on PA Semi PWRficient
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/sched.h>
#include <asm/pasemi_dma.h>
#define MAX_TXCH 64
#define MAX_RXCH 64
#define MAX_FLAGS 64
#define MAX_FUN 8
static struct pasdma_status *dma_status;
static void __iomem *iob_regs;
static void __iomem *mac_regs[6];
static void __iomem *dma_regs;
static int base_hw_irq;
static int num_txch, num_rxch;
static struct pci_dev *dma_pdev;
/* Bitmaps to handle allocation of channels */
static DECLARE_BITMAP(txch_free, MAX_TXCH);
static DECLARE_BITMAP(rxch_free, MAX_RXCH);
static DECLARE_BITMAP(flags_free, MAX_FLAGS);
static DECLARE_BITMAP(fun_free, MAX_FUN);
/* pasemi_read_iob_reg - read IOB register
* @reg: Register to read (offset into PCI CFG space)
*/
unsigned int pasemi_read_iob_reg(unsigned int reg)
{
return in_le32(iob_regs+reg);
}
EXPORT_SYMBOL(pasemi_read_iob_reg);
/* pasemi_write_iob_reg - write IOB register
* @reg: Register to write to (offset into PCI CFG space)
* @val: Value to write
*/
void pasemi_write_iob_reg(unsigned int reg, unsigned int val)
{
out_le32(iob_regs+reg, val);
}
EXPORT_SYMBOL(pasemi_write_iob_reg);
/* pasemi_read_mac_reg - read MAC register
* @intf: MAC interface
* @reg: Register to read (offset into PCI CFG space)
*/
unsigned int pasemi_read_mac_reg(int intf, unsigned int reg)
{
return in_le32(mac_regs[intf]+reg);
}
EXPORT_SYMBOL(pasemi_read_mac_reg);
/* pasemi_write_mac_reg - write MAC register
* @intf: MAC interface
* @reg: Register to write to (offset into PCI CFG space)
* @val: Value to write
*/
void pasemi_write_mac_reg(int intf, unsigned int reg, unsigned int val)
{
out_le32(mac_regs[intf]+reg, val);
}
EXPORT_SYMBOL(pasemi_write_mac_reg);
/* pasemi_read_dma_reg - read DMA register
* @reg: Register to read (offset into PCI CFG space)
*/
unsigned int pasemi_read_dma_reg(unsigned int reg)
{
return in_le32(dma_regs+reg);
}
EXPORT_SYMBOL(pasemi_read_dma_reg);
/* pasemi_write_dma_reg - write DMA register
* @reg: Register to write to (offset into PCI CFG space)
* @val: Value to write
*/
void pasemi_write_dma_reg(unsigned int reg, unsigned int val)
{
out_le32(dma_regs+reg, val);
}
EXPORT_SYMBOL(pasemi_write_dma_reg);
static int pasemi_alloc_tx_chan(enum pasemi_dmachan_type type)
{
int bit;
int start, limit;
switch (type & (TXCHAN_EVT0|TXCHAN_EVT1)) {
case TXCHAN_EVT0:
start = 0;
limit = 10;
break;
case TXCHAN_EVT1:
start = 10;
limit = MAX_TXCH;
break;
default:
start = 0;
limit = MAX_TXCH;
break;
}
retry:
bit = find_next_bit(txch_free, MAX_TXCH, start);
if (bit >= limit)
return -ENOSPC;
if (!test_and_clear_bit(bit, txch_free))
goto retry;
return bit;
}
static void pasemi_free_tx_chan(int chan)
{
BUG_ON(test_bit(chan, txch_free));
set_bit(chan, txch_free);
}
static int pasemi_alloc_rx_chan(void)
{
int bit;
retry:
bit = find_first_bit(rxch_free, MAX_RXCH);
if (bit >= MAX_RXCH)
return -ENOSPC;
if (!test_and_clear_bit(bit, rxch_free))
goto retry;
return bit;
}
static void pasemi_free_rx_chan(int chan)
{
BUG_ON(test_bit(chan, rxch_free));
set_bit(chan, rxch_free);
}
/* pasemi_dma_alloc_chan - Allocate a DMA channel
* @type: Type of channel to allocate
* @total_size: Total size of structure to allocate (to allow for more
* room behind the structure to be used by the client)
* @offset: Offset in bytes from start of the total structure to the beginning
* of struct pasemi_dmachan. Needed when struct pasemi_dmachan is
* not the first member of the client structure.
*
* pasemi_dma_alloc_chan allocates a DMA channel for use by a client. The
* type argument specifies whether it's a RX or TX channel, and in the case
* of TX channels which group it needs to belong to (if any).
*
* Returns a pointer to the total structure allocated on success, NULL
* on failure.
*/
void *pasemi_dma_alloc_chan(enum pasemi_dmachan_type type,
int total_size, int offset)
{
void *buf;
struct pasemi_dmachan *chan;
int chno;
BUG_ON(total_size < sizeof(struct pasemi_dmachan));
buf = kzalloc(total_size, GFP_KERNEL);
if (!buf)
return NULL;
chan = buf + offset;
chan->priv = buf;
switch (type & (TXCHAN|RXCHAN)) {
case RXCHAN:
chno = pasemi_alloc_rx_chan();
chan->chno = chno;
chan->irq = irq_create_mapping(NULL,
base_hw_irq + num_txch + chno);
chan->status = &dma_status->rx_sta[chno];
break;
case TXCHAN:
chno = pasemi_alloc_tx_chan(type);
chan->chno = chno;
chan->irq = irq_create_mapping(NULL, base_hw_irq + chno);
chan->status = &dma_status->tx_sta[chno];
break;
}
chan->chan_type = type;
return chan;
}
EXPORT_SYMBOL(pasemi_dma_alloc_chan);
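/*
* Minimal usage sketch (hypothetical client code, not part of this file),
* showing how total_size and offset are intended to be passed when a client
* embeds struct pasemi_dmachan as the first member of its own state:
*
* struct my_txring {
* struct pasemi_dmachan chan;
* void *client_state;
* };
*
* struct my_txring *ring =
* pasemi_dma_alloc_chan(TXCHAN, sizeof(struct my_txring),
* offsetof(struct my_txring, chan));
* if (!ring)
* return -ENOMEM;
*/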
/* pasemi_dma_free_chan - Free a previously allocated channel
* @chan: Channel to free
*
* Frees a previously allocated channel. It will also deallocate any
* descriptor ring associated with the channel, if allocated.
*/
void pasemi_dma_free_chan(struct pasemi_dmachan *chan)
{
if (chan->ring_virt)
pasemi_dma_free_ring(chan);
switch (chan->chan_type & (RXCHAN|TXCHAN)) {
case RXCHAN:
pasemi_free_rx_chan(chan->chno);
break;
case TXCHAN:
pasemi_free_tx_chan(chan->chno);
break;
}
kfree(chan->priv);
}
EXPORT_SYMBOL(pasemi_dma_free_chan);
/* pasemi_dma_alloc_ring - Allocate descriptor ring for a channel
* @chan: Channel for which to allocate
* @ring_size: Ring size in 64-bit (8-byte) words
*
* Allocate a descriptor ring for a channel. Returns 0 on success, errno
* on failure. The passed in struct pasemi_dmachan is updated with the
* virtual and DMA addresses of the ring.
*/
int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size)
{
BUG_ON(chan->ring_virt);
chan->ring_size = ring_size;
chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
ring_size * sizeof(u64),
&chan->ring_dma, GFP_KERNEL);
if (!chan->ring_virt)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL(pasemi_dma_alloc_ring);
/* pasemi_dma_free_ring - Free an allocated descriptor ring for a channel
* @chan: Channel for which to free the descriptor ring
*
* Frees a previously allocated descriptor ring for a channel.
*/
void pasemi_dma_free_ring(struct pasemi_dmachan *chan)
{
BUG_ON(!chan->ring_virt);
dma_free_coherent(&dma_pdev->dev, chan->ring_size * sizeof(u64),
chan->ring_virt, chan->ring_dma);
chan->ring_virt = NULL;
chan->ring_size = 0;
chan->ring_dma = 0;
}
EXPORT_SYMBOL(pasemi_dma_free_ring);
/* pasemi_dma_start_chan - Start a DMA channel
* @chan: Channel to start
* @cmdsta: Additional CCMDSTA/TCMDSTA bits to write
*
* Enables (starts) a DMA channel with optional additional arguments.
*/
void pasemi_dma_start_chan(const struct pasemi_dmachan *chan, const u32 cmdsta)
{
if (chan->chan_type == RXCHAN)
pasemi_write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno),
cmdsta | PAS_DMA_RXCHAN_CCMDSTA_EN);
else
pasemi_write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno),
cmdsta | PAS_DMA_TXCHAN_TCMDSTA_EN);
}
EXPORT_SYMBOL(pasemi_dma_start_chan);
/* pasemi_dma_stop_chan - Stop a DMA channel
* @chan: Channel to stop
*
* Stops (disables) a DMA channel. This is done by setting the ST bit in the
* CMDSTA register and waiting on the ACT (active) bit to clear, then
* finally disabling the whole channel.
*
* This function will only try for a short while for the channel to stop, if
* it doesn't it will return failure.
*
* Returns 1 on success, 0 on failure.
*/
#define MAX_RETRIES 5000
int pasemi_dma_stop_chan(const struct pasemi_dmachan *chan)
{
int reg, retries;
u32 sta;
if (chan->chan_type == RXCHAN) {
reg = PAS_DMA_RXCHAN_CCMDSTA(chan->chno);
pasemi_write_dma_reg(reg, PAS_DMA_RXCHAN_CCMDSTA_ST);
for (retries = 0; retries < MAX_RETRIES; retries++) {
sta = pasemi_read_dma_reg(reg);
if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) {
pasemi_write_dma_reg(reg, 0);
return 1;
}
cond_resched();
}
} else {
reg = PAS_DMA_TXCHAN_TCMDSTA(chan->chno);
pasemi_write_dma_reg(reg, PAS_DMA_TXCHAN_TCMDSTA_ST);
for (retries = 0; retries < MAX_RETRIES; retries++) {
sta = pasemi_read_dma_reg(reg);
if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) {
pasemi_write_dma_reg(reg, 0);
return 1;
}
cond_resched();
}
}
return 0;
}
EXPORT_SYMBOL(pasemi_dma_stop_chan);
/* pasemi_dma_alloc_buf - Allocate a buffer to use for DMA
* @chan: Channel to allocate for
* @size: Size of buffer in bytes
* @handle: DMA handle
*
* Allocate a buffer to be used by the DMA engine for read/write,
* similar to dma_alloc_coherent().
*
* Returns the virtual address of the buffer, or NULL in case of failure.
*/
void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
dma_addr_t *handle)
{
return dma_alloc_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
}
EXPORT_SYMBOL(pasemi_dma_alloc_buf);
/* pasemi_dma_free_buf - Free a buffer used for DMA
* @chan: Channel the buffer was allocated for
* @size: Size of buffer in bytes
* @handle: DMA handle
*
* Frees a previously allocated buffer.
*/
void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size,
dma_addr_t *handle)
{
dma_free_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
}
EXPORT_SYMBOL(pasemi_dma_free_buf);
/* pasemi_dma_alloc_flag - Allocate a flag (event) for channel synchronization
*
* Allocates a flag for use with channel synchronization (event descriptors).
* Returns allocated flag (0-63), < 0 on error.
*/
int pasemi_dma_alloc_flag(void)
{
int bit;
retry:
bit = find_first_bit(flags_free, MAX_FLAGS);
if (bit >= MAX_FLAGS)
return -ENOSPC;
if (!test_and_clear_bit(bit, flags_free))
goto retry;
return bit;
}
EXPORT_SYMBOL(pasemi_dma_alloc_flag);
/* pasemi_dma_free_flag - Deallocates a flag (event)
* @flag: Flag number to deallocate
*
* Frees up a flag so it can be reused for other purposes.
*/
void pasemi_dma_free_flag(int flag)
{
BUG_ON(test_bit(flag, flags_free));
BUG_ON(flag >= MAX_FLAGS);
set_bit(flag, flags_free);
}
EXPORT_SYMBOL(pasemi_dma_free_flag);
/* pasemi_dma_set_flag - Sets a flag (event) to 1
* @flag: Flag number to set active
*
* Sets the flag provided to 1.
*/
void pasemi_dma_set_flag(int flag)
{
BUG_ON(flag >= MAX_FLAGS);
if (flag < 32)
pasemi_write_dma_reg(PAS_DMA_TXF_SFLG0, 1 << flag);
else
pasemi_write_dma_reg(PAS_DMA_TXF_SFLG1, 1 << flag);
}
EXPORT_SYMBOL(pasemi_dma_set_flag);
/* pasemi_dma_clear_flag - Sets a flag (event) to 0
* @flag: Flag number to set inactive
*
* Sets the flag provided to 0.
*/
void pasemi_dma_clear_flag(int flag)
{
BUG_ON(flag >= MAX_FLAGS);
if (flag < 32)
pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 1 << flag);
else
pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 1 << flag);
}
EXPORT_SYMBOL(pasemi_dma_clear_flag);
/* pasemi_dma_alloc_fun - Allocate a function engine
*
* Allocates a function engine to use for crypto/checksum offload
* Returns allocated engine (0-7), < 0 on error.
*/
int pasemi_dma_alloc_fun(void)
{
int bit;
retry:
bit = find_first_bit(fun_free, MAX_FUN);
if (bit >= MAX_FUN)
return -ENOSPC;
if (!test_and_clear_bit(bit, fun_free))
goto retry;
return bit;
}
EXPORT_SYMBOL(pasemi_dma_alloc_fun);
/* pasemi_dma_free_fun - Deallocates a function engine
* @flag: Engine number to deallocate
*
* Frees up a function engine so it can be used for other purposes.
*/
void pasemi_dma_free_fun(int fun)
{
BUG_ON(test_bit(fun, fun_free));
BUG_ON(fun >= MAX_FUN);
set_bit(fun, fun_free);
}
EXPORT_SYMBOL(pasemi_dma_free_fun);
static void *map_onedev(struct pci_dev *p, int index)
{
struct device_node *dn;
void __iomem *ret;
dn = pci_device_to_OF_node(p);
if (!dn)
goto fallback;
ret = of_iomap(dn, index);
if (!ret)
goto fallback;
return ret;
fallback:
/* This is hardcoded and ugly, but we have some firmware versions
* that don't provide the register space in the device tree. Luckily
* they are at well-known locations so we can just do the math here.
*/
return ioremap(0xe0000000 + (p->devfn << 12), 0x2000);
}
/* pasemi_dma_init - Initialize the PA Semi DMA library
*
* This function initializes the DMA library. It must be called before
* any other function in the library.
*
* Returns 0 on success, errno on failure.
*/
int pasemi_dma_init(void)
{
static DEFINE_SPINLOCK(init_lock);
struct pci_dev *iob_pdev;
struct pci_dev *pdev;
struct resource res;
struct device_node *dn;
int i, intf, err = 0;
unsigned long timeout;
u32 tmp;
if (!machine_is(pasemi))
return -ENODEV;
spin_lock(&init_lock);
/* Make sure we haven't already initialized */
if (dma_pdev)
goto out;
iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
if (!iob_pdev) {
BUG();
pr_warn("Can't find I/O Bridge\n");
err = -ENODEV;
goto out;
}
iob_regs = map_onedev(iob_pdev, 0);
dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
if (!dma_pdev) {
BUG();
pr_warn("Can't find DMA controller\n");
err = -ENODEV;
goto out;
}
dma_regs = map_onedev(dma_pdev, 0);
base_hw_irq = virq_to_hw(dma_pdev->irq);
pci_read_config_dword(dma_pdev, PAS_DMA_CAP_TXCH, &tmp);
num_txch = (tmp & PAS_DMA_CAP_TXCH_TCHN_M) >> PAS_DMA_CAP_TXCH_TCHN_S;
pci_read_config_dword(dma_pdev, PAS_DMA_CAP_RXCH, &tmp);
num_rxch = (tmp & PAS_DMA_CAP_RXCH_RCHN_M) >> PAS_DMA_CAP_RXCH_RCHN_S;
intf = 0;
for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, NULL);
pdev;
pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, pdev))
mac_regs[intf++] = map_onedev(pdev, 0);
pci_dev_put(pdev);
for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, NULL);
pdev;
pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, pdev))
mac_regs[intf++] = map_onedev(pdev, 0);
pci_dev_put(pdev);
dn = pci_device_to_OF_node(iob_pdev);
if (dn)
err = of_address_to_resource(dn, 1, &res);
if (!dn || err) {
/* Fallback for old firmware */
res.start = 0xfd800000;
res.end = res.start + 0x1000;
}
dma_status = ioremap_cache(res.start, resource_size(&res));
pci_dev_put(iob_pdev);
for (i = 0; i < MAX_TXCH; i++)
__set_bit(i, txch_free);
for (i = 0; i < MAX_RXCH; i++)
__set_bit(i, rxch_free);
timeout = jiffies + HZ;
pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, 0);
while (pasemi_read_dma_reg(PAS_DMA_COM_RXSTA) & 1) {
if (time_after(jiffies, timeout)) {
pr_warn("Warning: Could not disable RX section\n");
break;
}
}
timeout = jiffies + HZ;
pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, 0);
while (pasemi_read_dma_reg(PAS_DMA_COM_TXSTA) & 1) {
if (time_after(jiffies, timeout)) {
pr_warn("Warning: Could not disable TX section\n");
break;
}
}
/* setup resource allocations for the different DMA sections */
tmp = pasemi_read_dma_reg(PAS_DMA_COM_CFG);
pasemi_write_dma_reg(PAS_DMA_COM_CFG, tmp | 0x18000000);
/* enable tx section */
pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);
/* enable rx section */
pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN);
for (i = 0; i < MAX_FLAGS; i++)
__set_bit(i, flags_free);
for (i = 0; i < MAX_FUN; i++)
__set_bit(i, fun_free);
/* clear all status flags */
pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 0xffffffff);
pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 0xffffffff);
pr_info("PA Semi PWRficient DMA library initialized "
"(%d tx, %d rx channels)\n", num_txch, num_rxch);
out:
spin_unlock(&init_lock);
return err;
}
EXPORT_SYMBOL(pasemi_dma_init);
/* ===== end of file: arch/powerpc/platforms/pasemi/dma_lib.c (linux-master) ===== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006-2007 PA Semi, Inc
*
* Author: Olof Johansson, PA Semi
*
* Maintained by: Olof Johansson <[email protected]>
*
* Based on drivers/net/fs_enet/mii-bitbang.c.
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/phy.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/platform_device.h>
#define DELAY 1
static void __iomem *gpio_regs;
struct gpio_priv {
int mdc_pin;
int mdio_pin;
};
#define MDC_PIN(bus) (((struct gpio_priv *)bus->priv)->mdc_pin)
#define MDIO_PIN(bus) (((struct gpio_priv *)bus->priv)->mdio_pin)
static inline void mdio_lo(struct mii_bus *bus)
{
out_le32(gpio_regs+0x10, 1 << MDIO_PIN(bus));
}
static inline void mdio_hi(struct mii_bus *bus)
{
out_le32(gpio_regs, 1 << MDIO_PIN(bus));
}
static inline void mdc_lo(struct mii_bus *bus)
{
out_le32(gpio_regs+0x10, 1 << MDC_PIN(bus));
}
static inline void mdc_hi(struct mii_bus *bus)
{
out_le32(gpio_regs, 1 << MDC_PIN(bus));
}
static inline void mdio_active(struct mii_bus *bus)
{
out_le32(gpio_regs+0x20, (1 << MDC_PIN(bus)) | (1 << MDIO_PIN(bus)));
}
static inline void mdio_tristate(struct mii_bus *bus)
{
out_le32(gpio_regs+0x30, (1 << MDIO_PIN(bus)));
}
static inline int mdio_read(struct mii_bus *bus)
{
return !!(in_le32(gpio_regs+0x40) & (1 << MDIO_PIN(bus)));
}
static void clock_out(struct mii_bus *bus, int bit)
{
if (bit)
mdio_hi(bus);
else
mdio_lo(bus);
udelay(DELAY);
mdc_hi(bus);
udelay(DELAY);
mdc_lo(bus);
}
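/*
* For readability, a summary of the standard IEEE 802.3 clause 22 management
* frame this bit-bang code produces (general MDIO knowledge, not taken from
* board documentation): a preamble of 1s, start bits 01, opcode 10 for read
* or 01 for write, a 5-bit PHY address, a 5-bit register address, a
* turnaround, then 16 data bits MSB first. bitbang_pre() below emits
* everything up to and including the register address; the read and write
* paths add the turnaround and the data bits.
*/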
/* Utility to send the preamble, address, and register (common to read and write). */
static void bitbang_pre(struct mii_bus *bus, int read, u8 addr, u8 reg)
{
int i;
/* CFE uses a really long preamble (40 bits). We'll do the same. */
mdio_active(bus);
for (i = 0; i < 40; i++) {
clock_out(bus, 1);
}
/* send the start bit (01) and the read opcode (10) or the write opcode (01) */
clock_out(bus, 0);
clock_out(bus, 1);
clock_out(bus, read);
clock_out(bus, !read);
/* send the PHY address */
for (i = 0; i < 5; i++) {
clock_out(bus, (addr & 0x10) != 0);
addr <<= 1;
}
/* send the register address */
for (i = 0; i < 5; i++) {
clock_out(bus, (reg & 0x10) != 0);
reg <<= 1;
}
}
static int gpio_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
u16 rdreg;
int ret, i;
u8 addr = phy_id & 0xff;
u8 reg = location & 0xff;
bitbang_pre(bus, 1, addr, reg);
/* tri-state our MDIO I/O pin so we can read */
mdio_tristate(bus);
udelay(DELAY);
mdc_hi(bus);
udelay(DELAY);
mdc_lo(bus);
/* read 16 bits of register data, MSB first */
rdreg = 0;
for (i = 0; i < 16; i++) {
mdc_lo(bus);
udelay(DELAY);
mdc_hi(bus);
udelay(DELAY);
mdc_lo(bus);
udelay(DELAY);
rdreg <<= 1;
rdreg |= mdio_read(bus);
}
mdc_hi(bus);
udelay(DELAY);
mdc_lo(bus);
udelay(DELAY);
ret = rdreg;
return ret;
}
static int gpio_mdio_write(struct mii_bus *bus, int phy_id, int location, u16 val)
{
int i;
u8 addr = phy_id & 0xff;
u8 reg = location & 0xff;
u16 value = val & 0xffff;
bitbang_pre(bus, 0, addr, reg);
/* send the turnaround (10) */
mdc_lo(bus);
mdio_hi(bus);
udelay(DELAY);
mdc_hi(bus);
udelay(DELAY);
mdc_lo(bus);
mdio_lo(bus);
udelay(DELAY);
mdc_hi(bus);
udelay(DELAY);
/* write 16 bits of register data, MSB first */
for (i = 0; i < 16; i++) {
mdc_lo(bus);
if (value & 0x8000)
mdio_hi(bus);
else
mdio_lo(bus);
udelay(DELAY);
mdc_hi(bus);
udelay(DELAY);
value <<= 1;
}
/*
* Tri-state the MDIO line.
*/
mdio_tristate(bus);
mdc_lo(bus);
udelay(DELAY);
mdc_hi(bus);
udelay(DELAY);
return 0;
}
static int gpio_mdio_reset(struct mii_bus *bus)
{
/*nothing here - dunno how to reset it*/
return 0;
}
static int gpio_mdio_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct device_node *np = ofdev->dev.of_node;
struct mii_bus *new_bus;
struct gpio_priv *priv;
const unsigned int *prop;
int err;
err = -ENOMEM;
priv = kzalloc(sizeof(struct gpio_priv), GFP_KERNEL);
if (!priv)
goto out;
new_bus = mdiobus_alloc();
if (!new_bus)
goto out_free_priv;
new_bus->name = "pasemi gpio mdio bus";
new_bus->read = &gpio_mdio_read;
new_bus->write = &gpio_mdio_write;
new_bus->reset = &gpio_mdio_reset;
prop = of_get_property(np, "reg", NULL);
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", *prop);
new_bus->priv = priv;
prop = of_get_property(np, "mdc-pin", NULL);
priv->mdc_pin = *prop;
prop = of_get_property(np, "mdio-pin", NULL);
priv->mdio_pin = *prop;
new_bus->parent = dev;
dev_set_drvdata(dev, new_bus);
err = of_mdiobus_register(new_bus, np);
if (err != 0) {
pr_err("%s: Cannot register as MDIO bus, err %d\n",
new_bus->name, err);
goto out_free_irq;
}
return 0;
out_free_irq:
kfree(new_bus);
out_free_priv:
kfree(priv);
out:
return err;
}
static int gpio_mdio_remove(struct platform_device *dev)
{
struct mii_bus *bus = dev_get_drvdata(&dev->dev);
mdiobus_unregister(bus);
dev_set_drvdata(&dev->dev, NULL);
kfree(bus->priv);
bus->priv = NULL;
mdiobus_free(bus);
return 0;
}
static const struct of_device_id gpio_mdio_match[] =
{
{
.compatible = "gpio-mdio",
},
{},
};
MODULE_DEVICE_TABLE(of, gpio_mdio_match);
static struct platform_driver gpio_mdio_driver =
{
.probe = gpio_mdio_probe,
.remove = gpio_mdio_remove,
.driver = {
.name = "gpio-mdio-bitbang",
.of_match_table = gpio_mdio_match,
},
};
static int __init gpio_mdio_init(void)
{
struct device_node *np;
np = of_find_compatible_node(NULL, NULL, "1682m-gpio");
if (!np)
np = of_find_compatible_node(NULL, NULL,
"pasemi,pwrficient-gpio");
if (!np)
return -ENODEV;
gpio_regs = of_iomap(np, 0);
of_node_put(np);
if (!gpio_regs)
return -ENODEV;
return platform_driver_register(&gpio_mdio_driver);
}
module_init(gpio_mdio_init);
static void __exit gpio_mdio_exit(void)
{
platform_driver_unregister(&gpio_mdio_driver);
if (gpio_regs)
iounmap(gpio_regs);
}
module_exit(gpio_mdio_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Olof Johansson <[email protected]>");
MODULE_DESCRIPTION("Driver for MDIO over GPIO on PA Semi PWRficient-based boards");
/* ===== end of file: arch/powerpc/platforms/pasemi/gpio_mdio.c (linux-master) ===== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006 PA Semi, Inc
*
* Authors: Kip Walker, PA Semi
* Olof Johansson, PA Semi
*
* Maintained by: Olof Johansson <[email protected]>
*
* Based on arch/powerpc/platforms/maple/pci.c
*/
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <asm/pci-bridge.h>
#include <asm/isa-bridge.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include "pasemi.h"
#define PA_PXP_CFA(bus, devfn, off) (((bus) << 20) | ((devfn) << 12) | (off))
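/*
* Config-space layout implied by the macro above: bits [27:20] select the
* bus, bits [19:12] the devfn and bits [11:0] the register offset, i.e. a
* flat 4 KiB window per function inside the mapped cfg_data region (8 KiB
* for device 0 function 0, see pa_pxp_offset_valid() below).
*/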
static inline int pa_pxp_offset_valid(u8 bus, u8 devfn, int offset)
{
/* Device 0 Function 0 is special: its config space spans function 1 as
* well, so allow larger offset. It's really a two-function device but the
* second function does not probe.
*/
if (bus == 0 && devfn == 0)
return offset < 8192;
else
return offset < 4096;
}
static void volatile __iomem *pa_pxp_cfg_addr(struct pci_controller *hose,
u8 bus, u8 devfn, int offset)
{
return hose->cfg_data + PA_PXP_CFA(bus, devfn, offset);
}
static inline int is_root_port(int busno, int devfn)
{
return ((busno == 0) && (PCI_FUNC(devfn) < 4) &&
((PCI_SLOT(devfn) == 16) || (PCI_SLOT(devfn) == 17)));
}
static inline int is_5945_reg(int reg)
{
return (((reg >= 0x18) && (reg < 0x34)) ||
((reg >= 0x158) && (reg < 0x178)));
}
static int workaround_5945(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
struct pci_controller *hose;
void volatile __iomem *addr, *dummy;
int byte;
u32 tmp;
if (!is_root_port(bus->number, devfn) || !is_5945_reg(offset))
return 0;
hose = pci_bus_to_host(bus);
addr = pa_pxp_cfg_addr(hose, bus->number, devfn, offset & ~0x3);
byte = offset & 0x3;
/* Workaround bug 5945: write 0 to a dummy register before reading,
* and write back what we read. We must read/write the full 32-bit
* contents so we need to shift and mask by hand.
*/
dummy = pa_pxp_cfg_addr(hose, bus->number, devfn, 0x10);
out_le32(dummy, 0);
tmp = in_le32(addr);
out_le32(addr, tmp);
switch (len) {
case 1:
*val = (tmp >> (8*byte)) & 0xff;
break;
case 2:
if (byte == 0)
*val = tmp & 0xffff;
else
*val = (tmp >> 16) & 0xffff;
break;
default:
*val = tmp;
break;
}
return 1;
}
#ifdef CONFIG_PPC_PASEMI_NEMO
#define PXP_ERR_CFG_REG 0x4
#define PXP_IGNORE_PCIE_ERRORS 0x800
#define SB600_BUS 5
static void sb600_set_flag(int bus)
{
static void __iomem *iob_mapbase = NULL;
struct resource res;
struct device_node *dn;
int err;
if (iob_mapbase == NULL) {
dn = of_find_compatible_node(NULL, "isa", "pasemi,1682m-iob");
if (!dn) {
pr_crit("NEMO SB600 missing iob node\n");
return;
}
err = of_address_to_resource(dn, 0, &res);
of_node_put(dn);
if (err) {
pr_crit("NEMO SB600 missing resource\n");
return;
}
pr_info("NEMO SB600 IOB base %08llx\n",res.start);
iob_mapbase = ioremap(res.start + 0x100, 0x94);
}
if (iob_mapbase != NULL) {
if (bus == SB600_BUS) {
/*
* This is the SB600's bus, tell the PCI-e root port
* to allow non-zero devices to enumerate.
*/
out_le32(iob_mapbase + PXP_ERR_CFG_REG, in_le32(iob_mapbase + PXP_ERR_CFG_REG) | PXP_IGNORE_PCIE_ERRORS);
} else {
/*
* Only scan device 0 on other busses
*/
out_le32(iob_mapbase + PXP_ERR_CFG_REG, in_le32(iob_mapbase + PXP_ERR_CFG_REG) & ~PXP_IGNORE_PCIE_ERRORS);
}
}
}
#else
static void sb600_set_flag(int bus)
{
}
#endif
static int pa_pxp_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
struct pci_controller *hose;
void volatile __iomem *addr;
hose = pci_bus_to_host(bus);
if (!hose)
return PCIBIOS_DEVICE_NOT_FOUND;
if (!pa_pxp_offset_valid(bus->number, devfn, offset))
return PCIBIOS_BAD_REGISTER_NUMBER;
if (workaround_5945(bus, devfn, offset, len, val))
return PCIBIOS_SUCCESSFUL;
addr = pa_pxp_cfg_addr(hose, bus->number, devfn, offset);
sb600_set_flag(bus->number);
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
switch (len) {
case 1:
*val = in_8(addr);
break;
case 2:
*val = in_le16(addr);
break;
default:
*val = in_le32(addr);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int pa_pxp_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
struct pci_controller *hose;
void volatile __iomem *addr;
hose = pci_bus_to_host(bus);
if (!hose)
return PCIBIOS_DEVICE_NOT_FOUND;
if (!pa_pxp_offset_valid(bus->number, devfn, offset))
return PCIBIOS_BAD_REGISTER_NUMBER;
addr = pa_pxp_cfg_addr(hose, bus->number, devfn, offset);
sb600_set_flag(bus->number);
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
switch (len) {
case 1:
out_8(addr, val);
break;
case 2:
out_le16(addr, val);
break;
default:
out_le32(addr, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops pa_pxp_ops = {
.read = pa_pxp_read_config,
.write = pa_pxp_write_config,
};
static void __init setup_pa_pxp(struct pci_controller *hose)
{
hose->ops = &pa_pxp_ops;
hose->cfg_data = ioremap(0xe0000000, 0x10000000);
}
static int __init pas_add_bridge(struct device_node *dev)
{
struct pci_controller *hose;
pr_debug("Adding PCI host bridge %pOF\n", dev);
hose = pcibios_alloc_controller(dev);
if (!hose)
return -ENOMEM;
hose->first_busno = 0;
hose->last_busno = 0xff;
hose->controller_ops = pasemi_pci_controller_ops;
setup_pa_pxp(hose);
pr_info("Found PA-PXP PCI host bridge.\n");
/* Interpret the "ranges" property */
pci_process_bridge_OF_ranges(hose, dev, 1);
/*
* Scan for an isa bridge. This is needed to find the SB600 on the nemo
* and does nothing on machines without one.
*/
isa_bridge_find_early(hose);
return 0;
}
void __init pas_pci_init(void)
{
struct device_node *np;
int res;
pci_set_flags(PCI_SCAN_ALL_PCIE_DEVS);
np = of_find_compatible_node(of_root, NULL, "pasemi,rootbus");
if (np) {
res = pas_add_bridge(np);
of_node_put(np);
}
}
void __iomem *__init pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset)
{
struct pci_controller *hose;
hose = pci_bus_to_host(dev->bus);
return (void __iomem *)pa_pxp_cfg_addr(hose, dev->bus->number, dev->devfn, offset);
}
struct pci_controller_ops pasemi_pci_controller_ops;
/* ===== end of file: arch/powerpc/platforms/pasemi/pci.c (linux-master) ===== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006-2007 PA Semi, Inc
*
* Authors: Kip Walker, PA Semi
* Olof Johansson, PA Semi
*
* Maintained by: Olof Johansson <[email protected]>
*
* Based on arch/powerpc/platforms/maple/setup.c
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <linux/irqdomain.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/i8259.h>
#include <asm/mpic.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/mmu.h>
#include <asm/debug.h>
#include <pcmcia/ss.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
#include "pasemi.h"
/* SDC reset register, must be pre-mapped at reset time */
static void __iomem *reset_reg;
/* Various error status registers, must be pre-mapped at MCE time */
#define MAX_MCE_REGS 32
struct mce_regs {
char *name;
void __iomem *addr;
};
static struct mce_regs mce_regs[MAX_MCE_REGS];
static int num_mce_regs;
static int nmi_virq = 0;
static void __noreturn pas_restart(char *cmd)
{
/* Need to put the other CPUs in a hold loop so they're not sleeping */
smp_send_stop();
udelay(10000);
printk("Restarting...\n");
while (1)
out_le32(reset_reg, 0x6000000);
}
#ifdef CONFIG_PPC_PASEMI_NEMO
void pas_shutdown(void)
{
/* Set the PLD bit that makes the SB600 think the power button is being pressed */
void __iomem *pld_map = ioremap(0xf5000000,4096);
while (1)
out_8(pld_map+7,0x01);
}
/* RTC platform device structure, as it is not in the device tree */
static struct resource rtc_resource[] = {{
.name = "rtc",
.start = 0x70,
.end = 0x71,
.flags = IORESOURCE_IO,
}, {
.name = "rtc",
.start = 8,
.end = 8,
.flags = IORESOURCE_IRQ,
}};
static inline void nemo_init_rtc(void)
{
platform_device_register_simple("rtc_cmos", -1, rtc_resource, 2);
}
#else
static inline void nemo_init_rtc(void)
{
}
#endif
#ifdef CONFIG_SMP
static arch_spinlock_t timebase_lock;
static unsigned long timebase;
static void pas_give_timebase(void)
{
unsigned long flags;
local_irq_save(flags);
hard_irq_disable();
arch_spin_lock(&timebase_lock);
mtspr(SPRN_TBCTL, TBCTL_FREEZE);
isync();
timebase = get_tb();
arch_spin_unlock(&timebase_lock);
while (timebase)
barrier();
mtspr(SPRN_TBCTL, TBCTL_RESTART);
local_irq_restore(flags);
}
static void pas_take_timebase(void)
{
while (!timebase)
smp_rmb();
arch_spin_lock(&timebase_lock);
set_tb(timebase >> 32, timebase & 0xffffffff);
timebase = 0;
arch_spin_unlock(&timebase_lock);
}
static struct smp_ops_t pas_smp_ops = {
.probe = smp_mpic_probe,
.message_pass = smp_mpic_message_pass,
.kick_cpu = smp_generic_kick_cpu,
.setup_cpu = smp_mpic_setup_cpu,
.give_timebase = pas_give_timebase,
.take_timebase = pas_take_timebase,
};
#endif /* CONFIG_SMP */
static void __init pas_setup_arch(void)
{
#ifdef CONFIG_SMP
/* Setup SMP callback */
smp_ops = &pas_smp_ops;
#endif
/* Remap SDC register for doing reset */
/* XXXOJN This should maybe come out of the device tree */
reset_reg = ioremap(0xfc101100, 4);
}
static int __init pas_setup_mce_regs(void)
{
struct pci_dev *dev;
int reg;
/* Remap various SoC status registers for use by the MCE handler */
reg = 0;
dev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa00a, NULL);
while (dev && reg < MAX_MCE_REGS) {
mce_regs[reg].name = kasprintf(GFP_KERNEL,
"mc%d_mcdebug_errsta", reg);
mce_regs[reg].addr = pasemi_pci_getcfgaddr(dev, 0x730);
dev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa00a, dev);
reg++;
}
dev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
if (dev && reg+4 < MAX_MCE_REGS) {
mce_regs[reg].name = "iobdbg_IntStatus1";
mce_regs[reg].addr = pasemi_pci_getcfgaddr(dev, 0x438);
reg++;
mce_regs[reg].name = "iobdbg_IOCTbusIntDbgReg";
mce_regs[reg].addr = pasemi_pci_getcfgaddr(dev, 0x454);
reg++;
mce_regs[reg].name = "iobiom_IntStatus";
mce_regs[reg].addr = pasemi_pci_getcfgaddr(dev, 0xc10);
reg++;
mce_regs[reg].name = "iobiom_IntDbgReg";
mce_regs[reg].addr = pasemi_pci_getcfgaddr(dev, 0xc1c);
reg++;
}
dev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa009, NULL);
if (dev && reg+2 < MAX_MCE_REGS) {
mce_regs[reg].name = "l2csts_IntStatus";
mce_regs[reg].addr = pasemi_pci_getcfgaddr(dev, 0x200);
reg++;
mce_regs[reg].name = "l2csts_Cnt";
mce_regs[reg].addr = pasemi_pci_getcfgaddr(dev, 0x214);
reg++;
}
num_mce_regs = reg;
return 0;
}
machine_device_initcall(pasemi, pas_setup_mce_regs);
#ifdef CONFIG_PPC_PASEMI_NEMO
static void sb600_8259_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
if (cascade_irq)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
}
static void __init nemo_init_IRQ(struct mpic *mpic)
{
struct device_node *np;
int gpio_virq;
/* Connect the SB600's legacy i8259 controller */
np = of_find_node_by_path("/pxp@0,e0000000");
i8259_init(np, 0);
of_node_put(np);
gpio_virq = irq_create_mapping(NULL, 3);
irq_set_irq_type(gpio_virq, IRQ_TYPE_LEVEL_HIGH);
irq_set_chained_handler(gpio_virq, sb600_8259_cascade);
mpic_unmask_irq(irq_get_irq_data(gpio_virq));
irq_set_default_host(mpic->irqhost);
}
#else
static inline void nemo_init_IRQ(struct mpic *mpic)
{
}
#endif
static __init void pas_init_IRQ(void)
{
struct device_node *np;
struct device_node *root, *mpic_node;
unsigned long openpic_addr;
const unsigned int *opprop;
int naddr, opplen;
int mpic_flags;
const unsigned int *nmiprop;
struct mpic *mpic;
mpic_node = NULL;
for_each_node_by_type(np, "interrupt-controller")
if (of_device_is_compatible(np, "open-pic")) {
mpic_node = np;
break;
}
if (!mpic_node)
for_each_node_by_type(np, "open-pic") {
mpic_node = np;
break;
}
if (!mpic_node) {
pr_err("Failed to locate the MPIC interrupt controller\n");
return;
}
/* Find address list in /platform-open-pic */
root = of_find_node_by_path("/");
naddr = of_n_addr_cells(root);
opprop = of_get_property(root, "platform-open-pic", &opplen);
if (!opprop) {
pr_err("No platform-open-pic property.\n");
of_node_put(root);
return;
}
openpic_addr = of_read_number(opprop, naddr);
pr_debug("OpenPIC addr: %lx\n", openpic_addr);
mpic_flags = MPIC_LARGE_VECTORS | MPIC_NO_BIAS | MPIC_NO_RESET;
nmiprop = of_get_property(mpic_node, "nmi-source", NULL);
if (nmiprop)
mpic_flags |= MPIC_ENABLE_MCK;
mpic = mpic_alloc(mpic_node, openpic_addr,
mpic_flags, 0, 0, "PASEMI-OPIC");
BUG_ON(!mpic);
mpic_assign_isu(mpic, 0, mpic->paddr + 0x10000);
mpic_init(mpic);
/* The NMI/MCK source needs to be prio 15 */
if (nmiprop) {
nmi_virq = irq_create_mapping(NULL, *nmiprop);
mpic_irq_set_priority(nmi_virq, 15);
irq_set_irq_type(nmi_virq, IRQ_TYPE_EDGE_RISING);
mpic_unmask_irq(irq_get_irq_data(nmi_virq));
}
nemo_init_IRQ(mpic);
of_node_put(mpic_node);
of_node_put(root);
}
static void __init pas_progress(char *s, unsigned short hex)
{
printk("[%04x] : %s\n", hex, s ? s : "");
}
static int pas_machine_check_handler(struct pt_regs *regs)
{
int cpu = smp_processor_id();
unsigned long srr0, srr1, dsisr;
int dump_slb = 0;
int i;
srr0 = regs->nip;
srr1 = regs->msr;
if (nmi_virq && mpic_get_mcirq() == nmi_virq) {
pr_err("NMI delivered\n");
debugger(regs);
mpic_end_irq(irq_get_irq_data(nmi_virq));
goto out;
}
dsisr = mfspr(SPRN_DSISR);
pr_err("Machine Check on CPU %d\n", cpu);
pr_err("SRR0 0x%016lx SRR1 0x%016lx\n", srr0, srr1);
pr_err("DSISR 0x%016lx DAR 0x%016lx\n", dsisr, regs->dar);
pr_err("BER 0x%016lx MER 0x%016lx\n", mfspr(SPRN_PA6T_BER),
mfspr(SPRN_PA6T_MER));
pr_err("IER 0x%016lx DER 0x%016lx\n", mfspr(SPRN_PA6T_IER),
mfspr(SPRN_PA6T_DER));
pr_err("Cause:\n");
if (srr1 & 0x200000)
pr_err("Signalled by SDC\n");
if (srr1 & 0x100000) {
pr_err("Load/Store detected error:\n");
if (dsisr & 0x8000)
pr_err("D-cache ECC double-bit error or bus error\n");
if (dsisr & 0x4000)
pr_err("LSU snoop response error\n");
if (dsisr & 0x2000) {
pr_err("MMU SLB multi-hit or invalid B field\n");
dump_slb = 1;
}
if (dsisr & 0x1000)
pr_err("Recoverable Duptags\n");
if (dsisr & 0x800)
pr_err("Recoverable D-cache parity error count overflow\n");
if (dsisr & 0x400)
pr_err("TLB parity error count overflow\n");
}
if (srr1 & 0x80000)
pr_err("Bus Error\n");
if (srr1 & 0x40000) {
pr_err("I-side SLB multiple hit\n");
dump_slb = 1;
}
if (srr1 & 0x20000)
pr_err("I-cache parity error hit\n");
if (num_mce_regs == 0)
pr_err("No MCE registers mapped yet, can't dump\n");
else
pr_err("SoC debug registers:\n");
for (i = 0; i < num_mce_regs; i++)
pr_err("%s: 0x%08x\n", mce_regs[i].name,
in_le32(mce_regs[i].addr));
if (dump_slb) {
unsigned long e, v;
int i;
pr_err("slb contents:\n");
for (i = 0; i < mmu_slb_size; i++) {
asm volatile("slbmfee %0,%1" : "=r" (e) : "r" (i));
asm volatile("slbmfev %0,%1" : "=r" (v) : "r" (i));
pr_err("%02d %016lx %016lx\n", i, e, v);
}
}
out:
/* SRR1[62] is from MSR[62] if recoverable, so pass that back */
return !!(srr1 & 0x2);
}
static const struct of_device_id pasemi_bus_ids[] = {
/* Unfortunately needed for legacy firmwares */
{ .type = "localbus", },
{ .type = "sdc", },
/* These are the proper entries, which newer firmware uses */
{ .compatible = "pasemi,localbus", },
{ .compatible = "pasemi,sdc", },
{},
};
static int __init pasemi_publish_devices(void)
{
/* Publish OF platform devices for SDC and other non-PCI devices */
of_platform_bus_probe(NULL, pasemi_bus_ids, NULL);
nemo_init_rtc();
return 0;
}
machine_device_initcall(pasemi, pasemi_publish_devices);
/*
* Called very early, MMU is off, device-tree isn't unflattened
*/
static int __init pas_probe(void)
{
if (!of_machine_is_compatible("PA6T-1682M") &&
!of_machine_is_compatible("pasemi,pwrficient"))
return 0;
#ifdef CONFIG_PPC_PASEMI_NEMO
/*
* Check for the Nemo motherboard here, if we are running on one
* change the machine definition to fit
*/
if (of_machine_is_compatible("pasemi,nemo")) {
pm_power_off = pas_shutdown;
ppc_md.name = "A-EON Amigaone X1000";
}
#endif
iommu_init_early_pasemi();
return 1;
}
define_machine(pasemi) {
.name = "PA Semi PWRficient",
.probe = pas_probe,
.setup_arch = pas_setup_arch,
.discover_phbs = pas_pci_init,
.init_IRQ = pas_init_IRQ,
.get_irq = mpic_get_irq,
.restart = pas_restart,
.get_boot_time = pas_get_boot_time,
.progress = pas_progress,
.machine_check_exception = pas_machine_check_handler,
};
/* ===== end of file: arch/powerpc/platforms/pasemi/setup.c (linux-master) ===== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006 PA Semi, Inc
*
* Maintained by: Olof Johansson <[email protected]>
*/
#include <linux/time.h>
#include <asm/time.h>
#include "pasemi.h"
time64_t __init pas_get_boot_time(void)
{
/* Let's just return a fake date right now */
return mktime64(2006, 1, 1, 12, 0, 0);
}
/* ===== end of file: arch/powerpc/platforms/pasemi/time.c (linux-master) ===== */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2005-2008, PA Semi, Inc
*
* Maintained by: Olof Johansson <[email protected]>
*/
#undef DEBUG
#include <linux/memblock.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include "pasemi.h"
#define IOBMAP_PAGE_SHIFT 12
#define IOBMAP_PAGE_SIZE (1 << IOBMAP_PAGE_SHIFT)
#define IOBMAP_PAGE_MASK (IOBMAP_PAGE_SIZE - 1)
#define IOB_BASE 0xe0000000
#define IOB_SIZE 0x3000
/* Configuration registers */
#define IOBCAP_REG 0x40
#define IOBCOM_REG 0x100
/* Enable IOB address translation */
#define IOBCOM_ATEN 0x00000100
/* Address decode configuration register */
#define IOB_AD_REG 0x14c
/* IOBCOM_AD_REG fields */
#define IOB_AD_VGPRT 0x00000e00
#define IOB_AD_VGAEN 0x00000100
/* Direct mapping settings */
#define IOB_AD_MPSEL_MASK 0x00000030
#define IOB_AD_MPSEL_B38 0x00000000
#define IOB_AD_MPSEL_B40 0x00000010
#define IOB_AD_MPSEL_B42 0x00000020
/* Translation window size / enable */
#define IOB_AD_TRNG_MASK 0x00000003
#define IOB_AD_TRNG_256M 0x00000000
#define IOB_AD_TRNG_2G 0x00000001
#define IOB_AD_TRNG_128G 0x00000003
#define IOB_TABLEBASE_REG 0x154
/* Base of the 64 4-byte L1 registers */
#define IOB_XLT_L1_REGBASE 0x2b00
/* Register to invalidate TLB entries */
#define IOB_AT_INVAL_TLB_REG 0x2d00
/* The top two bits of the level 1 entry contain valid and type flags */
#define IOBMAP_L1E_V 0x40000000
#define IOBMAP_L1E_V_B 0x80000000
/* For big page entries, the bottom two bits contains flags */
#define IOBMAP_L1E_BIG_CACHED 0x00000002
#define IOBMAP_L1E_BIG_PRIORITY 0x00000001
/* For regular level 2 entries, top 2 bits contain valid and cache flags */
#define IOBMAP_L2E_V 0x80000000
#define IOBMAP_L2E_V_CACHED 0xc0000000
static void __iomem *iob;
static u32 iob_l1_emptyval;
static u32 iob_l2_emptyval;
static u32 *iob_l2_base;
static struct iommu_table iommu_table_iobmap;
static int iommu_table_iobmap_inited;
static int iobmap_build(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
unsigned long attrs)
{
u32 *ip;
u32 rpn;
unsigned long bus_addr;
pr_debug("iobmap: build at: %lx, %lx, addr: %lx\n", index, npages, uaddr);
bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT;
ip = ((u32 *)tbl->it_base) + index;
while (npages--) {
rpn = __pa(uaddr) >> IOBMAP_PAGE_SHIFT;
*(ip++) = IOBMAP_L2E_V | rpn;
/* invalidate tlb, can be optimized more */
out_le32(iob+IOB_AT_INVAL_TLB_REG, bus_addr >> 14);
uaddr += IOBMAP_PAGE_SIZE;
bus_addr += IOBMAP_PAGE_SIZE;
}
return 0;
}
static void iobmap_free(struct iommu_table *tbl, long index,
long npages)
{
u32 *ip;
unsigned long bus_addr;
pr_debug("iobmap: free at: %lx, %lx\n", index, npages);
bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT;
ip = ((u32 *)tbl->it_base) + index;
while (npages--) {
*(ip++) = iob_l2_emptyval;
/* invalidate tlb, can be optimized more */
out_le32(iob+IOB_AT_INVAL_TLB_REG, bus_addr >> 14);
bus_addr += IOBMAP_PAGE_SIZE;
}
}
static struct iommu_table_ops iommu_table_iobmap_ops = {
.set = iobmap_build,
.clear = iobmap_free
};
static void iommu_table_iobmap_setup(void)
{
pr_debug(" -> %s\n", __func__);
iommu_table_iobmap.it_busno = 0;
iommu_table_iobmap.it_offset = 0;
iommu_table_iobmap.it_page_shift = IOBMAP_PAGE_SHIFT;
/* it_size is in number of entries */
iommu_table_iobmap.it_size =
0x80000000 >> iommu_table_iobmap.it_page_shift;
/* Initialize the common IOMMU code */
iommu_table_iobmap.it_base = (unsigned long)iob_l2_base;
iommu_table_iobmap.it_index = 0;
/* XXXOJN tune this to avoid IOB cache invals.
* Should probably be 8 (64 bytes)
*/
iommu_table_iobmap.it_blocksize = 4;
iommu_table_iobmap.it_ops = &iommu_table_iobmap_ops;
if (!iommu_init_table(&iommu_table_iobmap, 0, 0, 0))
panic("Failed to initialize iommu table");
pr_debug(" <- %s\n", __func__);
}
static void pci_dma_bus_setup_pasemi(struct pci_bus *bus)
{
pr_debug("pci_dma_bus_setup, bus %p, bus->self %p\n", bus, bus->self);
if (!iommu_table_iobmap_inited) {
iommu_table_iobmap_inited = 1;
iommu_table_iobmap_setup();
}
}
static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
{
pr_debug("pci_dma_dev_setup, dev %p (%s)\n", dev, pci_name(dev));
#if !defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
/* For non-LPAR environment, don't translate anything for the DMA
* engine. The exception to this is if the user has enabled
* CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE at build time.
*/
if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
!firmware_has_feature(FW_FEATURE_LPAR)) {
dev->dev.dma_ops = NULL;
/*
* Set the coherent DMA mask to prevent the iommu
* being used unnecessarily
*/
dev->dev.coherent_dma_mask = DMA_BIT_MASK(44);
return;
}
#endif
set_iommu_table_base(&dev->dev, &iommu_table_iobmap);
}
static int __init iob_init(struct device_node *dn)
{
unsigned long tmp;
u32 regword;
int i;
pr_debug(" -> %s\n", __func__);
/* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */
iob_l2_base = memblock_alloc_try_nid_raw(1UL << 21, 1UL << 21,
MEMBLOCK_LOW_LIMIT, 0x80000000,
NUMA_NO_NODE);
if (!iob_l2_base)
panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%x\n",
__func__, 1UL << 21, 1UL << 21, 0x80000000);
pr_info("IOBMAP L2 allocated at: %p\n", iob_l2_base);
/* Allocate a spare page to map all invalid IOTLB pages. */
tmp = memblock_phys_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
if (!tmp)
panic("IOBMAP: Cannot allocate spare page!");
/* Empty l1 is marked invalid */
iob_l1_emptyval = 0;
/* Empty l2 is mapped to dummy page */
iob_l2_emptyval = IOBMAP_L2E_V | (tmp >> IOBMAP_PAGE_SHIFT);
iob = ioremap(IOB_BASE, IOB_SIZE);
if (!iob)
panic("IOBMAP: Cannot map registers!");
/* setup direct mapping of the L1 entries */
for (i = 0; i < 64; i++) {
/* Each L1 covers 32MB, i.e. 8K entries = 32K of ram */
regword = IOBMAP_L1E_V | (__pa(iob_l2_base + i*0x2000) >> 12);
out_le32(iob+IOB_XLT_L1_REGBASE+i*4, regword);
}
/* set 2GB translation window, based at 0 */
regword = in_le32(iob+IOB_AD_REG);
regword &= ~IOB_AD_TRNG_MASK;
regword |= IOB_AD_TRNG_2G;
out_le32(iob+IOB_AD_REG, regword);
/* Enable translation */
regword = in_le32(iob+IOBCOM_REG);
regword |= IOBCOM_ATEN;
out_le32(iob+IOBCOM_REG, regword);
pr_debug(" <- %s\n", __func__);
return 0;
}
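/*
 * Illustrative note (not part of the upstream driver): with the layout
 * iob_init() programs above -- 64 L1 entries of 32 MiB each, 8192 L2
 * entries per L1 entry, i.e. 4 KiB IOBMAP pages -- a 32-bit DMA address
 * decomposes as
 *
 *	l1  = bus_addr >> 25;			(which 32 MiB region)
 *	l2  = (bus_addr >> 12) & 0x1fff;	(4 KiB page within that region)
 *	idx = l1 * 0x2000 + l2;			(flat index into iob_l2_base[])
 *
 * which collapses to bus_addr >> IOBMAP_PAGE_SHIFT, matching how
 * iobmap_build() and iobmap_free() index the table through it_base.
 */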
/* These are called very early. */
void __init iommu_init_early_pasemi(void)
{
int iommu_off;
#ifndef CONFIG_PPC_PASEMI_IOMMU
iommu_off = 1;
#else
iommu_off = of_chosen &&
of_property_read_bool(of_chosen, "linux,iommu-off");
#endif
if (iommu_off)
return;
iob_init(NULL);
pasemi_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pasemi;
pasemi_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pasemi;
set_pci_dma_ops(&dma_iommu_ops);
}
| linux-master | arch/powerpc/platforms/pasemi/iommu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006-2007 PA Semi, Inc
*
* Maintained by: Olof Johansson <[email protected]>
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/irq.h>
#include <asm/machdep.h>
#include <asm/reg.h>
#include <asm/smp.h>
#include "pasemi.h"
struct sleep_mode {
char *name;
void (*entry)(void);
};
static struct sleep_mode modes[] = {
{ .name = "spin", .entry = &idle_spin },
{ .name = "doze", .entry = &idle_doze },
};
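/* Index into modes[] above; can be overridden at boot with e.g. "idle=doze" (see idle_param() below). */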
static int current_mode = 0;
static int pasemi_system_reset_exception(struct pt_regs *regs)
{
/* If we were woken up from power savings, we need to return
* to the calling function, since nip is not saved across
* all modes.
*/
if (regs->msr & SRR1_WAKEMASK)
regs_set_return_ip(regs, regs->link);
switch (regs->msr & SRR1_WAKEMASK) {
case SRR1_WAKEDEC:
set_dec(1);
break;
case SRR1_WAKEEE:
/*
* Handle these when interrupts get re-enabled and we take
* them as regular exceptions. We are in an NMI context
* and can't handle these here.
*/
break;
default:
/* do system reset */
return 0;
}
/* Set higher astate since we come out of power savings at 0 */
restore_astate(hard_smp_processor_id());
/* everything handled */
regs_set_recoverable(regs);
return 1;
}
static int __init pasemi_idle_init(void)
{
#ifndef CONFIG_PPC_PASEMI_CPUFREQ
pr_warn("No cpufreq driver, powersavings modes disabled\n");
current_mode = 0;
#endif
ppc_md.system_reset_exception = pasemi_system_reset_exception;
ppc_md.power_save = modes[current_mode].entry;
pr_info("Using PA6T idle loop (%s)\n", modes[current_mode].name);
return 0;
}
machine_late_initcall(pasemi, pasemi_idle_init);
static int __init idle_param(char *p)
{
int i;
for (i = 0; i < ARRAY_SIZE(modes); i++) {
if (!strcmp(modes[i].name, p)) {
current_mode = i;
break;
}
}
return 0;
}
early_param("idle", idle_param);
| linux-master | arch/powerpc/platforms/pasemi/idle.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2007 PA Semi, Inc
*
* Parts based on arch/powerpc/sysdev/fsl_soc.c:
*
* 2006 (c) MontaVista Software, Inc.
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/i2c.h>
#ifdef CONFIG_I2C_BOARDINFO
/* The below is from fsl_soc.c. It's copied here because, since there are no
 * official bus bindings at this time, it doesn't make sense to share it
 * across the platforms, even though they happen to be common.
 */
struct i2c_driver_device {
char *of_device;
char *i2c_type;
};
static struct i2c_driver_device i2c_devices[] __initdata = {
{"dallas,ds1338", "ds1338"},
};
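/*
 * Each entry maps an OF compatible string to the Linux I2C driver name that
 * find_i2c_driver() copies into the board info below; a board with another
 * on-board I2C device would add a matching pair here (values are board-specific).
 */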
static int __init find_i2c_driver(struct device_node *node,
struct i2c_board_info *info)
{
int i;
for (i = 0; i < ARRAY_SIZE(i2c_devices); i++) {
if (!of_device_is_compatible(node, i2c_devices[i].of_device))
continue;
if (strscpy(info->type, i2c_devices[i].i2c_type, I2C_NAME_SIZE) < 0)
return -ENOMEM;
return 0;
}
return -ENODEV;
}
static int __init pasemi_register_i2c_devices(void)
{
struct pci_dev *pdev;
struct device_node *adap_node;
struct device_node *node;
pdev = NULL;
while ((pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa003, pdev))) {
adap_node = pci_device_to_OF_node(pdev);
if (!adap_node)
continue;
for_each_child_of_node(adap_node, node) {
struct i2c_board_info info = {};
const u32 *addr;
int len;
addr = of_get_property(node, "reg", &len);
if (!addr || len < sizeof(int) ||
*addr > (1 << 10) - 1) {
pr_warn("pasemi_register_i2c_devices: invalid i2c device entry\n");
continue;
}
info.irq = irq_of_parse_and_map(node, 0);
if (!info.irq)
info.irq = -1;
if (find_i2c_driver(node, &info) < 0)
continue;
info.addr = *addr;
i2c_register_board_info(PCI_FUNC(pdev->devfn), &info,
1);
}
}
return 0;
}
device_initcall(pasemi_register_i2c_devices);
#endif
| linux-master | arch/powerpc/platforms/pasemi/misc.c |
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/irq.h>
#include <linux/dma-mapping.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/8xx_immap.h>
#include "pic.h"
#define PIC_VEC_SPURRIOUS 15
static struct irq_domain *mpc8xx_pic_host;
static unsigned long mpc8xx_cached_irq_mask;
static sysconf8xx_t __iomem *siu_reg;
static inline unsigned long mpc8xx_irqd_to_bit(struct irq_data *d)
{
return 0x80000000 >> irqd_to_hwirq(d);
}
static void mpc8xx_unmask_irq(struct irq_data *d)
{
mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d);
out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
}
static void mpc8xx_mask_irq(struct irq_data *d)
{
mpc8xx_cached_irq_mask &= ~mpc8xx_irqd_to_bit(d);
out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
}
static void mpc8xx_ack(struct irq_data *d)
{
out_be32(&siu_reg->sc_sipend, mpc8xx_irqd_to_bit(d));
}
static void mpc8xx_end_irq(struct irq_data *d)
{
mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d);
out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
}
static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
/* only external IRQ senses are programmable */
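/* even hwirqs are the external IRQx pins (sense set via SIEL); odd ones are internal levels with fixed sense */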
if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !(irqd_to_hwirq(d) & 1)) {
unsigned int siel = in_be32(&siu_reg->sc_siel);
siel |= mpc8xx_irqd_to_bit(d);
out_be32(&siu_reg->sc_siel, siel);
irq_set_handler_locked(d, handle_edge_irq);
}
return 0;
}
static struct irq_chip mpc8xx_pic = {
.name = "8XX SIU",
.irq_unmask = mpc8xx_unmask_irq,
.irq_mask = mpc8xx_mask_irq,
.irq_ack = mpc8xx_ack,
.irq_eoi = mpc8xx_end_irq,
.irq_set_type = mpc8xx_set_irq_type,
};
unsigned int mpc8xx_get_irq(void)
{
int irq;
/* For MPC8xx, read the SIVEC register and shift the bits down
* to get the irq number.
*/
irq = in_be32(&siu_reg->sc_sivec) >> 26;
if (irq == PIC_VEC_SPURRIOUS)
return 0;
return irq_linear_revmap(mpc8xx_pic_host, irq);
}
static int mpc8xx_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
pr_debug("mpc8xx_pic_host_map(%d, 0x%lx)\n", virq, hw);
/* Set default irq handle */
irq_set_chip_and_handler(virq, &mpc8xx_pic, handle_level_irq);
return 0;
}
static int mpc8xx_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
static unsigned char map_pic_senses[4] = {
IRQ_TYPE_EDGE_RISING,
IRQ_TYPE_LEVEL_LOW,
IRQ_TYPE_LEVEL_HIGH,
IRQ_TYPE_EDGE_FALLING,
};
if (intspec[0] > 0x1f)
return 0;
*out_hwirq = intspec[0];
if (intsize > 1 && intspec[1] < 4)
*out_flags = map_pic_senses[intspec[1]];
else
*out_flags = IRQ_TYPE_NONE;
return 0;
}
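/*
 * Hypothetical example: a device-tree "interrupts = <2 1>" specifier maps to
 * hwirq 2 with IRQ_TYPE_LEVEL_LOW via the sense table above.
 */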
static const struct irq_domain_ops mpc8xx_pic_host_ops = {
.map = mpc8xx_pic_host_map,
.xlate = mpc8xx_pic_host_xlate,
};
void __init mpc8xx_pic_init(void)
{
struct resource res;
struct device_node *np;
int ret;
np = of_find_compatible_node(NULL, NULL, "fsl,pq1-pic");
if (np == NULL)
np = of_find_node_by_type(NULL, "mpc8xx-pic");
if (np == NULL) {
printk(KERN_ERR "Could not find fsl,pq1-pic node\n");
return;
}
ret = of_address_to_resource(np, 0, &res);
if (ret)
goto out;
siu_reg = ioremap(res.start, resource_size(&res));
if (!siu_reg)
goto out;
mpc8xx_pic_host = irq_domain_add_linear(np, 64, &mpc8xx_pic_host_ops, NULL);
if (!mpc8xx_pic_host)
printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n");
out:
of_node_put(np);
}
| linux-master | arch/powerpc/platforms/8xx/pic.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Analogue & Micro Adder MPC875 board support
*
* Author: Scott Wood <[email protected]>
*
* Copyright (c) 2007 Freescale Semiconductor, Inc.
*/
#include <linux/init.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cpm1.h>
#include <asm/8xx_immap.h>
#include <asm/udbg.h>
#include "mpc8xx.h"
#include "pic.h"
struct cpm_pin {
int port, pin, flags;
};
static __initdata struct cpm_pin adder875_pins[] = {
/* SMC1 */
{CPM_PORTB, 24, CPM_PIN_INPUT}, /* RX */
{CPM_PORTB, 25, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */
/* MII1 */
{CPM_PORTA, 0, CPM_PIN_INPUT},
{CPM_PORTA, 1, CPM_PIN_INPUT},
{CPM_PORTA, 2, CPM_PIN_INPUT},
{CPM_PORTA, 3, CPM_PIN_INPUT},
{CPM_PORTA, 4, CPM_PIN_OUTPUT},
{CPM_PORTA, 10, CPM_PIN_OUTPUT},
{CPM_PORTA, 11, CPM_PIN_OUTPUT},
{CPM_PORTB, 19, CPM_PIN_INPUT},
{CPM_PORTB, 31, CPM_PIN_INPUT},
{CPM_PORTC, 12, CPM_PIN_INPUT},
{CPM_PORTC, 13, CPM_PIN_INPUT},
{CPM_PORTE, 30, CPM_PIN_OUTPUT},
{CPM_PORTE, 31, CPM_PIN_OUTPUT},
/* MII2 */
{CPM_PORTE, 14, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{CPM_PORTE, 15, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{CPM_PORTE, 16, CPM_PIN_OUTPUT},
{CPM_PORTE, 17, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{CPM_PORTE, 18, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{CPM_PORTE, 19, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{CPM_PORTE, 20, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{CPM_PORTE, 21, CPM_PIN_OUTPUT},
{CPM_PORTE, 22, CPM_PIN_OUTPUT},
{CPM_PORTE, 23, CPM_PIN_OUTPUT},
{CPM_PORTE, 24, CPM_PIN_OUTPUT},
{CPM_PORTE, 25, CPM_PIN_OUTPUT},
{CPM_PORTE, 26, CPM_PIN_OUTPUT},
{CPM_PORTE, 27, CPM_PIN_OUTPUT},
{CPM_PORTE, 28, CPM_PIN_OUTPUT},
{CPM_PORTE, 29, CPM_PIN_OUTPUT},
};
static void __init init_ioports(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(adder875_pins); i++) {
const struct cpm_pin *pin = &adder875_pins[i];
cpm1_set_pin(pin->port, pin->pin, pin->flags);
}
cpm1_clk_setup(CPM_CLK_SMC1, CPM_BRG1, CPM_CLK_RTX);
/* Set FEC1 and FEC2 to MII mode */
clrbits32(&mpc8xx_immr->im_cpm.cp_cptr, 0x00000180);
}
static void __init adder875_setup(void)
{
cpm_reset();
init_ioports();
}
static const struct of_device_id of_bus_ids[] __initconst = {
{ .compatible = "simple-bus", },
{},
};
static int __init declare_of_platform_devices(void)
{
of_platform_bus_probe(NULL, of_bus_ids, NULL);
return 0;
}
machine_device_initcall(adder875, declare_of_platform_devices);
define_machine(adder875) {
.name = "Adder MPC875",
.compatible = "analogue-and-micro,adder875",
.setup_arch = adder875_setup,
.init_IRQ = mpc8xx_pic_init,
.get_irq = mpc8xx_get_irq,
.restart = mpc8xx_restart,
.progress = udbg_progress,
};
| linux-master | arch/powerpc/platforms/8xx/adder875.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 1995 Linus Torvalds
* Adapted from 'alpha' version by Gary Thomas
* Modified by Cort Dougan ([email protected])
* Modified for MBX using prep/chrp/pmac functions by Dan ([email protected])
* Further modified for generic 8xx by Dan.
*/
/*
* bootup setup stuff..
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/rtc.h>
#include <linux/fsl_devices.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <asm/io.h>
#include <asm/8xx_immap.h>
#include <mm/mmu_decl.h>
#include "pic.h"
#include "mpc8xx.h"
/* A place holder for time base interrupts, if they are ever enabled. */
static irqreturn_t timebase_interrupt(int irq, void *dev)
{
printk ("timebase_interrupt()\n");
return IRQ_HANDLED;
}
static int __init get_freq(char *name, unsigned long *val)
{
struct device_node *cpu;
const unsigned int *fp;
int found = 0;
/* The cpu node should have timebase and clock frequency properties */
cpu = of_get_cpu_node(0, NULL);
if (cpu) {
fp = of_get_property(cpu, name, NULL);
if (fp) {
found = 1;
*val = *fp;
}
of_node_put(cpu);
}
return found;
}
/* The decrementer counts at the system (internal) clock frequency divided by
* sixteen, or external oscillator divided by four. We force the processor
* to use system clock divided by sixteen.
*/
void __init mpc8xx_calibrate_decr(void)
{
struct device_node *cpu;
int irq, virq;
/* Unlock the SCCR. */
out_be32(&mpc8xx_immr->im_clkrstk.cark_sccrk, ~KAPWR_KEY);
out_be32(&mpc8xx_immr->im_clkrstk.cark_sccrk, KAPWR_KEY);
/* Force all 8xx processors to use divide by 16 processor clock. */
setbits32(&mpc8xx_immr->im_clkrst.car_sccr, 0x02000000);
/* Default the processor frequency to 50 MHz; the device tree normally
 * overrides this via the cpu node's clock-frequency property below.
 */
ppc_proc_freq = 50000000;
if (!get_freq("clock-frequency", &ppc_proc_freq))
printk(KERN_ERR "WARNING: Estimating processor frequency "
"(not found)\n");
ppc_tb_freq = ppc_proc_freq / 16;
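/* e.g. the 50 MHz default above yields a 3.125 MHz timebase/decrementer */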
printk("Decrementer Frequency = 0x%lx\n", ppc_tb_freq);
/* Perform some more timer/timebase initialization. This used
* to be done elsewhere, but other changes caused it to get
* called more than once....that is a bad thing.
*
* First, unlock all of the registers we are going to modify.
* To protect them from corruption during power down, registers
* that are maintained by keep alive power are "locked". To
* modify these registers we have to write the key value to
* the key location associated with the register.
* Some boards power up with these unlocked, while others
* are locked. Writing anything (including the unlock code?)
* to the unlocked registers will lock them again. So, here
* we guarantee the registers are locked, then we unlock them
* for our use.
*/
out_be32(&mpc8xx_immr->im_sitk.sitk_tbscrk, ~KAPWR_KEY);
out_be32(&mpc8xx_immr->im_sitk.sitk_rtcsck, ~KAPWR_KEY);
out_be32(&mpc8xx_immr->im_sitk.sitk_tbk, ~KAPWR_KEY);
out_be32(&mpc8xx_immr->im_sitk.sitk_tbscrk, KAPWR_KEY);
out_be32(&mpc8xx_immr->im_sitk.sitk_rtcsck, KAPWR_KEY);
out_be32(&mpc8xx_immr->im_sitk.sitk_tbk, KAPWR_KEY);
/* Disable the RTC one second and alarm interrupts. */
clrbits16(&mpc8xx_immr->im_sit.sit_rtcsc, (RTCSC_SIE | RTCSC_ALE));
/* Enable the RTC */
setbits16(&mpc8xx_immr->im_sit.sit_rtcsc, (RTCSC_RTF | RTCSC_RTE));
/* Enabling the decrementer also enables the timebase interrupts
* (or from the other point of view, to get decrementer interrupts
* we have to enable the timebase). The decrementer interrupt
* is wired into the vector table, nothing to do here for that.
*/
cpu = of_get_cpu_node(0, NULL);
virq = irq_of_parse_and_map(cpu, 0);
of_node_put(cpu);
irq = virq_to_hw(virq);
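/*
 * irq/2 converts the SIU source number (external IRQx and internal LVLx
 * sources alternate) into a level 0-7; the one-hot value written to the
 * upper byte of TBSCR (the TBIRQ field) routes the timebase interrupt to
 * that level.
 */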
out_be16(&mpc8xx_immr->im_sit.sit_tbscr,
((1 << (7 - (irq / 2))) << 8) | (TBSCR_TBF | TBSCR_TBE));
if (request_irq(virq, timebase_interrupt, IRQF_NO_THREAD, "tbint",
NULL))
panic("Could not allocate timer IRQ!");
}
/* The RTC on the MPC8xx is an internal register.
* We want to protect this during power down, so we need to unlock,
* modify, and re-lock.
*/
int mpc8xx_set_rtc_time(struct rtc_time *tm)
{
time64_t time;
time = rtc_tm_to_time64(tm);
out_be32(&mpc8xx_immr->im_sitk.sitk_rtck, KAPWR_KEY);
out_be32(&mpc8xx_immr->im_sit.sit_rtc, (u32)time);
out_be32(&mpc8xx_immr->im_sitk.sitk_rtck, ~KAPWR_KEY);
return 0;
}
void mpc8xx_get_rtc_time(struct rtc_time *tm)
{
unsigned long data;
/* Get time from the RTC. */
data = in_be32(&mpc8xx_immr->im_sit.sit_rtc);
rtc_time64_to_tm(data, tm);
return;
}
void __noreturn mpc8xx_restart(char *cmd)
{
local_irq_disable();
setbits32(&mpc8xx_immr->im_clkrst.car_plprcr, 0x00000080);
/* Clear the ME bit in MSR to cause checkstop on machine check
*/
mtmsr(mfmsr() & ~0x1000);
in_8(&mpc8xx_immr->im_clkrst.res[0]);
panic("Restart failed\n");
}
| linux-master | arch/powerpc/platforms/8xx/m8xx_setup.c |
/* arch/powerpc/platforms/8xx/mpc86xads_setup.c
*
* Platform setup for the Freescale mpc86xads board
*
* Vitaly Bordug <[email protected]>
*
* Copyright 2005 MontaVista Software Inc.
*
* Heavily modified by Scott Wood <[email protected]>
* Copyright 2007 Freescale Semiconductor, Inc.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/8xx_immap.h>
#include <asm/cpm1.h>
#include <asm/udbg.h>
#include "mpc86xads.h"
#include "mpc8xx.h"
#include "pic.h"
struct cpm_pin {
int port, pin, flags;
};
static struct cpm_pin mpc866ads_pins[] = {
/* SMC1 */
{CPM_PORTB, 24, CPM_PIN_INPUT}, /* RX */
{CPM_PORTB, 25, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */
/* SMC2 */
{CPM_PORTB, 21, CPM_PIN_INPUT}, /* RX */
{CPM_PORTB, 20, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */
/* SCC1 */
{CPM_PORTA, 6, CPM_PIN_INPUT}, /* CLK1 */
{CPM_PORTA, 7, CPM_PIN_INPUT}, /* CLK2 */
{CPM_PORTA, 14, CPM_PIN_INPUT}, /* TX */
{CPM_PORTA, 15, CPM_PIN_INPUT}, /* RX */
{CPM_PORTB, 19, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TENA */
{CPM_PORTC, 10, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO}, /* RENA */
{CPM_PORTC, 11, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO}, /* CLSN */
/* MII */
{CPM_PORTD, 3, CPM_PIN_OUTPUT},
{CPM_PORTD, 4, CPM_PIN_OUTPUT},
{CPM_PORTD, 5, CPM_PIN_OUTPUT},
{CPM_PORTD, 6, CPM_PIN_OUTPUT},
{CPM_PORTD, 7, CPM_PIN_OUTPUT},
{CPM_PORTD, 8, CPM_PIN_OUTPUT},
{CPM_PORTD, 9, CPM_PIN_OUTPUT},
{CPM_PORTD, 10, CPM_PIN_OUTPUT},
{CPM_PORTD, 11, CPM_PIN_OUTPUT},
{CPM_PORTD, 12, CPM_PIN_OUTPUT},
{CPM_PORTD, 13, CPM_PIN_OUTPUT},
{CPM_PORTD, 14, CPM_PIN_OUTPUT},
{CPM_PORTD, 15, CPM_PIN_OUTPUT},
/* I2C */
{CPM_PORTB, 26, CPM_PIN_INPUT | CPM_PIN_OPENDRAIN},
{CPM_PORTB, 27, CPM_PIN_INPUT | CPM_PIN_OPENDRAIN},
};
static void __init init_ioports(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(mpc866ads_pins); i++) {
struct cpm_pin *pin = &mpc866ads_pins[i];
cpm1_set_pin(pin->port, pin->pin, pin->flags);
}
cpm1_clk_setup(CPM_CLK_SMC1, CPM_BRG1, CPM_CLK_RTX);
cpm1_clk_setup(CPM_CLK_SMC2, CPM_BRG2, CPM_CLK_RTX);
cpm1_clk_setup(CPM_CLK_SCC1, CPM_CLK1, CPM_CLK_TX);
cpm1_clk_setup(CPM_CLK_SCC1, CPM_CLK2, CPM_CLK_RX);
/* Set FEC1 and FEC2 to MII mode */
clrbits32(&mpc8xx_immr->im_cpm.cp_cptr, 0x00000180);
}
static void __init mpc86xads_setup_arch(void)
{
struct device_node *np;
u32 __iomem *bcsr_io;
cpm_reset();
init_ioports();
np = of_find_compatible_node(NULL, NULL, "fsl,mpc866ads-bcsr");
if (!np) {
printk(KERN_CRIT "Could not find fsl,mpc866ads-bcsr node\n");
return;
}
bcsr_io = of_iomap(np, 0);
of_node_put(np);
if (bcsr_io == NULL) {
printk(KERN_CRIT "Could not remap BCSR\n");
return;
}
clrbits32(bcsr_io, BCSR1_RS232EN_1 | BCSR1_RS232EN_2 | BCSR1_ETHEN);
iounmap(bcsr_io);
}
static const struct of_device_id of_bus_ids[] __initconst = {
{ .name = "soc", },
{ .name = "cpm", },
{ .name = "localbus", },
{},
};
static int __init declare_of_platform_devices(void)
{
of_platform_bus_probe(NULL, of_bus_ids, NULL);
return 0;
}
machine_device_initcall(mpc86x_ads, declare_of_platform_devices);
define_machine(mpc86x_ads) {
.name = "MPC86x ADS",
.compatible = "fsl,mpc866ads",
.setup_arch = mpc86xads_setup_arch,
.init_IRQ = mpc8xx_pic_init,
.get_irq = mpc8xx_get_irq,
.restart = mpc8xx_restart,
.calibrate_decr = mpc8xx_calibrate_decr,
.set_rtc_time = mpc8xx_set_rtc_time,
.get_rtc_time = mpc8xx_get_rtc_time,
.progress = udbg_progress,
};
| linux-master | arch/powerpc/platforms/8xx/mpc86xads_setup.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Microcode patches for the CPM as supplied by Motorola.
* This is the one for IIC/SPI. There is a newer one that
* also relocates SMC2, but this would require additional changes
* to uart.c, so I am holding off on that for a moment.
*/
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/8xx_immap.h>
#include <asm/cpm.h>
#include <asm/cpm1.h>
struct patch_params {
ushort rccr;
ushort cpmcr1;
ushort cpmcr2;
ushort cpmcr3;
ushort cpmcr4;
};
/*
* I2C/SPI relocation patch arrays.
*/
#ifdef CONFIG_I2C_SPI_UCODE_PATCH
static char patch_name[] __initdata = "I2C/SPI";
static struct patch_params patch_params __initdata = {
1, 0x802a, 0x8028, 0x802e, 0x802c,
};
static uint patch_2000[] __initdata = {
0x7FFFEFD9, 0x3FFD0000, 0x7FFB49F7, 0x7FF90000,
0x5FEFADF7, 0x5F89ADF7, 0x5FEFAFF7, 0x5F89AFF7,
0x3A9CFBC8, 0xE7C0EDF0, 0x77C1E1BB, 0xF4DC7F1D,
0xABAD932F, 0x4E08FDCF, 0x6E0FAFF8, 0x7CCF76CF,
0xFD1FF9CF, 0xABF88DC6, 0xAB5679F7, 0xB0937383,
0xDFCE79F7, 0xB091E6BB, 0xE5BBE74F, 0xB3FA6F0F,
0x6FFB76CE, 0xEE0DF9CF, 0x2BFBEFEF, 0xCFEEF9CF,
0x76CEAD24, 0x90B2DF9A, 0x7FDDD0BF, 0x4BF847FD,
0x7CCF76CE, 0xCFEF7E1F, 0x7F1D7DFD, 0xF0B6EF71,
0x7FC177C1, 0xFBC86079, 0xE722FBC8, 0x5FFFDFFF,
0x5FB2FFFB, 0xFBC8F3C8, 0x94A67F01, 0x7F1D5F39,
0xAFE85F5E, 0xFFDFDF96, 0xCB9FAF7D, 0x5FC1AFED,
0x8C1C5FC1, 0xAFDD5FC3, 0xDF9A7EFD, 0xB0B25FB2,
0xFFFEABAD, 0x5FB2FFFE, 0x5FCE600B, 0xE6BB600B,
0x5FCEDFC6, 0x27FBEFDF, 0x5FC8CFDE, 0x3A9CE7C0,
0xEDF0F3C8, 0x7F0154CD, 0x7F1D2D3D, 0x363A7570,
0x7E0AF1CE, 0x37EF2E68, 0x7FEE10EC, 0xADF8EFDE,
0xCFEAE52F, 0x7D0FE12B, 0xF1CE5F65, 0x7E0A4DF8,
0xCFEA5F72, 0x7D0BEFEE, 0xCFEA5F74, 0xE522EFDE,
0x5F74CFDA, 0x0B627385, 0xDF627E0A, 0x30D8145B,
0xBFFFF3C8, 0x5FFFDFFF, 0xA7F85F5E, 0xBFFE7F7D,
0x10D31450, 0x5F36BFFF, 0xAF785F5E, 0xBFFDA7F8,
0x5F36BFFE, 0x77FD30C0, 0x4E08FDCF, 0xE5FF6E0F,
0xAFF87E1F, 0x7E0FFD1F, 0xF1CF5F1B, 0xABF80D5E,
0x5F5EFFEF, 0x79F730A2, 0xAFDD5F34, 0x47F85F34,
0xAFED7FDD, 0x50B24978, 0x47FD7F1D, 0x7DFD70AD,
0xEF717EC1, 0x6BA47F01, 0x2D267EFD, 0x30DE5F5E,
0xFFFD5F5E, 0xFFEF5F5E, 0xFFDF0CA0, 0xAFED0A9E,
0xAFDD0C3A, 0x5F3AAFBD, 0x7FBDB082, 0x5F8247F8
};
static uint patch_2f00[] __initdata = {
0x3E303430, 0x34343737, 0xABF7BF9B, 0x994B4FBD,
0xBD599493, 0x349FFF37, 0xFB9B177D, 0xD9936956,
0xBBFDD697, 0xBDD2FD11, 0x31DB9BB3, 0x63139637,
0x93733693, 0x193137F7, 0x331737AF, 0x7BB9B999,
0xBB197957, 0x7FDFD3D5, 0x73B773F7, 0x37933B99,
0x1D115316, 0x99315315, 0x31694BF4, 0xFBDBD359,
0x31497353, 0x76956D69, 0x7B9D9693, 0x13131979,
0x79376935
};
static uint patch_2e00[] __initdata = {};
#endif
/*
* I2C/SPI/SMC1 relocation patch arrays.
*/
#ifdef CONFIG_I2C_SPI_SMC1_UCODE_PATCH
static char patch_name[] __initdata = "I2C/SPI/SMC1";
static struct patch_params patch_params __initdata = {
3, 0x8080, 0x808a, 0x8028, 0x802a,
};
static uint patch_2000[] __initdata = {
0x3fff0000, 0x3ffd0000, 0x3ffb0000, 0x3ff90000,
0x5f13eff8, 0x5eb5eff8, 0x5f88adf7, 0x5fefadf7,
0x3a9cfbc8, 0x77cae1bb, 0xf4de7fad, 0xabae9330,
0x4e08fdcf, 0x6e0faff8, 0x7ccf76cf, 0xfdaff9cf,
0xabf88dc8, 0xab5879f7, 0xb0925d8d, 0xdfd079f7,
0xb090e6bb, 0xe5bbe74f, 0x9e046f0f, 0x6ffb76ce,
0xee0cf9cf, 0x2bfbefef, 0xcfeef9cf, 0x76cead23,
0x90b3df99, 0x7fddd0c1, 0x4bf847fd, 0x7ccf76ce,
0xcfef77ca, 0x7eaf7fad, 0x7dfdf0b7, 0xef7a7fca,
0x77cafbc8, 0x6079e722, 0xfbc85fff, 0xdfff5fb3,
0xfffbfbc8, 0xf3c894a5, 0xe7c9edf9, 0x7f9a7fad,
0x5f36afe8, 0x5f5bffdf, 0xdf95cb9e, 0xaf7d5fc3,
0xafed8c1b, 0x5fc3afdd, 0x5fc5df99, 0x7efdb0b3,
0x5fb3fffe, 0xabae5fb3, 0xfffe5fd0, 0x600be6bb,
0x600b5fd0, 0xdfc827fb, 0xefdf5fca, 0xcfde3a9c,
0xe7c9edf9, 0xf3c87f9e, 0x54ca7fed, 0x2d3a3637,
0x756f7e9a, 0xf1ce37ef, 0x2e677fee, 0x10ebadf8,
0xefdecfea, 0xe52f7d9f, 0xe12bf1ce, 0x5f647e9a,
0x4df8cfea, 0x5f717d9b, 0xefeecfea, 0x5f73e522,
0xefde5f73, 0xcfda0b61, 0x5d8fdf61, 0xe7c9edf9,
0x7e9a30d5, 0x1458bfff, 0xf3c85fff, 0xdfffa7f8,
0x5f5bbffe, 0x7f7d10d0, 0x144d5f33, 0xbfffaf78,
0x5f5bbffd, 0xa7f85f33, 0xbffe77fd, 0x30bd4e08,
0xfdcfe5ff, 0x6e0faff8, 0x7eef7e9f, 0xfdeff1cf,
0x5f17abf8, 0x0d5b5f5b, 0xffef79f7, 0x309eafdd,
0x5f3147f8, 0x5f31afed, 0x7fdd50af, 0x497847fd,
0x7f9e7fed, 0x7dfd70a9, 0xef7e7ece, 0x6ba07f9e,
0x2d227efd, 0x30db5f5b, 0xfffd5f5b, 0xffef5f5b,
0xffdf0c9c, 0xafed0a9a, 0xafdd0c37, 0x5f37afbd,
0x7fbdb081, 0x5f8147f8, 0x3a11e710, 0xedf0ccdd,
0xf3186d0a, 0x7f0e5f06, 0x7fedbb38, 0x3afe7468,
0x7fedf4fc, 0x8ffbb951, 0xb85f77fd, 0xb0df5ddd,
0xdefe7fed, 0x90e1e74d, 0x6f0dcbf7, 0xe7decfed,
0xcb74cfed, 0xcfeddf6d, 0x91714f74, 0x5dd2deef,
0x9e04e7df, 0xefbb6ffb, 0xe7ef7f0e, 0x9e097fed,
0xebdbeffa, 0xeb54affb, 0x7fea90d7, 0x7e0cf0c3,
0xbffff318, 0x5fffdfff, 0xac59efea, 0x7fce1ee5,
0xe2ff5ee1, 0xaffbe2ff, 0x5ee3affb, 0xf9cc7d0f,
0xaef8770f, 0x7d0fb0c6, 0xeffbbfff, 0xcfef5ede,
0x7d0fbfff, 0x5ede4cf8, 0x7fddd0bf, 0x49f847fd,
0x7efdf0bb, 0x7fedfffd, 0x7dfdf0b7, 0xef7e7e1e,
0x5ede7f0e, 0x3a11e710, 0xedf0ccab, 0xfb18ad2e,
0x1ea9bbb8, 0x74283b7e, 0x73c2e4bb, 0x2ada4fb8,
0xdc21e4bb, 0xb2a1ffbf, 0x5e2c43f8, 0xfc87e1bb,
0xe74ffd91, 0x6f0f4fe8, 0xc7ba32e2, 0xf396efeb,
0x600b4f78, 0xe5bb760b, 0x53acaef8, 0x4ef88b0e,
0xcfef9e09, 0xabf8751f, 0xefef5bac, 0x741f4fe8,
0x751e760d, 0x7fdbf081, 0x741cafce, 0xefcc7fce,
0x751e70ac, 0x741ce7bb, 0x3372cfed, 0xafdbefeb,
0xe5bb760b, 0x53f2aef8, 0xafe8e7eb, 0x4bf8771e,
0x7e247fed, 0x4fcbe2cc, 0x7fbc30a9, 0x7b0f7a0f,
0x34d577fd, 0x308b5db7, 0xde553e5f, 0xaf78741f,
0x741f30f0, 0xcfef5e2c, 0x741f3eac, 0xafb8771e,
0x5e677fed, 0x0bd3e2cc, 0x741ccfec, 0xe5ca53cd,
0x6fcb4f74, 0x5dadde4b, 0x2ab63d38, 0x4bb3de30,
0x751f741c, 0x6c42effa, 0xefea7fce, 0x6ffc30be,
0xefec3fca, 0x30b3de2e, 0xadf85d9e, 0xaf7daefd,
0x5d9ede2e, 0x5d9eafdd, 0x761f10ac, 0x1da07efd,
0x30adfffe, 0x4908fb18, 0x5fffdfff, 0xafbb709b,
0x4ef85e67, 0xadf814ad, 0x7a0f70ad, 0xcfef50ad,
0x7a0fde30, 0x5da0afed, 0x3c12780f, 0xefef780f,
0xefef790f, 0xa7f85e0f, 0xffef790f, 0xefef790f,
0x14adde2e, 0x5d9eadfd, 0x5e2dfffb, 0xe79addfd,
0xeff96079, 0x607ae79a, 0xddfceff9, 0x60795dff,
0x607acfef, 0xefefefdf, 0xefbfef7f, 0xeeffedff,
0xebffe7ff, 0xafefafdf, 0xafbfaf7f, 0xaeffadff,
0xabffa7ff, 0x6fef6fdf, 0x6fbf6f7f, 0x6eff6dff,
0x6bff67ff, 0x2fef2fdf, 0x2fbf2f7f, 0x2eff2dff,
0x2bff27ff, 0x4e08fd1f, 0xe5ff6e0f, 0xaff87eef,
0x7e0ffdef, 0xf11f6079, 0xabf8f542, 0x7e0af11c,
0x37cfae3a, 0x7fec90be, 0xadf8efdc, 0xcfeae52f,
0x7d0fe12b, 0xf11c6079, 0x7e0a4df8, 0xcfea5dc4,
0x7d0befec, 0xcfea5dc6, 0xe522efdc, 0x5dc6cfda,
0x4e08fd1f, 0x6e0faff8, 0x7c1f761f, 0xfdeff91f,
0x6079abf8, 0x761cee24, 0xf91f2bfb, 0xefefcfec,
0xf91f6079, 0x761c27fb, 0xefdf5da7, 0xcfdc7fdd,
0xd09c4bf8, 0x47fd7c1f, 0x761ccfcf, 0x7eef7fed,
0x7dfdf093, 0xef7e7f1e, 0x771efb18, 0x6079e722,
0xe6bbe5bb, 0xae0ae5bb, 0x600bae85, 0xe2bbe2bb,
0xe2bbe2bb, 0xaf02e2bb, 0xe2bb2ff9, 0x6079e2bb
};
static uint patch_2f00[] __initdata = {
0x30303030, 0x3e3e3434, 0xabbf9b99, 0x4b4fbdbd,
0x59949334, 0x9fff37fb, 0x9b177dd9, 0x936956bb,
0xfbdd697b, 0xdd2fd113, 0x1db9f7bb, 0x36313963,
0x79373369, 0x3193137f, 0x7331737a, 0xf7bb9b99,
0x9bb19795, 0x77fdfd3d, 0x573b773f, 0x737933f7,
0xb991d115, 0x31699315, 0x31531694, 0xbf4fbdbd,
0x35931497, 0x35376956, 0xbd697b9d, 0x96931313,
0x19797937, 0x6935af78, 0xb9b3baa3, 0xb8788683,
0x368f78f7, 0x87778733, 0x3ffffb3b, 0x8e8f78b8,
0x1d118e13, 0xf3ff3f8b, 0x6bd8e173, 0xd1366856,
0x68d1687b, 0x3daf78b8, 0x3a3a3f87, 0x8f81378f,
0xf876f887, 0x77fd8778, 0x737de8d6, 0xbbf8bfff,
0xd8df87f7, 0xfd876f7b, 0x8bfff8bd, 0x8683387d,
0xb873d87b, 0x3b8fd7f8, 0xf7338883, 0xbb8ee1f8,
0xef837377, 0x3337b836, 0x817d11f8, 0x7378b878,
0xd3368b7d, 0xed731b7d, 0x833731f3, 0xf22f3f23
};
static uint patch_2e00[] __initdata = {
0x27eeeeee, 0xeeeeeeee, 0xeeeeeeee, 0xeeeeeeee,
0xee4bf4fb, 0xdbd259bb, 0x1979577f, 0xdfd2d573,
0xb773f737, 0x4b4fbdbd, 0x25b9b177, 0xd2d17376,
0x956bbfdd, 0x697bdd2f, 0xff9f79ff, 0xff9ff22f
};
#endif
/*
* USB SOF patch arrays.
*/
#ifdef CONFIG_USB_SOF_UCODE_PATCH
static char patch_name[] __initdata = "USB SOF";
static struct patch_params patch_params __initdata = {
9,
};
static uint patch_2000[] __initdata = {
0x7fff0000, 0x7ffd0000, 0x7ffb0000, 0x49f7ba5b,
0xba383ffb, 0xf9b8b46d, 0xe5ab4e07, 0xaf77bffe,
0x3f7bbf79, 0xba5bba38, 0xe7676076, 0x60750000
};
static uint patch_2f00[] __initdata = {
0x3030304c, 0xcab9e441, 0xa1aaf220
};
static uint patch_2e00[] __initdata = {};
#endif
/*
* SMC relocation patch arrays.
*/
#ifdef CONFIG_SMC_UCODE_PATCH
static char patch_name[] __initdata = "SMC";
static struct patch_params patch_params __initdata = {
2, 0x8080, 0x8088,
};
static uint patch_2000[] __initdata = {
0x3fff0000, 0x3ffd0000, 0x3ffb0000, 0x3ff90000,
0x5fefeff8, 0x5f91eff8, 0x3ff30000, 0x3ff10000,
0x3a11e710, 0xedf0ccb9, 0xf318ed66, 0x7f0e5fe2,
0x7fedbb38, 0x3afe7468, 0x7fedf4d8, 0x8ffbb92d,
0xb83b77fd, 0xb0bb5eb9, 0xdfda7fed, 0x90bde74d,
0x6f0dcbd3, 0xe7decfed, 0xcb50cfed, 0xcfeddf6d,
0x914d4f74, 0x5eaedfcb, 0x9ee0e7df, 0xefbb6ffb,
0xe7ef7f0e, 0x9ee57fed, 0xebb7effa, 0xeb30affb,
0x7fea90b3, 0x7e0cf09f, 0xbffff318, 0x5fffdfff,
0xac35efea, 0x7fce1fc1, 0xe2ff5fbd, 0xaffbe2ff,
0x5fbfaffb, 0xf9a87d0f, 0xaef8770f, 0x7d0fb0a2,
0xeffbbfff, 0xcfef5fba, 0x7d0fbfff, 0x5fba4cf8,
0x7fddd09b, 0x49f847fd, 0x7efdf097, 0x7fedfffd,
0x7dfdf093, 0xef7e7e1e, 0x5fba7f0e, 0x3a11e710,
0xedf0cc87, 0xfb18ad0a, 0x1f85bbb8, 0x74283b7e,
0x7375e4bb, 0x2ab64fb8, 0x5c7de4bb, 0x32fdffbf,
0x5f0843f8, 0x7ce3e1bb, 0xe74f7ded, 0x6f0f4fe8,
0xc7ba32be, 0x73f2efeb, 0x600b4f78, 0xe5bb760b,
0x5388aef8, 0x4ef80b6a, 0xcfef9ee5, 0xabf8751f,
0xefef5b88, 0x741f4fe8, 0x751e760d, 0x7fdb70dd,
0x741cafce, 0xefcc7fce, 0x751e7088, 0x741ce7bb,
0x334ecfed, 0xafdbefeb, 0xe5bb760b, 0x53ceaef8,
0xafe8e7eb, 0x4bf8771e, 0x7e007fed, 0x4fcbe2cc,
0x7fbc3085, 0x7b0f7a0f, 0x34b177fd, 0xb0e75e93,
0xdf313e3b, 0xaf78741f, 0x741f30cc, 0xcfef5f08,
0x741f3e88, 0xafb8771e, 0x5f437fed, 0x0bafe2cc,
0x741ccfec, 0xe5ca53a9, 0x6fcb4f74, 0x5e89df27,
0x2a923d14, 0x4b8fdf0c, 0x751f741c, 0x6c1eeffa,
0xefea7fce, 0x6ffc309a, 0xefec3fca, 0x308fdf0a,
0xadf85e7a, 0xaf7daefd, 0x5e7adf0a, 0x5e7aafdd,
0x761f1088, 0x1e7c7efd, 0x3089fffe, 0x4908fb18,
0x5fffdfff, 0xafbbf0f7, 0x4ef85f43, 0xadf81489,
0x7a0f7089, 0xcfef5089, 0x7a0fdf0c, 0x5e7cafed,
0xbc6e780f, 0xefef780f, 0xefef790f, 0xa7f85eeb,
0xffef790f, 0xefef790f, 0x1489df0a, 0x5e7aadfd,
0x5f09fffb, 0xe79aded9, 0xeff96079, 0x607ae79a,
0xded8eff9, 0x60795edb, 0x607acfef, 0xefefefdf,
0xefbfef7f, 0xeeffedff, 0xebffe7ff, 0xafefafdf,
0xafbfaf7f, 0xaeffadff, 0xabffa7ff, 0x6fef6fdf,
0x6fbf6f7f, 0x6eff6dff, 0x6bff67ff, 0x2fef2fdf,
0x2fbf2f7f, 0x2eff2dff, 0x2bff27ff, 0x4e08fd1f,
0xe5ff6e0f, 0xaff87eef, 0x7e0ffdef, 0xf11f6079,
0xabf8f51e, 0x7e0af11c, 0x37cfae16, 0x7fec909a,
0xadf8efdc, 0xcfeae52f, 0x7d0fe12b, 0xf11c6079,
0x7e0a4df8, 0xcfea5ea0, 0x7d0befec, 0xcfea5ea2,
0xe522efdc, 0x5ea2cfda, 0x4e08fd1f, 0x6e0faff8,
0x7c1f761f, 0xfdeff91f, 0x6079abf8, 0x761cee00,
0xf91f2bfb, 0xefefcfec, 0xf91f6079, 0x761c27fb,
0xefdf5e83, 0xcfdc7fdd, 0x50f84bf8, 0x47fd7c1f,
0x761ccfcf, 0x7eef7fed, 0x7dfd70ef, 0xef7e7f1e,
0x771efb18, 0x6079e722, 0xe6bbe5bb, 0x2e66e5bb,
0x600b2ee1, 0xe2bbe2bb, 0xe2bbe2bb, 0x2f5ee2bb,
0xe2bb2ff9, 0x6079e2bb,
};
static uint patch_2f00[] __initdata = {
0x30303030, 0x3e3e3030, 0xaf79b9b3, 0xbaa3b979,
0x9693369f, 0x79f79777, 0x97333fff, 0xfb3b9e9f,
0x79b91d11, 0x9e13f3ff, 0x3f9b6bd9, 0xe173d136,
0x695669d1, 0x697b3daf, 0x79b93a3a, 0x3f979f91,
0x379ff976, 0xf99777fd, 0x9779737d, 0xe9d6bbf9,
0xbfffd9df, 0x97f7fd97, 0x6f7b9bff, 0xf9bd9683,
0x397db973, 0xd97b3b9f, 0xd7f9f733, 0x9993bb9e,
0xe1f9ef93, 0x73773337, 0xb936917d, 0x11f87379,
0xb979d336, 0x8b7ded73, 0x1b7d9337, 0x31f3f22f,
0x3f2327ee, 0xeeeeeeee, 0xeeeeeeee, 0xeeeeeeee,
0xeeeeee4b, 0xf4fbdbd2, 0x58bb1878, 0x577fdfd2,
0xd573b773, 0xf7374b4f, 0xbdbd25b8, 0xb177d2d1,
0x7376856b, 0xbfdd687b, 0xdd2fff8f, 0x78ffff8f,
0xf22f0000,
};
static uint patch_2e00[] __initdata = {};
#endif
static void __init cpm_write_patch(cpm8xx_t *cp, int offset, uint *patch, int len)
{
if (!len)
return;
memcpy_toio(cp->cp_dpmem + offset, patch, len);
}
void __init cpm_load_patch(cpm8xx_t *cp)
{
out_be16(&cp->cp_rccr, 0);
cpm_write_patch(cp, 0, patch_2000, sizeof(patch_2000));
cpm_write_patch(cp, 0xf00, patch_2f00, sizeof(patch_2f00));
cpm_write_patch(cp, 0xe00, patch_2e00, sizeof(patch_2e00));
if (IS_ENABLED(CONFIG_I2C_SPI_UCODE_PATCH) ||
IS_ENABLED(CONFIG_I2C_SPI_SMC1_UCODE_PATCH)) {
u16 rpbase = 0x500;
iic_t *iip;
struct spi_pram *spp;
iip = (iic_t *)&cp->cp_dparam[PROFF_IIC];
out_be16(&iip->iic_rpbase, rpbase);
/* Put SPI above the IIC, also 32-byte aligned. */
spp = (struct spi_pram *)&cp->cp_dparam[PROFF_SPI];
out_be16(&spp->rpbase, (rpbase + sizeof(iic_t) + 31) & ~31);
if (IS_ENABLED(CONFIG_I2C_SPI_SMC1_UCODE_PATCH)) {
smc_uart_t *smp;
smp = (smc_uart_t *)&cp->cp_dparam[PROFF_SMC1];
out_be16(&smp->smc_rpbase, 0x1FC0);
}
}
if (IS_ENABLED(CONFIG_SMC_UCODE_PATCH)) {
smc_uart_t *smp;
if (IS_ENABLED(CONFIG_PPC_EARLY_DEBUG_CPM)) {
int i;
for (i = 0; i < sizeof(*smp); i += 4) {
u32 __iomem *src = (u32 __iomem *)&cp->cp_dparam[PROFF_SMC1 + i];
u32 __iomem *dst = (u32 __iomem *)&cp->cp_dparam[PROFF_DSP1 + i];
out_be32(dst, in_be32(src));
}
}
smp = (smc_uart_t *)&cp->cp_dparam[PROFF_SMC1];
out_be16(&smp->smc_rpbase, 0x1ec0);
smp = (smc_uart_t *)&cp->cp_dparam[PROFF_SMC2];
out_be16(&smp->smc_rpbase, 0x1fc0);
}
out_be16(&cp->cp_cpmcr1, patch_params.cpmcr1);
out_be16(&cp->cp_cpmcr2, patch_params.cpmcr2);
out_be16(&cp->cp_cpmcr3, patch_params.cpmcr3);
out_be16(&cp->cp_cpmcr4, patch_params.cpmcr4);
out_be16(&cp->cp_rccr, patch_params.rccr);
pr_info("%s microcode patch installed\n", patch_name);
}
| linux-master | arch/powerpc/platforms/8xx/micropatch.c |
/*
* Platform setup for the Freescale mpc885ads board
*
* Vitaly Bordug <[email protected]>
*
* Copyright 2005 MontaVista Software Inc.
*
* Heavily modified by Scott Wood <[email protected]>
* Copyright 2007 Freescale Semiconductor, Inc.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/fsl_devices.h>
#include <linux/mii.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/time.h>
#include <asm/8xx_immap.h>
#include <asm/cpm1.h>
#include <asm/udbg.h>
#include "mpc885ads.h"
#include "mpc8xx.h"
#include "pic.h"
static u32 __iomem *bcsr, *bcsr5;
struct cpm_pin {
int port, pin, flags;
};
static struct cpm_pin mpc885ads_pins[] = {
/* SMC1 */
{CPM_PORTB, 24, CPM_PIN_INPUT}, /* RX */
{CPM_PORTB, 25, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */
/* SMC2 */
#ifndef CONFIG_MPC8xx_SECOND_ETH_FEC2
{CPM_PORTE, 21, CPM_PIN_INPUT}, /* RX */
{CPM_PORTE, 20, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */
#endif
/* SCC3 */
{CPM_PORTA, 9, CPM_PIN_INPUT}, /* RX */
{CPM_PORTA, 8, CPM_PIN_INPUT}, /* TX */
{CPM_PORTC, 4, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO}, /* RENA */
{CPM_PORTC, 5, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO}, /* CLSN */
{CPM_PORTE, 27, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TENA */
{CPM_PORTE, 17, CPM_PIN_INPUT}, /* CLK5 */
{CPM_PORTE, 16, CPM_PIN_INPUT}, /* CLK6 */
/* MII1 */
{CPM_PORTA, 0, CPM_PIN_INPUT},
{CPM_PORTA, 1, CPM_PIN_INPUT},
{CPM_PORTA, 2, CPM_PIN_INPUT},
{CPM_PORTA, 3, CPM_PIN_INPUT},
{CPM_PORTA, 4, CPM_PIN_OUTPUT},
{CPM_PORTA, 10, CPM_PIN_OUTPUT},
{CPM_PORTA, 11, CPM_PIN_OUTPUT},
{CPM_PORTB, 19, CPM_PIN_INPUT},
{CPM_PORTB, 31, CPM_PIN_INPUT},
{CPM_PORTC, 12, CPM_PIN_INPUT},
{CPM_PORTC, 13, CPM_PIN_INPUT},
{CPM_PORTE, 30, CPM_PIN_OUTPUT},
{CPM_PORTE, 31, CPM_PIN_OUTPUT},
/* MII2 */
#ifdef CONFIG_MPC8xx_SECOND_ETH_FEC2
{CPM_PORTE, 14, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{CPM_PORTE, 15, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{CPM_PORTE, 16, CPM_PIN_OUTPUT},
{CPM_PORTE, 17, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{CPM_PORTE, 18, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{CPM_PORTE, 19, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{CPM_PORTE, 20, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{CPM_PORTE, 21, CPM_PIN_OUTPUT},
{CPM_PORTE, 22, CPM_PIN_OUTPUT},
{CPM_PORTE, 23, CPM_PIN_OUTPUT},
{CPM_PORTE, 24, CPM_PIN_OUTPUT},
{CPM_PORTE, 25, CPM_PIN_OUTPUT},
{CPM_PORTE, 26, CPM_PIN_OUTPUT},
{CPM_PORTE, 27, CPM_PIN_OUTPUT},
{CPM_PORTE, 28, CPM_PIN_OUTPUT},
{CPM_PORTE, 29, CPM_PIN_OUTPUT},
#endif
/* I2C */
{CPM_PORTB, 26, CPM_PIN_INPUT | CPM_PIN_OPENDRAIN},
{CPM_PORTB, 27, CPM_PIN_INPUT | CPM_PIN_OPENDRAIN},
};
static void __init init_ioports(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(mpc885ads_pins); i++) {
struct cpm_pin *pin = &mpc885ads_pins[i];
cpm1_set_pin(pin->port, pin->pin, pin->flags);
}
cpm1_clk_setup(CPM_CLK_SMC1, CPM_BRG1, CPM_CLK_RTX);
cpm1_clk_setup(CPM_CLK_SMC2, CPM_BRG2, CPM_CLK_RTX);
cpm1_clk_setup(CPM_CLK_SCC3, CPM_CLK5, CPM_CLK_TX);
cpm1_clk_setup(CPM_CLK_SCC3, CPM_CLK6, CPM_CLK_RX);
/* Set FEC1 and FEC2 to MII mode */
clrbits32(&mpc8xx_immr->im_cpm.cp_cptr, 0x00000180);
}
static void __init mpc885ads_setup_arch(void)
{
struct device_node *np;
cpm_reset();
init_ioports();
np = of_find_compatible_node(NULL, NULL, "fsl,mpc885ads-bcsr");
if (!np) {
printk(KERN_CRIT "Could not find fsl,mpc885ads-bcsr node\n");
return;
}
bcsr = of_iomap(np, 0);
bcsr5 = of_iomap(np, 1);
of_node_put(np);
if (!bcsr || !bcsr5) {
printk(KERN_CRIT "Could not remap BCSR\n");
return;
}
clrbits32(&bcsr[1], BCSR1_RS232EN_1);
#ifdef CONFIG_MPC8xx_SECOND_ETH_FEC2
setbits32(&bcsr[1], BCSR1_RS232EN_2);
#else
clrbits32(&bcsr[1], BCSR1_RS232EN_2);
#endif
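/* Pulse the PHY1 (and, when FEC2 is used, PHY2) reset lines; the 1 ms delay is a conservative reset pulse width. */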
clrbits32(bcsr5, BCSR5_MII1_EN);
setbits32(bcsr5, BCSR5_MII1_RST);
udelay(1000);
clrbits32(bcsr5, BCSR5_MII1_RST);
#ifdef CONFIG_MPC8xx_SECOND_ETH_FEC2
clrbits32(bcsr5, BCSR5_MII2_EN);
setbits32(bcsr5, BCSR5_MII2_RST);
udelay(1000);
clrbits32(bcsr5, BCSR5_MII2_RST);
#else
setbits32(bcsr5, BCSR5_MII2_EN);
#endif
#ifdef CONFIG_MPC8xx_SECOND_ETH_SCC3
clrbits32(&bcsr[4], BCSR4_ETH10_RST);
udelay(1000);
setbits32(&bcsr[4], BCSR4_ETH10_RST);
setbits32(&bcsr[1], BCSR1_ETHEN);
np = of_find_node_by_path("/soc@ff000000/cpm@9c0/serial@a80");
#else
np = of_find_node_by_path("/soc@ff000000/cpm@9c0/ethernet@a40");
#endif
/* The SCC3 enet registers overlap the SMC1 registers, so
* one of the two must be removed from the device tree.
*/
if (np) {
of_detach_node(np);
of_node_put(np);
}
}
static const struct of_device_id of_bus_ids[] __initconst = {
{ .name = "soc", },
{ .name = "cpm", },
{ .name = "localbus", },
{},
};
static int __init declare_of_platform_devices(void)
{
/* Publish the QE devices */
of_platform_bus_probe(NULL, of_bus_ids, NULL);
return 0;
}
machine_device_initcall(mpc885_ads, declare_of_platform_devices);
define_machine(mpc885_ads) {
.name = "Freescale MPC885 ADS",
.compatible = "fsl,mpc885ads",
.setup_arch = mpc885ads_setup_arch,
.init_IRQ = mpc8xx_pic_init,
.get_irq = mpc8xx_get_irq,
.restart = mpc8xx_restart,
.calibrate_decr = mpc8xx_calibrate_decr,
.progress = udbg_progress,
};
| linux-master | arch/powerpc/platforms/8xx/mpc885ads_setup.c |
// SPDX-License-Identifier: GPL-2.0
/*
* General Purpose functions for the global management of the
* Communication Processor Module.
* Copyright (c) 1997 Dan error_act ([email protected])
*
* In addition to the individual control of the communication
* channels, there are a few functions that globally affect the
* communication processor.
*
* Buffer descriptors must be allocated from the dual ported memory
* space. The allocator for that is here. When the communication
* process is reset, we reclaim the memory available. There is
* currently no deallocator for this memory.
* The amount of space available is platform dependent. On the
* MBX, the EPPC software loads additional microcode into the
* communication processor, and uses some of the DP ram for this
* purpose. Currently, the first 512 bytes and the last 256 bytes of
* memory are used. Right now I am conservative and only use the
* memory that can never be used for microcode. If there are
* applications that require more DP ram, we can expand the boundaries
* but then we have to be careful of any downloaded microcode.
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <asm/page.h>
#include <asm/8xx_immap.h>
#include <asm/cpm1.h>
#include <asm/io.h>
#include <asm/rheap.h>
#include <asm/cpm.h>
#include <sysdev/fsl_soc.h>
#ifdef CONFIG_8xx_GPIO
#include <linux/gpio/legacy-of-mm-gpiochip.h>
#endif
#define CPM_MAP_SIZE (0x4000)
cpm8xx_t __iomem *cpmp; /* Pointer to comm processor space */
immap_t __iomem *mpc8xx_immr = (void __iomem *)VIRT_IMMR_BASE;
void __init cpm_reset(void)
{
cpmp = &mpc8xx_immr->im_cpm;
#ifndef CONFIG_PPC_EARLY_DEBUG_CPM
/* Perform a reset. */
out_be16(&cpmp->cp_cpcr, CPM_CR_RST | CPM_CR_FLG);
/* Wait for it. */
while (in_be16(&cpmp->cp_cpcr) & CPM_CR_FLG);
#endif
#ifdef CONFIG_UCODE_PATCH
cpm_load_patch(cpmp);
#endif
/*
* Set SDMA Bus Request priority 5.
* On 860T, this also enables FEC priority 6. I am not sure
* this is what we really want for some applications, but the
* manual recommends it.
* Bit 25, FAM can also be set to use FEC aggressive mode (860T).
*/
if ((mfspr(SPRN_IMMR) & 0xffff) == 0x0900) /* MPC885 */
out_be32(&mpc8xx_immr->im_siu_conf.sc_sdcr, 0x40);
else
out_be32(&mpc8xx_immr->im_siu_conf.sc_sdcr, 1);
}
static DEFINE_SPINLOCK(cmd_lock);
#define MAX_CR_CMD_LOOPS 10000
int cpm_command(u32 command, u8 opcode)
{
int i, ret;
unsigned long flags;
if (command & 0xffffff03)
return -EINVAL;
spin_lock_irqsave(&cmd_lock, flags);
ret = 0;
out_be16(&cpmp->cp_cpcr, command | CPM_CR_FLG | (opcode << 8));
for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
if ((in_be16(&cpmp->cp_cpcr) & CPM_CR_FLG) == 0)
goto out;
printk(KERN_ERR "%s(): Not able to issue CPM command\n", __func__);
ret = -EIO;
out:
spin_unlock_irqrestore(&cmd_lock, flags);
return ret;
}
EXPORT_SYMBOL(cpm_command);
/*
* Set a baud rate generator. This needs lots of work. There are
* four BRGs, any of which can be wired to any channel.
* The internal baud rate clock is the system clock divided by 16.
* This assumes the baudrate is 16x oversampled by the uart.
*/
#define BRG_INT_CLK (get_brgfreq())
#define BRG_UART_CLK (BRG_INT_CLK/16)
#define BRG_UART_CLK_DIV16 (BRG_UART_CLK/16)
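/*
 * Worked example (illustrative numbers only; the real input clock comes from
 * get_brgfreq()): with a 50 MHz BRG input clock, BRG_UART_CLK is 3.125 MHz,
 * so a 9600 baud request programs a divider of 3125000/9600 - 1 = 324 with
 * CPM_BRG_EN; only rates below roughly 763 baud need the extra /16 prescaler.
 */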
void
cpm_setbrg(uint brg, uint rate)
{
u32 __iomem *bp;
/* This is good enough to get SMCs running..... */
bp = &cpmp->cp_brgc1;
bp += brg;
/*
* The BRG has a 12-bit counter. For really slow baud rates (or
* really fast processors), we may have to further divide by 16.
*/
if (((BRG_UART_CLK / rate) - 1) < 4096)
out_be32(bp, (((BRG_UART_CLK / rate) - 1) << 1) | CPM_BRG_EN);
else
out_be32(bp, (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) |
CPM_BRG_EN | CPM_BRG_DIV16);
}
EXPORT_SYMBOL(cpm_setbrg);
struct cpm_ioport16 {
__be16 dir, par, odr_sor, dat, intr;
__be16 res[3];
};
struct cpm_ioport32b {
__be32 dir, par, odr, dat;
};
struct cpm_ioport32e {
__be32 dir, par, sor, odr, dat;
};
static void __init cpm1_set_pin32(int port, int pin, int flags)
{
struct cpm_ioport32e __iomem *iop;
pin = 1 << (31 - pin);
if (port == CPM_PORTB)
iop = (struct cpm_ioport32e __iomem *)
&mpc8xx_immr->im_cpm.cp_pbdir;
else
iop = (struct cpm_ioport32e __iomem *)
&mpc8xx_immr->im_cpm.cp_pedir;
if (flags & CPM_PIN_OUTPUT)
setbits32(&iop->dir, pin);
else
clrbits32(&iop->dir, pin);
if (!(flags & CPM_PIN_GPIO))
setbits32(&iop->par, pin);
else
clrbits32(&iop->par, pin);
if (port == CPM_PORTB) {
if (flags & CPM_PIN_OPENDRAIN)
setbits16(&mpc8xx_immr->im_cpm.cp_pbodr, pin);
else
clrbits16(&mpc8xx_immr->im_cpm.cp_pbodr, pin);
}
if (port == CPM_PORTE) {
if (flags & CPM_PIN_SECONDARY)
setbits32(&iop->sor, pin);
else
clrbits32(&iop->sor, pin);
if (flags & CPM_PIN_OPENDRAIN)
setbits32(&mpc8xx_immr->im_cpm.cp_peodr, pin);
else
clrbits32(&mpc8xx_immr->im_cpm.cp_peodr, pin);
}
}
static void __init cpm1_set_pin16(int port, int pin, int flags)
{
struct cpm_ioport16 __iomem *iop =
(struct cpm_ioport16 __iomem *)&mpc8xx_immr->im_ioport;
pin = 1 << (15 - pin);
if (port != 0)
iop += port - 1;
if (flags & CPM_PIN_OUTPUT)
setbits16(&iop->dir, pin);
else
clrbits16(&iop->dir, pin);
if (!(flags & CPM_PIN_GPIO))
setbits16(&iop->par, pin);
else
clrbits16(&iop->par, pin);
if (port == CPM_PORTA) {
if (flags & CPM_PIN_OPENDRAIN)
setbits16(&iop->odr_sor, pin);
else
clrbits16(&iop->odr_sor, pin);
}
if (port == CPM_PORTC) {
if (flags & CPM_PIN_SECONDARY)
setbits16(&iop->odr_sor, pin);
else
clrbits16(&iop->odr_sor, pin);
if (flags & CPM_PIN_FALLEDGE)
setbits16(&iop->intr, pin);
else
clrbits16(&iop->intr, pin);
}
}
void __init cpm1_set_pin(enum cpm_port port, int pin, int flags)
{
if (port == CPM_PORTB || port == CPM_PORTE)
cpm1_set_pin32(port, pin, flags);
else
cpm1_set_pin16(port, pin, flags);
}
int __init cpm1_clk_setup(enum cpm_clk_target target, int clock, int mode)
{
int shift;
int i, bits = 0;
u32 __iomem *reg;
u32 mask = 7;
u8 clk_map[][3] = {
{CPM_CLK_SCC1, CPM_BRG1, 0},
{CPM_CLK_SCC1, CPM_BRG2, 1},
{CPM_CLK_SCC1, CPM_BRG3, 2},
{CPM_CLK_SCC1, CPM_BRG4, 3},
{CPM_CLK_SCC1, CPM_CLK1, 4},
{CPM_CLK_SCC1, CPM_CLK2, 5},
{CPM_CLK_SCC1, CPM_CLK3, 6},
{CPM_CLK_SCC1, CPM_CLK4, 7},
{CPM_CLK_SCC2, CPM_BRG1, 0},
{CPM_CLK_SCC2, CPM_BRG2, 1},
{CPM_CLK_SCC2, CPM_BRG3, 2},
{CPM_CLK_SCC2, CPM_BRG4, 3},
{CPM_CLK_SCC2, CPM_CLK1, 4},
{CPM_CLK_SCC2, CPM_CLK2, 5},
{CPM_CLK_SCC2, CPM_CLK3, 6},
{CPM_CLK_SCC2, CPM_CLK4, 7},
{CPM_CLK_SCC3, CPM_BRG1, 0},
{CPM_CLK_SCC3, CPM_BRG2, 1},
{CPM_CLK_SCC3, CPM_BRG3, 2},
{CPM_CLK_SCC3, CPM_BRG4, 3},
{CPM_CLK_SCC3, CPM_CLK5, 4},
{CPM_CLK_SCC3, CPM_CLK6, 5},
{CPM_CLK_SCC3, CPM_CLK7, 6},
{CPM_CLK_SCC3, CPM_CLK8, 7},
{CPM_CLK_SCC4, CPM_BRG1, 0},
{CPM_CLK_SCC4, CPM_BRG2, 1},
{CPM_CLK_SCC4, CPM_BRG3, 2},
{CPM_CLK_SCC4, CPM_BRG4, 3},
{CPM_CLK_SCC4, CPM_CLK5, 4},
{CPM_CLK_SCC4, CPM_CLK6, 5},
{CPM_CLK_SCC4, CPM_CLK7, 6},
{CPM_CLK_SCC4, CPM_CLK8, 7},
{CPM_CLK_SMC1, CPM_BRG1, 0},
{CPM_CLK_SMC1, CPM_BRG2, 1},
{CPM_CLK_SMC1, CPM_BRG3, 2},
{CPM_CLK_SMC1, CPM_BRG4, 3},
{CPM_CLK_SMC1, CPM_CLK1, 4},
{CPM_CLK_SMC1, CPM_CLK2, 5},
{CPM_CLK_SMC1, CPM_CLK3, 6},
{CPM_CLK_SMC1, CPM_CLK4, 7},
{CPM_CLK_SMC2, CPM_BRG1, 0},
{CPM_CLK_SMC2, CPM_BRG2, 1},
{CPM_CLK_SMC2, CPM_BRG3, 2},
{CPM_CLK_SMC2, CPM_BRG4, 3},
{CPM_CLK_SMC2, CPM_CLK5, 4},
{CPM_CLK_SMC2, CPM_CLK6, 5},
{CPM_CLK_SMC2, CPM_CLK7, 6},
{CPM_CLK_SMC2, CPM_CLK8, 7},
};
switch (target) {
case CPM_CLK_SCC1:
reg = &mpc8xx_immr->im_cpm.cp_sicr;
shift = 0;
break;
case CPM_CLK_SCC2:
reg = &mpc8xx_immr->im_cpm.cp_sicr;
shift = 8;
break;
case CPM_CLK_SCC3:
reg = &mpc8xx_immr->im_cpm.cp_sicr;
shift = 16;
break;
case CPM_CLK_SCC4:
reg = &mpc8xx_immr->im_cpm.cp_sicr;
shift = 24;
break;
case CPM_CLK_SMC1:
reg = &mpc8xx_immr->im_cpm.cp_simode;
shift = 12;
break;
case CPM_CLK_SMC2:
reg = &mpc8xx_immr->im_cpm.cp_simode;
shift = 28;
break;
default:
printk(KERN_ERR "cpm1_clock_setup: invalid clock target\n");
return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(clk_map); i++) {
if (clk_map[i][0] == target && clk_map[i][1] == clock) {
bits = clk_map[i][2];
break;
}
}
if (i == ARRAY_SIZE(clk_map)) {
printk(KERN_ERR "cpm1_clock_setup: invalid clock combination\n");
return -EINVAL;
}
bits <<= shift;
mask <<= shift;
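/*
 * In SICR each SCC has a byte-wide field: the low three bits select the TX
 * clock source and the next three bits the RX source, so CPM_CLK_RTX mirrors
 * the chosen source into both.
 */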
if (reg == &mpc8xx_immr->im_cpm.cp_sicr) {
if (mode == CPM_CLK_RTX) {
bits |= bits << 3;
mask |= mask << 3;
} else if (mode == CPM_CLK_RX) {
bits <<= 3;
mask <<= 3;
}
}
out_be32(reg, (in_be32(reg) & ~mask) | bits);
return 0;
}
/*
* GPIO LIB API implementation
*/
#ifdef CONFIG_8xx_GPIO
struct cpm1_gpio16_chip {
struct of_mm_gpio_chip mm_gc;
spinlock_t lock;
/* shadowed data register to clear/set bits safely */
u16 cpdata;
/* IRQ associated with Pins when relevant */
int irq[16];
};
static void cpm1_gpio16_save_regs(struct of_mm_gpio_chip *mm_gc)
{
struct cpm1_gpio16_chip *cpm1_gc =
container_of(mm_gc, struct cpm1_gpio16_chip, mm_gc);
struct cpm_ioport16 __iomem *iop = mm_gc->regs;
cpm1_gc->cpdata = in_be16(&iop->dat);
}
static int cpm1_gpio16_get(struct gpio_chip *gc, unsigned int gpio)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct cpm_ioport16 __iomem *iop = mm_gc->regs;
u16 pin_mask;
pin_mask = 1 << (15 - gpio);
return !!(in_be16(&iop->dat) & pin_mask);
}
static void __cpm1_gpio16_set(struct of_mm_gpio_chip *mm_gc, u16 pin_mask,
int value)
{
struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
struct cpm_ioport16 __iomem *iop = mm_gc->regs;
if (value)
cpm1_gc->cpdata |= pin_mask;
else
cpm1_gc->cpdata &= ~pin_mask;
out_be16(&iop->dat, cpm1_gc->cpdata);
}
static void cpm1_gpio16_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
unsigned long flags;
u16 pin_mask = 1 << (15 - gpio);
spin_lock_irqsave(&cpm1_gc->lock, flags);
__cpm1_gpio16_set(mm_gc, pin_mask, value);
spin_unlock_irqrestore(&cpm1_gc->lock, flags);
}
static int cpm1_gpio16_to_irq(struct gpio_chip *gc, unsigned int gpio)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
return cpm1_gc->irq[gpio] ? : -ENXIO;
}
static int cpm1_gpio16_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
struct cpm_ioport16 __iomem *iop = mm_gc->regs;
unsigned long flags;
u16 pin_mask = 1 << (15 - gpio);
spin_lock_irqsave(&cpm1_gc->lock, flags);
setbits16(&iop->dir, pin_mask);
__cpm1_gpio16_set(mm_gc, pin_mask, val);
spin_unlock_irqrestore(&cpm1_gc->lock, flags);
return 0;
}
static int cpm1_gpio16_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct cpm1_gpio16_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
struct cpm_ioport16 __iomem *iop = mm_gc->regs;
unsigned long flags;
u16 pin_mask = 1 << (15 - gpio);
spin_lock_irqsave(&cpm1_gc->lock, flags);
clrbits16(&iop->dir, pin_mask);
spin_unlock_irqrestore(&cpm1_gc->lock, flags);
return 0;
}
int cpm1_gpiochip_add16(struct device *dev)
{
struct device_node *np = dev->of_node;
struct cpm1_gpio16_chip *cpm1_gc;
struct of_mm_gpio_chip *mm_gc;
struct gpio_chip *gc;
u16 mask;
cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL);
if (!cpm1_gc)
return -ENOMEM;
spin_lock_init(&cpm1_gc->lock);
if (!of_property_read_u16(np, "fsl,cpm1-gpio-irq-mask", &mask)) {
int i, j;
for (i = 0, j = 0; i < 16; i++)
if (mask & (1 << (15 - i)))
cpm1_gc->irq[i] = irq_of_parse_and_map(np, j++);
}
mm_gc = &cpm1_gc->mm_gc;
gc = &mm_gc->gc;
mm_gc->save_regs = cpm1_gpio16_save_regs;
gc->ngpio = 16;
gc->direction_input = cpm1_gpio16_dir_in;
gc->direction_output = cpm1_gpio16_dir_out;
gc->get = cpm1_gpio16_get;
gc->set = cpm1_gpio16_set;
gc->to_irq = cpm1_gpio16_to_irq;
gc->parent = dev;
gc->owner = THIS_MODULE;
return of_mm_gpiochip_add_data(np, mm_gc, cpm1_gc);
}
struct cpm1_gpio32_chip {
struct of_mm_gpio_chip mm_gc;
spinlock_t lock;
/* shadowed data register to clear/set bits safely */
u32 cpdata;
};
static void cpm1_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
{
struct cpm1_gpio32_chip *cpm1_gc =
container_of(mm_gc, struct cpm1_gpio32_chip, mm_gc);
struct cpm_ioport32b __iomem *iop = mm_gc->regs;
cpm1_gc->cpdata = in_be32(&iop->dat);
}
static int cpm1_gpio32_get(struct gpio_chip *gc, unsigned int gpio)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct cpm_ioport32b __iomem *iop = mm_gc->regs;
u32 pin_mask;
pin_mask = 1 << (31 - gpio);
return !!(in_be32(&iop->dat) & pin_mask);
}
static void __cpm1_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask,
int value)
{
struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
struct cpm_ioport32b __iomem *iop = mm_gc->regs;
if (value)
cpm1_gc->cpdata |= pin_mask;
else
cpm1_gc->cpdata &= ~pin_mask;
out_be32(&iop->dat, cpm1_gc->cpdata);
}
static void cpm1_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
unsigned long flags;
u32 pin_mask = 1 << (31 - gpio);
spin_lock_irqsave(&cpm1_gc->lock, flags);
__cpm1_gpio32_set(mm_gc, pin_mask, value);
spin_unlock_irqrestore(&cpm1_gc->lock, flags);
}
static int cpm1_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
struct cpm_ioport32b __iomem *iop = mm_gc->regs;
unsigned long flags;
u32 pin_mask = 1 << (31 - gpio);
spin_lock_irqsave(&cpm1_gc->lock, flags);
setbits32(&iop->dir, pin_mask);
__cpm1_gpio32_set(mm_gc, pin_mask, val);
spin_unlock_irqrestore(&cpm1_gc->lock, flags);
return 0;
}
static int cpm1_gpio32_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct cpm1_gpio32_chip *cpm1_gc = gpiochip_get_data(&mm_gc->gc);
struct cpm_ioport32b __iomem *iop = mm_gc->regs;
unsigned long flags;
u32 pin_mask = 1 << (31 - gpio);
spin_lock_irqsave(&cpm1_gc->lock, flags);
clrbits32(&iop->dir, pin_mask);
spin_unlock_irqrestore(&cpm1_gc->lock, flags);
return 0;
}
int cpm1_gpiochip_add32(struct device *dev)
{
struct device_node *np = dev->of_node;
struct cpm1_gpio32_chip *cpm1_gc;
struct of_mm_gpio_chip *mm_gc;
struct gpio_chip *gc;
cpm1_gc = kzalloc(sizeof(*cpm1_gc), GFP_KERNEL);
if (!cpm1_gc)
return -ENOMEM;
spin_lock_init(&cpm1_gc->lock);
mm_gc = &cpm1_gc->mm_gc;
gc = &mm_gc->gc;
mm_gc->save_regs = cpm1_gpio32_save_regs;
gc->ngpio = 32;
gc->direction_input = cpm1_gpio32_dir_in;
gc->direction_output = cpm1_gpio32_dir_out;
gc->get = cpm1_gpio32_get;
gc->set = cpm1_gpio32_set;
gc->parent = dev;
gc->owner = THIS_MODULE;
return of_mm_gpiochip_add_data(np, mm_gc, cpm1_gc);
}
#endif /* CONFIG_8xx_GPIO */
| linux-master | arch/powerpc/platforms/8xx/cpm1.c |
/*
* Platform setup for the Embedded Planet EP88xC board
*
* Author: Scott Wood <[email protected]>
* Copyright 2007 Freescale Semiconductor, Inc.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/init.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/udbg.h>
#include <asm/cpm1.h>
#include "mpc8xx.h"
#include "pic.h"
struct cpm_pin {
int port, pin, flags;
};
static struct cpm_pin ep88xc_pins[] = {
/* SMC1 */
{1, 24, CPM_PIN_INPUT}, /* RX */
{1, 25, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */
/* SCC2 */
{0, 12, CPM_PIN_INPUT}, /* TX */
{0, 13, CPM_PIN_INPUT}, /* RX */
{2, 8, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO}, /* CD */
{2, 9, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO}, /* CTS */
{2, 14, CPM_PIN_INPUT}, /* RTS */
/* MII1 */
{0, 0, CPM_PIN_INPUT},
{0, 1, CPM_PIN_INPUT},
{0, 2, CPM_PIN_INPUT},
{0, 3, CPM_PIN_INPUT},
{0, 4, CPM_PIN_OUTPUT},
{0, 10, CPM_PIN_OUTPUT},
{0, 11, CPM_PIN_OUTPUT},
{1, 19, CPM_PIN_INPUT},
{1, 31, CPM_PIN_INPUT},
{2, 12, CPM_PIN_INPUT},
{2, 13, CPM_PIN_INPUT},
{3, 8, CPM_PIN_INPUT},
{4, 30, CPM_PIN_OUTPUT},
{4, 31, CPM_PIN_OUTPUT},
/* MII2 */
{4, 14, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{4, 15, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{4, 16, CPM_PIN_OUTPUT},
{4, 17, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{4, 18, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{4, 19, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{4, 20, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{4, 21, CPM_PIN_OUTPUT},
{4, 22, CPM_PIN_OUTPUT},
{4, 23, CPM_PIN_OUTPUT},
{4, 24, CPM_PIN_OUTPUT},
{4, 25, CPM_PIN_OUTPUT},
{4, 26, CPM_PIN_OUTPUT},
{4, 27, CPM_PIN_OUTPUT},
{4, 28, CPM_PIN_OUTPUT},
{4, 29, CPM_PIN_OUTPUT},
/* USB */
{0, 6, CPM_PIN_INPUT}, /* CLK2 */
{0, 14, CPM_PIN_INPUT}, /* USBOE */
{0, 15, CPM_PIN_INPUT}, /* USBRXD */
{2, 6, CPM_PIN_OUTPUT}, /* USBTXN */
{2, 7, CPM_PIN_OUTPUT}, /* USBTXP */
{2, 10, CPM_PIN_INPUT}, /* USBRXN */
{2, 11, CPM_PIN_INPUT}, /* USBRXP */
/* Misc */
{1, 26, CPM_PIN_INPUT}, /* BRGO2 */
{1, 27, CPM_PIN_INPUT}, /* BRGO1 */
};
static void __init init_ioports(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(ep88xc_pins); i++) {
struct cpm_pin *pin = &ep88xc_pins[i];
cpm1_set_pin(pin->port, pin->pin, pin->flags);
}
cpm1_clk_setup(CPM_CLK_SMC1, CPM_BRG1, CPM_CLK_RTX);
cpm1_clk_setup(CPM_CLK_SCC1, CPM_CLK2, CPM_CLK_TX); /* USB */
cpm1_clk_setup(CPM_CLK_SCC1, CPM_CLK2, CPM_CLK_RX);
cpm1_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_TX);
cpm1_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_RX);
}
static u8 __iomem *ep88xc_bcsr;
#define BCSR7_SCC2_ENABLE 0x10
#define BCSR8_PHY1_ENABLE 0x80
#define BCSR8_PHY1_POWER 0x40
#define BCSR8_PHY2_ENABLE 0x20
#define BCSR8_PHY2_POWER 0x10
#define BCSR9_USB_ENABLE 0x80
#define BCSR9_USB_POWER 0x40
#define BCSR9_USB_HOST 0x20
#define BCSR9_USB_FULL_SPEED_TARGET 0x10
static void __init ep88xc_setup_arch(void)
{
struct device_node *np;
cpm_reset();
init_ioports();
np = of_find_compatible_node(NULL, NULL, "fsl,ep88xc-bcsr");
if (!np) {
printk(KERN_CRIT "Could not find fsl,ep88xc-bcsr node\n");
return;
}
ep88xc_bcsr = of_iomap(np, 0);
of_node_put(np);
if (!ep88xc_bcsr) {
printk(KERN_CRIT "Could not remap BCSR\n");
return;
}
setbits8(&ep88xc_bcsr[7], BCSR7_SCC2_ENABLE);
setbits8(&ep88xc_bcsr[8], BCSR8_PHY1_ENABLE | BCSR8_PHY1_POWER |
BCSR8_PHY2_ENABLE | BCSR8_PHY2_POWER);
}
static const struct of_device_id of_bus_ids[] __initconst = {
{ .name = "soc", },
{ .name = "cpm", },
{ .name = "localbus", },
{},
};
static int __init declare_of_platform_devices(void)
{
/* Publish the QE devices */
of_platform_bus_probe(NULL, of_bus_ids, NULL);
return 0;
}
machine_device_initcall(ep88xc, declare_of_platform_devices);
define_machine(ep88xc) {
.name = "Embedded Planet EP88xC",
.compatible = "fsl,ep88xc",
.setup_arch = ep88xc_setup_arch,
.init_IRQ = mpc8xx_pic_init,
.get_irq = mpc8xx_get_irq,
.restart = mpc8xx_restart,
.calibrate_decr = mpc8xx_calibrate_decr,
.progress = udbg_progress,
};
| linux-master | arch/powerpc/platforms/8xx/ep88xc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Interrupt controller for the
* Communication Processor Module.
* Copyright (c) 1997 Dan error_act ([email protected])
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/platform_device.h>
#include <asm/cpm1.h>
struct cpm_pic_data {
cpic8xx_t __iomem *reg;
struct irq_domain *host;
};
static void cpm_mask_irq(struct irq_data *d)
{
struct cpm_pic_data *data = irq_data_get_irq_chip_data(d);
unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
clrbits32(&data->reg->cpic_cimr, (1 << cpm_vec));
}
static void cpm_unmask_irq(struct irq_data *d)
{
struct cpm_pic_data *data = irq_data_get_irq_chip_data(d);
unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
setbits32(&data->reg->cpic_cimr, (1 << cpm_vec));
}
static void cpm_end_irq(struct irq_data *d)
{
struct cpm_pic_data *data = irq_data_get_irq_chip_data(d);
unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
out_be32(&data->reg->cpic_cisr, (1 << cpm_vec));
}
static struct irq_chip cpm_pic = {
.name = "CPM PIC",
.irq_mask = cpm_mask_irq,
.irq_unmask = cpm_unmask_irq,
.irq_eoi = cpm_end_irq,
};
static int cpm_get_irq(struct irq_desc *desc)
{
struct cpm_pic_data *data = irq_desc_get_handler_data(desc);
int cpm_vec;
/*
* Get the vector by setting the ACK bit and then reading
* the register.
*/
out_be16(&data->reg->cpic_civr, 1);
cpm_vec = in_be16(&data->reg->cpic_civr);
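	/* The vector number sits in the upper five bits of CIVR, shift it down. */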
cpm_vec >>= 11;
return irq_linear_revmap(data->host, cpm_vec);
}
static void cpm_cascade(struct irq_desc *desc)
{
generic_handle_irq(cpm_get_irq(desc));
}
static int cpm_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
irq_set_chip_data(virq, h->host_data);
irq_set_status_flags(virq, IRQ_LEVEL);
irq_set_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq);
return 0;
}
static const struct irq_domain_ops cpm_pic_host_ops = {
.map = cpm_pic_host_map,
};
static int cpm_pic_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
int irq;
struct cpm_pic_data *data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->reg = devm_ioremap(dev, res->start, resource_size(res));
if (!data->reg)
return -ENODEV;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
/* Initialize the CPM interrupt controller. */
out_be32(&data->reg->cpic_cicr,
(CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
((virq_to_hw(irq) / 2) << 13) | CICR_HP_MASK);
out_be32(&data->reg->cpic_cimr, 0);
data->host = irq_domain_add_linear(dev->of_node, 64, &cpm_pic_host_ops, data);
if (!data->host)
return -ENODEV;
irq_set_handler_data(irq, data);
irq_set_chained_handler(irq, cpm_cascade);
setbits32(&data->reg->cpic_cicr, CICR_IEN);
return 0;
}
static const struct of_device_id cpm_pic_match[] = {
{
.compatible = "fsl,cpm1-pic",
}, {
.type = "cpm-pic",
.compatible = "CPM",
}, {},
};
static struct platform_driver cpm_pic_driver = {
.driver = {
.name = "cpm-pic",
.of_match_table = cpm_pic_match,
},
.probe = cpm_pic_probe,
};
static int __init cpm_pic_init(void)
{
return platform_driver_register(&cpm_pic_driver);
}
arch_initcall(cpm_pic_init);
/*
* The CPM can generate the error interrupt when there is a race condition
* between generating and masking interrupts. All we have to do is ACK it
* and return. This is a no-op function so we don't need any special
* tests in the interrupt handler.
*/
static irqreturn_t cpm_error_interrupt(int irq, void *dev)
{
return IRQ_HANDLED;
}
static int cpm_error_probe(struct platform_device *pdev)
{
int irq;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
return request_irq(irq, cpm_error_interrupt, IRQF_NO_THREAD, "error", NULL);
}
static const struct of_device_id cpm_error_ids[] = {
{ .compatible = "fsl,cpm1" },
{ .type = "cpm" },
{},
};
static struct platform_driver cpm_error_driver = {
.driver = {
.name = "cpm-error",
.of_match_table = cpm_error_ids,
},
.probe = cpm_error_probe,
};
static int __init cpm_error_init(void)
{
return platform_driver_register(&cpm_error_driver);
}
subsys_initcall(cpm_error_init);
| linux-master | arch/powerpc/platforms/8xx/cpm1-ic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*/
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/ptrace.h>
#include <asm/reg.h>
int machine_check_8xx(struct pt_regs *regs)
{
unsigned long reason = regs->msr;
pr_err("Machine check in kernel mode.\n");
pr_err("Caused by (from SRR1=%lx): ", reason);
if (reason & 0x40000000)
pr_cont("Fetch error at address %lx\n", regs->nip);
else
pr_cont("Data access error at address %lx\n", regs->dar);
#ifdef CONFIG_PCI
/* the qspan pci read routines can cause machine checks -- Cort
*
* yuck !!! that totally needs to go away ! There are better ways
* to deal with that than having a wart in the mcheck handler.
* -- BenH
*/
bad_page_fault(regs, SIGBUS);
return 1;
#else
return 0;
#endif
}
| linux-master | arch/powerpc/platforms/8xx/machine_check.c |
/*
* Platform setup for the MPC8xx based boards from TQM.
*
* Heiko Schocher <[email protected]>
* Copyright 2010 DENX Software Engineering GmbH
*
* based on:
* Vitaly Bordug <[email protected]>
*
* Copyright 2005 MontaVista Software Inc.
*
* Heavily modified by Scott Wood <[email protected]>
* Copyright 2007 Freescale Semiconductor, Inc.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/init.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/fsl_devices.h>
#include <linux/mii.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/time.h>
#include <asm/8xx_immap.h>
#include <asm/cpm1.h>
#include <asm/udbg.h>
#include "mpc8xx.h"
#include "pic.h"
struct cpm_pin {
int port, pin, flags;
};
static struct cpm_pin tqm8xx_pins[] __initdata = {
/* SMC1 */
{CPM_PORTB, 24, CPM_PIN_INPUT}, /* RX */
{CPM_PORTB, 25, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */
/* SCC1 */
{CPM_PORTA, 5, CPM_PIN_INPUT}, /* CLK1 */
{CPM_PORTA, 7, CPM_PIN_INPUT}, /* CLK2 */
{CPM_PORTA, 14, CPM_PIN_INPUT}, /* TX */
{CPM_PORTA, 15, CPM_PIN_INPUT}, /* RX */
{CPM_PORTC, 15, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TENA */
{CPM_PORTC, 10, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO},
{CPM_PORTC, 11, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO},
};
static struct cpm_pin tqm8xx_fec_pins[] __initdata = {
/* MII */
{CPM_PORTD, 3, CPM_PIN_OUTPUT},
{CPM_PORTD, 4, CPM_PIN_OUTPUT},
{CPM_PORTD, 5, CPM_PIN_OUTPUT},
{CPM_PORTD, 6, CPM_PIN_OUTPUT},
{CPM_PORTD, 7, CPM_PIN_OUTPUT},
{CPM_PORTD, 8, CPM_PIN_OUTPUT},
{CPM_PORTD, 9, CPM_PIN_OUTPUT},
{CPM_PORTD, 10, CPM_PIN_OUTPUT},
{CPM_PORTD, 11, CPM_PIN_OUTPUT},
{CPM_PORTD, 12, CPM_PIN_OUTPUT},
{CPM_PORTD, 13, CPM_PIN_OUTPUT},
{CPM_PORTD, 14, CPM_PIN_OUTPUT},
{CPM_PORTD, 15, CPM_PIN_OUTPUT},
};
static void __init init_pins(int n, struct cpm_pin *pin)
{
int i;
for (i = 0; i < n; i++) {
cpm1_set_pin(pin->port, pin->pin, pin->flags);
pin++;
}
}
static void __init init_ioports(void)
{
struct device_node *dnode;
struct property *prop;
int len;
init_pins(ARRAY_SIZE(tqm8xx_pins), &tqm8xx_pins[0]);
cpm1_clk_setup(CPM_CLK_SMC1, CPM_BRG1, CPM_CLK_RTX);
dnode = of_find_node_by_name(NULL, "aliases");
if (dnode == NULL)
return;
prop = of_find_property(dnode, "ethernet1", &len);
of_node_put(dnode);
if (prop == NULL)
return;
/* init FEC pins */
init_pins(ARRAY_SIZE(tqm8xx_fec_pins), &tqm8xx_fec_pins[0]);
}
static void __init tqm8xx_setup_arch(void)
{
cpm_reset();
init_ioports();
}
static const struct of_device_id of_bus_ids[] __initconst = {
{ .name = "soc", },
{ .name = "cpm", },
{ .name = "localbus", },
{ .compatible = "simple-bus" },
{},
};
static int __init declare_of_platform_devices(void)
{
of_platform_bus_probe(NULL, of_bus_ids, NULL);
return 0;
}
machine_device_initcall(tqm8xx, declare_of_platform_devices);
define_machine(tqm8xx) {
.name = "TQM8xx",
.compatible = "tqc,tqm8xx",
.setup_arch = tqm8xx_setup_arch,
.init_IRQ = mpc8xx_pic_init,
.get_irq = mpc8xx_get_irq,
.restart = mpc8xx_restart,
.calibrate_decr = mpc8xx_calibrate_decr,
.set_rtc_time = mpc8xx_set_rtc_time,
.get_rtc_time = mpc8xx_get_rtc_time,
.progress = udbg_progress,
};
| linux-master | arch/powerpc/platforms/8xx/tqm8xx_setup.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Ebony board specific routines
*
* Matt Porter <[email protected]>
* Copyright 2002-2005 MontaVista Software Inc.
*
* Eugene Surovegin <[email protected]> or <[email protected]>
* Copyright (c) 2003-2005 Zultys Technologies
*
* Rewritten and ported to the merged powerpc tree:
* Copyright 2007 David Gibson <[email protected]>, IBM Corporation.
*/
#include <linux/init.h>
#include <linux/of_platform.h>
#include <linux/rtc.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/time.h>
#include <asm/uic.h>
#include <asm/pci-bridge.h>
#include <asm/ppc4xx.h>
static const struct of_device_id ebony_of_bus[] __initconst = {
{ .compatible = "ibm,plb4", },
{ .compatible = "ibm,opb", },
{ .compatible = "ibm,ebc", },
{},
};
static int __init ebony_device_probe(void)
{
of_platform_bus_probe(NULL, ebony_of_bus, NULL);
of_instantiate_rtc();
return 0;
}
machine_device_initcall(ebony, ebony_device_probe);
/*
* Called very early, MMU is off, device-tree isn't unflattened
*/
static int __init ebony_probe(void)
{
pci_set_flags(PCI_REASSIGN_ALL_RSRC);
return 1;
}
define_machine(ebony) {
.name = "Ebony",
.compatible = "ibm,ebony",
.probe = ebony_probe,
.progress = udbg_progress,
.init_IRQ = uic_init_tree,
.get_irq = uic_get_irq,
.restart = ppc4xx_reset_system,
};
| linux-master | arch/powerpc/platforms/44x/ebony.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PowerPC 476FPE board specific routines
*
* Copyright © 2013 Tony Breeds IBM Corporation
* Copyright © 2013 Alistair Popple IBM Corporation
*
* Based on earlier code:
* Matt Porter <[email protected]>
* Copyright 2002-2005 MontaVista Software Inc.
*
* Eugene Surovegin <[email protected]> or <[email protected]>
* Copyright (c) 2003-2005 Zultys Technologies
*
* Rewritten and ported to the merged powerpc tree:
* Copyright 2007 David Gibson <[email protected]>, IBM Corporation.
* Copyright © 2011 David Kliekamp IBM Corporation
*/
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/rtc.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/time.h>
#include <asm/uic.h>
#include <asm/ppc4xx.h>
#include <asm/mpic.h>
#include <asm/mmu.h>
#include <asm/swiotlb.h>
#include <linux/pci.h>
#include <linux/i2c.h>
static const struct of_device_id ppc47x_of_bus[] __initconst = {
{ .compatible = "ibm,plb4", },
{ .compatible = "ibm,plb6", },
{ .compatible = "ibm,opb", },
{ .compatible = "ibm,ebc", },
{},
};
/* The EEPROM is missing and the default values are bogus. This forces USB in
* to EHCI mode */
static void quirk_ppc_currituck_usb_fixup(struct pci_dev *dev)
{
if (of_machine_is_compatible("ibm,currituck")) {
pci_write_config_dword(dev, 0xe0, 0x0114231f);
pci_write_config_dword(dev, 0xe4, 0x00006c40);
}
}
DECLARE_PCI_FIXUP_HEADER(0x1033, 0x0035, quirk_ppc_currituck_usb_fixup);
/* Akebono has an AVR microcontroller attached to the I2C bus
* which is used to power off/reset the system. */
/* AVR I2C Commands */
#define AVR_PWRCTL_CMD (0x26)
/* Flags for the power control I2C commands */
#define AVR_PWRCTL_PWROFF (0x01)
#define AVR_PWRCTL_RESET (0x02)
static struct i2c_client *avr_i2c_client;
static void __noreturn avr_halt_system(int pwrctl_flags)
{
/* Request the AVR to reset the system */
i2c_smbus_write_byte_data(avr_i2c_client,
AVR_PWRCTL_CMD, pwrctl_flags);
/* Wait for system to be reset */
while (1)
;
}
static void avr_power_off_system(void)
{
avr_halt_system(AVR_PWRCTL_PWROFF);
}
static void __noreturn avr_reset_system(char *cmd)
{
avr_halt_system(AVR_PWRCTL_RESET);
}
static int avr_probe(struct i2c_client *client)
{
avr_i2c_client = client;
ppc_md.restart = avr_reset_system;
pm_power_off = avr_power_off_system;
return 0;
}
static const struct i2c_device_id avr_id[] = {
{ "akebono-avr", 0 },
{ }
};
static struct i2c_driver avr_driver = {
.driver = {
.name = "akebono-avr",
},
.probe = avr_probe,
.id_table = avr_id,
};
static int __init ppc47x_device_probe(void)
{
i2c_add_driver(&avr_driver);
of_platform_bus_probe(NULL, ppc47x_of_bus, NULL);
return 0;
}
machine_device_initcall(ppc47x_akebono, ppc47x_device_probe);
machine_device_initcall(ppc47x_currituck, ppc47x_device_probe);
static void __init ppc47x_init_irq(void)
{
struct device_node *np;
/* Find top level interrupt controller */
for_each_node_with_property(np, "interrupt-controller") {
if (!of_property_present(np, "interrupts"))
break;
}
if (np == NULL)
panic("Can't find top level interrupt controller");
/* Check type and do appropriate initialization */
if (of_device_is_compatible(np, "chrp,open-pic")) {
/* The MPIC driver will get everything it needs from the
* device-tree, just pass 0 to all arguments
*/
struct mpic *mpic =
mpic_alloc(np, 0, MPIC_NO_RESET, 0, 0, " MPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
ppc_md.get_irq = mpic_get_irq;
} else
panic("Unrecognized top level interrupt controller");
of_node_put(np);
}
#ifdef CONFIG_SMP
static void smp_ppc47x_setup_cpu(int cpu)
{
mpic_setup_this_cpu();
}
static int smp_ppc47x_kick_cpu(int cpu)
{
struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
const u64 *spin_table_addr_prop;
u32 *spin_table;
extern void start_secondary_47x(void);
BUG_ON(cpunode == NULL);
/* Assume spin table. We could test for the enable-method in
* the device-tree but currently there's little point as it's
* our only supported method
*/
spin_table_addr_prop =
of_get_property(cpunode, "cpu-release-addr", NULL);
if (spin_table_addr_prop == NULL) {
pr_err("CPU%d: Can't start, missing cpu-release-addr !\n",
cpu);
return 1;
}
/* Assume it's mapped as part of the linear mapping. This is a bit
* fishy but will work fine for now
*
* XXX: Is there any reason to assume differently?
*/
spin_table = (u32 *)__va(*spin_table_addr_prop);
pr_debug("CPU%d: Spin table mapped at %p\n", cpu, spin_table);
spin_table[3] = cpu;
smp_wmb();
spin_table[1] = __pa(start_secondary_47x);
mb();
return 0;
}
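/*
 * For reference, the secondary CPU side of this handshake is roughly the
 * following (a simplified sketch, not the actual start_secondary_47x code):
 * the held CPU spins on entry 1 of its spin table until the boot CPU
 * publishes the release address (here the physical address of
 * start_secondary_47x), then branches to it, picking up its CPU number
 * from entry 3.
 *
 *	while (!spin_table[1])
 *		barrier();
 *	((void (*)(u32))spin_table[1])(spin_table[3]);
 */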
static struct smp_ops_t ppc47x_smp_ops = {
.probe = smp_mpic_probe,
.message_pass = smp_mpic_message_pass,
.setup_cpu = smp_ppc47x_setup_cpu,
.kick_cpu = smp_ppc47x_kick_cpu,
.give_timebase = smp_generic_give_timebase,
.take_timebase = smp_generic_take_timebase,
};
static void __init ppc47x_smp_init(void)
{
if (mmu_has_feature(MMU_FTR_TYPE_47x))
smp_ops = &ppc47x_smp_ops;
}
#else /* CONFIG_SMP */
static void __init ppc47x_smp_init(void) { }
#endif /* CONFIG_SMP */
static void __init ppc47x_setup_arch(void)
{
/* No need to check the DMA config as we /know/ our windows are all of
	 * RAM. Let's hope that doesn't change */
swiotlb_detect_4g();
ppc47x_smp_init();
}
static int board_rev = -1;
static int __init ppc47x_get_board_rev(void)
{
int reg;
u8 __iomem *fpga;
struct device_node *np = NULL;
if (of_machine_is_compatible("ibm,currituck")) {
np = of_find_compatible_node(NULL, NULL, "ibm,currituck-fpga");
reg = 0;
} else if (of_machine_is_compatible("ibm,akebono")) {
np = of_find_compatible_node(NULL, NULL, "ibm,akebono-fpga");
reg = 2;
}
if (!np)
goto fail;
fpga = of_iomap(np, 0);
of_node_put(np);
if (!fpga)
goto fail;
board_rev = ioread8(fpga + reg) & 0x03;
pr_info("%s: Found board revision %d\n", __func__, board_rev);
iounmap(fpga);
return 0;
fail:
pr_info("%s: Unable to find board revision\n", __func__);
return 0;
}
machine_arch_initcall(ppc47x_akebono, ppc47x_get_board_rev);
machine_arch_initcall(ppc47x_currituck, ppc47x_get_board_rev);
/* The USB controller interrupt should have been hardware swizzled but it wasn't :( */
static void ppc47x_pci_irq_fixup(struct pci_dev *dev)
{
if (dev->vendor == 0x1033 && (dev->device == 0x0035 ||
dev->device == 0x00e0)) {
if (board_rev == 0) {
dev->irq = irq_create_mapping(NULL, 47);
pr_info("%s: Mapping irq %d\n", __func__, dev->irq);
} else if (board_rev == 2) {
dev->irq = irq_create_mapping(NULL, 49);
pr_info("%s: Mapping irq %d\n", __func__, dev->irq);
} else {
pr_alert("%s: Unknown board revision\n", __func__);
}
}
}
define_machine(ppc47x_akebono) {
.name = "PowerPC 47x (akebono)",
.compatible = "ibm,akebono",
.progress = udbg_progress,
.init_IRQ = ppc47x_init_irq,
.setup_arch = ppc47x_setup_arch,
.restart = ppc4xx_reset_system,
};
define_machine(ppc47x_currituck) {
.name = "PowerPC 47x (currituck)",
.compatible = "ibm,currituck",
.progress = udbg_progress,
.init_IRQ = ppc47x_init_irq,
.pci_irq_fixup = ppc47x_pci_irq_fixup,
.setup_arch = ppc47x_setup_arch,
.restart = ppc4xx_reset_system,
};
| linux-master | arch/powerpc/platforms/44x/ppc476.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PIKA Warp(tm) board specific routines
*
* Copyright (c) 2008-2009 PIKA Technologies
* Sean MacLennan <[email protected]>
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/of_platform.h>
#include <linux/kthread.h>
#include <linux/leds.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/time.h>
#include <asm/uic.h>
#include <asm/ppc4xx.h>
#include <asm/dma.h>
static const struct of_device_id warp_of_bus[] __initconst = {
{ .compatible = "ibm,plb4", },
{ .compatible = "ibm,opb", },
{ .compatible = "ibm,ebc", },
{},
};
static int __init warp_device_probe(void)
{
of_platform_bus_probe(NULL, warp_of_bus, NULL);
return 0;
}
machine_device_initcall(warp, warp_device_probe);
define_machine(warp) {
.name = "Warp",
.compatible = "pika,warp",
.progress = udbg_progress,
.init_IRQ = uic_init_tree,
.get_irq = uic_get_irq,
.restart = ppc4xx_reset_system,
};
static int __init warp_post_info(void)
{
struct device_node *np;
void __iomem *fpga;
u32 post1, post2;
/* Sighhhh... POST information is in the sd area. */
np = of_find_compatible_node(NULL, NULL, "pika,fpga-sd");
if (np == NULL)
return -ENOENT;
fpga = of_iomap(np, 0);
of_node_put(np);
if (fpga == NULL)
return -ENOENT;
post1 = in_be32(fpga + 0x40);
post2 = in_be32(fpga + 0x44);
iounmap(fpga);
if (post1 || post2)
printk(KERN_INFO "Warp POST %08x %08x\n", post1, post2);
else
printk(KERN_INFO "Warp POST OK\n");
return 0;
}
#ifdef CONFIG_SENSORS_AD7414
static void __iomem *dtm_fpga;
#define WARP_GREEN_LED 0
#define WARP_RED_LED 1
static struct gpio_led warp_gpio_led_pins[] = {
[WARP_GREEN_LED] = {
.name = "green",
.default_state = LEDS_DEFSTATE_KEEP,
.gpiod = NULL, /* to be filled by pika_setup_leds() */
},
[WARP_RED_LED] = {
.name = "red",
.default_state = LEDS_DEFSTATE_KEEP,
.gpiod = NULL, /* to be filled by pika_setup_leds() */
},
};
static struct gpio_led_platform_data warp_gpio_led_data = {
.leds = warp_gpio_led_pins,
.num_leds = ARRAY_SIZE(warp_gpio_led_pins),
};
static struct platform_device warp_gpio_leds = {
.name = "leds-gpio",
.id = -1,
.dev = {
.platform_data = &warp_gpio_led_data,
},
};
static irqreturn_t temp_isr(int irq, void *context)
{
int value = 1;
local_irq_disable();
gpiod_set_value(warp_gpio_led_pins[WARP_GREEN_LED].gpiod, 0);
printk(KERN_EMERG "\n\nCritical Temperature Shutdown\n\n");
while (1) {
if (dtm_fpga) {
unsigned reset = in_be32(dtm_fpga + 0x14);
out_be32(dtm_fpga + 0x14, reset);
}
gpiod_set_value(warp_gpio_led_pins[WARP_RED_LED].gpiod, value);
value ^= 1;
mdelay(500);
}
/* Not reached */
return IRQ_HANDLED;
}
/*
* Because green and red power LEDs are normally driven by leds-gpio driver,
* but in case of critical temperature shutdown we want to drive them
* ourselves, we acquire both and then create leds-gpio platform device
* ourselves, instead of doing it through device tree. This way we can still
* keep access to the gpios and use them when needed.
*/
static int pika_setup_leds(void)
{
struct device_node *np, *child;
struct gpio_desc *gpio;
struct gpio_led *led;
int led_count = 0;
int error;
int i;
np = of_find_compatible_node(NULL, NULL, "warp-power-leds");
if (!np) {
printk(KERN_ERR __FILE__ ": Unable to find leds\n");
return -ENOENT;
}
for_each_child_of_node(np, child) {
for (i = 0; i < ARRAY_SIZE(warp_gpio_led_pins); i++) {
led = &warp_gpio_led_pins[i];
if (!of_node_name_eq(child, led->name))
continue;
if (led->gpiod) {
printk(KERN_ERR __FILE__ ": %s led has already been defined\n",
led->name);
continue;
}
gpio = fwnode_gpiod_get_index(of_fwnode_handle(child),
NULL, 0, GPIOD_ASIS,
led->name);
error = PTR_ERR_OR_ZERO(gpio);
if (error) {
printk(KERN_ERR __FILE__ ": Failed to get %s led gpio: %d\n",
led->name, error);
of_node_put(child);
goto err_cleanup_pins;
}
led->gpiod = gpio;
led_count++;
}
}
of_node_put(np);
/* Skip device registration if no leds have been defined */
if (led_count) {
error = platform_device_register(&warp_gpio_leds);
if (error) {
printk(KERN_ERR __FILE__ ": Unable to add leds-gpio: %d\n",
error);
goto err_cleanup_pins;
}
}
return 0;
err_cleanup_pins:
for (i = 0; i < ARRAY_SIZE(warp_gpio_led_pins); i++) {
led = &warp_gpio_led_pins[i];
gpiod_put(led->gpiod);
led->gpiod = NULL;
}
return error;
}
static void pika_setup_critical_temp(struct device_node *np,
struct i2c_client *client)
{
int irq, rc;
/* Do this before enabling critical temp interrupt since we
* may immediately interrupt.
*/
pika_setup_leds();
/* These registers are in 1 degree increments. */
i2c_smbus_write_byte_data(client, 2, 65); /* Thigh */
i2c_smbus_write_byte_data(client, 3, 0); /* Tlow */
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
printk(KERN_ERR __FILE__ ": Unable to get ad7414 irq\n");
return;
}
rc = request_irq(irq, temp_isr, 0, "ad7414", NULL);
if (rc) {
printk(KERN_ERR __FILE__
": Unable to request ad7414 irq %d = %d\n", irq, rc);
return;
}
}
static inline void pika_dtm_check_fan(void __iomem *fpga)
{
static int fan_state;
u32 fan = in_be32(fpga + 0x34) & (1 << 14);
if (fan_state != fan) {
fan_state = fan;
if (fan)
printk(KERN_WARNING "Fan rotation error detected."
" Please check hardware.\n");
}
}
static int pika_dtm_thread(void __iomem *fpga)
{
struct device_node *np;
struct i2c_client *client;
np = of_find_compatible_node(NULL, NULL, "adi,ad7414");
if (np == NULL)
return -ENOENT;
client = of_find_i2c_device_by_node(np);
if (client == NULL) {
of_node_put(np);
return -ENOENT;
}
pika_setup_critical_temp(np, client);
of_node_put(np);
printk(KERN_INFO "Warp DTM thread running.\n");
while (!kthread_should_stop()) {
int val;
val = i2c_smbus_read_word_data(client, 0);
if (val < 0)
dev_dbg(&client->dev, "DTM read temp failed.\n");
else {
s16 temp = swab16(val);
out_be32(fpga + 0x20, temp);
}
pika_dtm_check_fan(fpga);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ);
}
return 0;
}
static int __init pika_dtm_start(void)
{
struct task_struct *dtm_thread;
struct device_node *np;
np = of_find_compatible_node(NULL, NULL, "pika,fpga");
if (np == NULL)
return -ENOENT;
dtm_fpga = of_iomap(np, 0);
of_node_put(np);
if (dtm_fpga == NULL)
return -ENOENT;
/* Must get post info before thread starts. */
warp_post_info();
dtm_thread = kthread_run(pika_dtm_thread, dtm_fpga, "pika-dtm");
if (IS_ERR(dtm_thread)) {
iounmap(dtm_fpga);
return PTR_ERR(dtm_thread);
}
return 0;
}
machine_late_initcall(warp, pika_dtm_start);
#else /* !CONFIG_SENSORS_AD7414 */
machine_late_initcall(warp, warp_post_info);
#endif
| linux-master | arch/powerpc/platforms/44x/warp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* FSP-2 board specific routines
*
* Based on earlier code:
* Matt Porter <[email protected]>
* Copyright 2002-2005 MontaVista Software Inc.
*
* Eugene Surovegin <[email protected]> or <[email protected]>
* Copyright (c) 2003-2005 Zultys Technologies
*
* Rewritten and ported to the merged powerpc tree:
* Copyright 2007 David Gibson <[email protected]>, IBM Corporation.
*/
#include <linux/init.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/rtc.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/time.h>
#include <asm/uic.h>
#include <asm/ppc4xx.h>
#include <asm/dcr.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include "fsp2.h"
#define FSP2_BUS_ERR "ibm,bus-error-irq"
#define FSP2_CMU_ERR "ibm,cmu-error-irq"
#define FSP2_CONF_ERR "ibm,conf-error-irq"
#define FSP2_OPBD_ERR "ibm,opbd-error-irq"
#define FSP2_MCUE "ibm,mc-ue-irq"
#define FSP2_RST_WRN "ibm,reset-warning-irq"
static __initdata struct of_device_id fsp2_of_bus[] = {
{ .compatible = "ibm,plb4", },
{ .compatible = "ibm,plb6", },
{ .compatible = "ibm,opb", },
{},
};
static void l2regs(void)
{
pr_err("L2 Controller:\n");
pr_err("MCK: 0x%08x\n", mfl2(L2MCK));
pr_err("INT: 0x%08x\n", mfl2(L2INT));
pr_err("PLBSTAT0: 0x%08x\n", mfl2(L2PLBSTAT0));
pr_err("PLBSTAT1: 0x%08x\n", mfl2(L2PLBSTAT1));
pr_err("ARRSTAT0: 0x%08x\n", mfl2(L2ARRSTAT0));
pr_err("ARRSTAT1: 0x%08x\n", mfl2(L2ARRSTAT1));
pr_err("ARRSTAT2: 0x%08x\n", mfl2(L2ARRSTAT2));
pr_err("CPUSTAT: 0x%08x\n", mfl2(L2CPUSTAT));
pr_err("RACSTAT0: 0x%08x\n", mfl2(L2RACSTAT0));
pr_err("WACSTAT0: 0x%08x\n", mfl2(L2WACSTAT0));
pr_err("WACSTAT1: 0x%08x\n", mfl2(L2WACSTAT1));
pr_err("WACSTAT2: 0x%08x\n", mfl2(L2WACSTAT2));
pr_err("WDFSTAT: 0x%08x\n", mfl2(L2WDFSTAT));
pr_err("LOG0: 0x%08x\n", mfl2(L2LOG0));
pr_err("LOG1: 0x%08x\n", mfl2(L2LOG1));
pr_err("LOG2: 0x%08x\n", mfl2(L2LOG2));
pr_err("LOG3: 0x%08x\n", mfl2(L2LOG3));
pr_err("LOG4: 0x%08x\n", mfl2(L2LOG4));
pr_err("LOG5: 0x%08x\n", mfl2(L2LOG5));
}
static void show_plbopb_regs(u32 base, int num)
{
pr_err("\nPLBOPB Bridge %d:\n", num);
pr_err("GESR0: 0x%08x\n", mfdcr(base + PLB4OPB_GESR0));
pr_err("GESR1: 0x%08x\n", mfdcr(base + PLB4OPB_GESR1));
pr_err("GESR2: 0x%08x\n", mfdcr(base + PLB4OPB_GESR2));
pr_err("GEARU: 0x%08x\n", mfdcr(base + PLB4OPB_GEARU));
pr_err("GEAR: 0x%08x\n", mfdcr(base + PLB4OPB_GEAR));
}
static irqreturn_t bus_err_handler(int irq, void *data)
{
pr_err("Bus Error\n");
l2regs();
pr_err("\nPLB6 Controller:\n");
pr_err("BC_SHD: 0x%08x\n", mfdcr(DCRN_PLB6_SHD));
pr_err("BC_ERR: 0x%08x\n", mfdcr(DCRN_PLB6_ERR));
pr_err("\nPLB6-to-PLB4 Bridge:\n");
pr_err("ESR: 0x%08x\n", mfdcr(DCRN_PLB6PLB4_ESR));
pr_err("EARH: 0x%08x\n", mfdcr(DCRN_PLB6PLB4_EARH));
pr_err("EARL: 0x%08x\n", mfdcr(DCRN_PLB6PLB4_EARL));
pr_err("\nPLB4-to-PLB6 Bridge:\n");
pr_err("ESR: 0x%08x\n", mfdcr(DCRN_PLB4PLB6_ESR));
pr_err("EARH: 0x%08x\n", mfdcr(DCRN_PLB4PLB6_EARH));
pr_err("EARL: 0x%08x\n", mfdcr(DCRN_PLB4PLB6_EARL));
pr_err("\nPLB6-to-MCIF Bridge:\n");
pr_err("BESR0: 0x%08x\n", mfdcr(DCRN_PLB6MCIF_BESR0));
pr_err("BESR1: 0x%08x\n", mfdcr(DCRN_PLB6MCIF_BESR1));
pr_err("BEARH: 0x%08x\n", mfdcr(DCRN_PLB6MCIF_BEARH));
pr_err("BEARL: 0x%08x\n", mfdcr(DCRN_PLB6MCIF_BEARL));
pr_err("\nPLB4 Arbiter:\n");
pr_err("P0ESRH 0x%08x\n", mfdcr(DCRN_PLB4_P0ESRH));
pr_err("P0ESRL 0x%08x\n", mfdcr(DCRN_PLB4_P0ESRL));
pr_err("P0EARH 0x%08x\n", mfdcr(DCRN_PLB4_P0EARH));
pr_err("P0EARH 0x%08x\n", mfdcr(DCRN_PLB4_P0EARH));
pr_err("P1ESRH 0x%08x\n", mfdcr(DCRN_PLB4_P1ESRH));
pr_err("P1ESRL 0x%08x\n", mfdcr(DCRN_PLB4_P1ESRL));
pr_err("P1EARH 0x%08x\n", mfdcr(DCRN_PLB4_P1EARH));
pr_err("P1EARH 0x%08x\n", mfdcr(DCRN_PLB4_P1EARH));
show_plbopb_regs(DCRN_PLB4OPB0_BASE, 0);
show_plbopb_regs(DCRN_PLB4OPB1_BASE, 1);
show_plbopb_regs(DCRN_PLB4OPB2_BASE, 2);
show_plbopb_regs(DCRN_PLB4OPB3_BASE, 3);
pr_err("\nPLB4-to-AHB Bridge:\n");
pr_err("ESR: 0x%08x\n", mfdcr(DCRN_PLB4AHB_ESR));
pr_err("SEUAR: 0x%08x\n", mfdcr(DCRN_PLB4AHB_SEUAR));
pr_err("SELAR: 0x%08x\n", mfdcr(DCRN_PLB4AHB_SELAR));
pr_err("\nAHB-to-PLB4 Bridge:\n");
pr_err("\nESR: 0x%08x\n", mfdcr(DCRN_AHBPLB4_ESR));
pr_err("\nEAR: 0x%08x\n", mfdcr(DCRN_AHBPLB4_EAR));
panic("Bus Error\n");
}
static irqreturn_t cmu_err_handler(int irq, void *data)
{
pr_err("CMU Error\n");
pr_err("FIR0: 0x%08x\n", mfcmu(CMUN_FIR0));
panic("CMU Error\n");
}
static irqreturn_t conf_err_handler(int irq, void *data)
{
pr_err("Configuration Logic Error\n");
pr_err("CONF_FIR: 0x%08x\n", mfdcr(DCRN_CONF_FIR_RWC));
pr_err("RPERR0: 0x%08x\n", mfdcr(DCRN_CONF_RPERR0));
pr_err("RPERR1: 0x%08x\n", mfdcr(DCRN_CONF_RPERR1));
panic("Configuration Logic Error\n");
}
static irqreturn_t opbd_err_handler(int irq, void *data)
{
panic("OPBD Error\n");
}
static irqreturn_t mcue_handler(int irq, void *data)
{
pr_err("DDR: Uncorrectable Error\n");
pr_err("MCSTAT: 0x%08x\n",
mfdcr(DCRN_DDR34_BASE + DCRN_DDR34_MCSTAT));
pr_err("MCOPT1: 0x%08x\n",
mfdcr(DCRN_DDR34_BASE + DCRN_DDR34_MCOPT1));
pr_err("MCOPT2: 0x%08x\n",
mfdcr(DCRN_DDR34_BASE + DCRN_DDR34_MCOPT2));
pr_err("PHYSTAT: 0x%08x\n",
mfdcr(DCRN_DDR34_BASE + DCRN_DDR34_PHYSTAT));
pr_err("CFGR0: 0x%08x\n",
mfdcr(DCRN_DDR34_BASE + DCRN_DDR34_CFGR0));
pr_err("CFGR1: 0x%08x\n",
mfdcr(DCRN_DDR34_BASE + DCRN_DDR34_CFGR1));
pr_err("CFGR2: 0x%08x\n",
mfdcr(DCRN_DDR34_BASE + DCRN_DDR34_CFGR2));
pr_err("CFGR3: 0x%08x\n",
mfdcr(DCRN_DDR34_BASE + DCRN_DDR34_CFGR3));
pr_err("SCRUB_CNTL: 0x%08x\n",
mfdcr(DCRN_DDR34_BASE + DCRN_DDR34_SCRUB_CNTL));
pr_err("ECCERR_PORT0: 0x%08x\n",
mfdcr(DCRN_DDR34_BASE + DCRN_DDR34_ECCERR_PORT0));
pr_err("ECCERR_ADDR_PORT0: 0x%08x\n",
mfdcr(DCRN_DDR34_BASE + DCRN_DDR34_ECCERR_ADDR_PORT0));
pr_err("ECCERR_CNT_PORT0: 0x%08x\n",
mfdcr(DCRN_DDR34_BASE + DCRN_DDR34_ECCERR_COUNT_PORT0));
pr_err("ECC_CHECK_PORT0: 0x%08x\n",
mfdcr(DCRN_DDR34_BASE + DCRN_DDR34_ECC_CHECK_PORT0));
pr_err("MCER0: 0x%08x\n",
mfdcr(DCRN_CW_BASE + DCRN_CW_MCER0));
pr_err("MCER1: 0x%08x\n",
mfdcr(DCRN_CW_BASE + DCRN_CW_MCER1));
pr_err("BESR: 0x%08x\n",
mfdcr(DCRN_PLB6MCIF_BESR0));
pr_err("BEARL: 0x%08x\n",
mfdcr(DCRN_PLB6MCIF_BEARL));
pr_err("BEARH: 0x%08x\n",
mfdcr(DCRN_PLB6MCIF_BEARH));
panic("DDR: Uncorrectable Error\n");
}
static irqreturn_t rst_wrn_handler(int irq, void *data)
{
u32 crcs = mfcmu(CMUN_CRCS);
switch (crcs & CRCS_STAT_MASK) {
case CRCS_STAT_CHIP_RST_B:
panic("Received chassis-initiated reset request");
default:
panic("Unknown external reset: CRCS=0x%x", crcs);
}
}
static void __init node_irq_request(const char *compat, irq_handler_t errirq_handler)
{
struct device_node *np;
unsigned int irq;
int32_t rc;
for_each_compatible_node(np, NULL, compat) {
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
pr_err("device tree node %pOFn is missing a interrupt",
np);
of_node_put(np);
return;
}
rc = request_irq(irq, errirq_handler, 0, np->name, np);
if (rc) {
pr_err("fsp_of_probe: request_irq failed: np=%pOF rc=%d",
np, rc);
of_node_put(np);
return;
}
}
}
static void __init critical_irq_setup(void)
{
node_irq_request(FSP2_CMU_ERR, cmu_err_handler);
node_irq_request(FSP2_BUS_ERR, bus_err_handler);
node_irq_request(FSP2_CONF_ERR, conf_err_handler);
node_irq_request(FSP2_OPBD_ERR, opbd_err_handler);
node_irq_request(FSP2_MCUE, mcue_handler);
node_irq_request(FSP2_RST_WRN, rst_wrn_handler);
}
static int __init fsp2_device_probe(void)
{
of_platform_bus_probe(NULL, fsp2_of_bus, NULL);
return 0;
}
machine_device_initcall(fsp2, fsp2_device_probe);
static int __init fsp2_probe(void)
{
u32 val;
unsigned long root = of_get_flat_dt_root();
if (!of_flat_dt_is_compatible(root, "ibm,fsp2"))
return 0;
/* Clear BC_ERR and mask snoopable request plb errors. */
val = mfdcr(DCRN_PLB6_CR0);
val |= 0x20000000;
mtdcr(DCRN_PLB6_BASE, val);
mtdcr(DCRN_PLB6_HD, 0xffff0000);
mtdcr(DCRN_PLB6_SHD, 0xffff0000);
/* TVSENSE reset is blocked (clock gated) by the POR default of the TVS
* sleep config bit. As a consequence, TVSENSE will provide erratic
* sensor values, which may result in spurious (parity) errors
* recorded in the CMU FIR and leading to erroneous interrupt requests
* once the CMU interrupt is unmasked.
*/
/* 1. set TVS1[UNDOZE] */
val = mfcmu(CMUN_TVS1);
val |= 0x4;
mtcmu(CMUN_TVS1, val);
/* 2. clear FIR[TVS] and FIR[TVSPAR] */
val = mfcmu(CMUN_FIR0);
val |= 0x30000000;
mtcmu(CMUN_FIR0, val);
/* L2 machine checks */
mtl2(L2PLBMCKEN0, 0xffffffff);
mtl2(L2PLBMCKEN1, 0x0000ffff);
mtl2(L2ARRMCKEN0, 0xffffffff);
mtl2(L2ARRMCKEN1, 0xffffffff);
mtl2(L2ARRMCKEN2, 0xfffff000);
mtl2(L2CPUMCKEN, 0xffffffff);
mtl2(L2RACMCKEN0, 0xffffffff);
mtl2(L2WACMCKEN0, 0xffffffff);
mtl2(L2WACMCKEN1, 0xffffffff);
mtl2(L2WACMCKEN2, 0xffffffff);
mtl2(L2WDFMCKEN, 0xffffffff);
/* L2 interrupts */
mtl2(L2PLBINTEN1, 0xffff0000);
/*
* At a global level, enable all L2 machine checks and interrupts
* reported by the L2 subsystems, except for the external machine check
* input (UIC0.1).
*/
mtl2(L2MCKEN, 0x000007ff);
mtl2(L2INTEN, 0x000004ff);
/* Enable FSP-2 configuration logic parity errors */
mtdcr(DCRN_CONF_EIR_RS, 0x80000000);
return 1;
}
static void __init fsp2_irq_init(void)
{
uic_init_tree();
critical_irq_setup();
}
define_machine(fsp2) {
.name = "FSP-2",
.probe = fsp2_probe,
.progress = udbg_progress,
.init_IRQ = fsp2_irq_init,
.get_irq = uic_get_irq,
.restart = ppc4xx_reset_system,
};
| linux-master | arch/powerpc/platforms/44x/fsp2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic PowerPC 44x platform support
*
* Copyright 2008 IBM Corporation
*
* This implements simple platform support for PowerPC 44x chips. This is
* mostly used for eval boards or other simple and "generic" 44x boards. If
* your board has custom functions or hardware, then you will likely want to
* implement your own board.c file to accommodate it.
*/
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/ppc4xx.h>
#include <asm/time.h>
#include <asm/udbg.h>
#include <asm/uic.h>
#include <linux/init.h>
#include <linux/of_platform.h>
static const struct of_device_id ppc44x_of_bus[] __initconst = {
{ .compatible = "ibm,plb4", },
{ .compatible = "ibm,opb", },
{ .compatible = "ibm,ebc", },
{ .compatible = "simple-bus", },
{},
};
static int __init ppc44x_device_probe(void)
{
of_platform_bus_probe(NULL, ppc44x_of_bus, NULL);
return 0;
}
machine_device_initcall(ppc44x_simple, ppc44x_device_probe);
/* This is the list of boards that can be supported by this simple
* platform code. This does _not_ mean the boards are compatible,
* as they most certainly are not from a device tree perspective.
* However, their differences are handled by the device tree and the
* drivers and therefore they don't need custom board support files.
*
* Again, if your board needs to do things differently then create a
* board.c file for it rather than adding it to this list.
*/
static char *board[] __initdata = {
"amcc,arches",
"amcc,bamboo",
"apm,bluestone",
"amcc,glacier",
"ibm,ebony",
"amcc,eiger",
"amcc,katmai",
"amcc,rainier",
"amcc,redwood",
"amcc,sequoia",
"amcc,taishan",
"amcc,yosemite",
"mosaixtech,icon"
};
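/*
 * Adding another simple 44x board is a one-line change to the list above
 * (e.g. a hypothetical "acme,board44x" compatible string), provided its
 * device tree describes all the hardware the generic drivers need.
 */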
static int __init ppc44x_probe(void)
{
int i = 0;
for (i = 0; i < ARRAY_SIZE(board); i++) {
if (of_machine_is_compatible(board[i])) {
pci_set_flags(PCI_REASSIGN_ALL_RSRC);
return 1;
}
}
return 0;
}
define_machine(ppc44x_simple) {
.name = "PowerPC 44x Platform",
.probe = ppc44x_probe,
.progress = udbg_progress,
.init_IRQ = uic_init_tree,
.get_irq = uic_get_irq,
.restart = ppc4xx_reset_system,
};
| linux-master | arch/powerpc/platforms/44x/ppc44x_simple.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* This contain platform specific code for APM PPC460EX based Canyonlands
* board.
*
* Copyright (c) 2010, Applied Micro Circuits Corporation
* Author: Rupjyoti Sarmah <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/pci-bridge.h>
#include <asm/ppc4xx.h>
#include <asm/udbg.h>
#include <asm/uic.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/delay.h>
#include "44x.h"
#define BCSR_USB_EN 0x11
static const struct of_device_id ppc460ex_of_bus[] __initconst = {
{ .compatible = "ibm,plb4", },
{ .compatible = "ibm,opb", },
{ .compatible = "ibm,ebc", },
{ .compatible = "simple-bus", },
{},
};
static int __init ppc460ex_device_probe(void)
{
of_platform_bus_probe(NULL, ppc460ex_of_bus, NULL);
return 0;
}
machine_device_initcall(canyonlands, ppc460ex_device_probe);
/* Using this code only for the Canyonlands board. */
static int __init ppc460ex_probe(void)
{
pci_set_flags(PCI_REASSIGN_ALL_RSRC);
return 1;
}
/* USB PHY fixup code on Canyonlands kit. */
static int __init ppc460ex_canyonlands_fixup(void)
{
u8 __iomem *bcsr ;
void __iomem *vaddr;
struct device_node *np;
int ret = 0;
np = of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-bcsr");
if (!np) {
printk(KERN_ERR "failed did not find amcc, ppc460ex bcsr node\n");
return -ENODEV;
}
bcsr = of_iomap(np, 0);
of_node_put(np);
if (!bcsr) {
printk(KERN_CRIT "Could not remap bcsr\n");
ret = -ENODEV;
goto err_bcsr;
}
np = of_find_compatible_node(NULL, NULL, "ibm,ppc4xx-gpio");
	if (!np) {
		printk(KERN_ERR "could not find ibm,ppc4xx-gpio node\n");
		ret = -ENODEV;
		goto err_bcsr;	/* don't leak the BCSR mapping */
	}
vaddr = of_iomap(np, 0);
of_node_put(np);
if (!vaddr) {
printk(KERN_CRIT "Could not get gpio node address\n");
ret = -ENODEV;
goto err_gpio;
}
/* Disable USB, through the BCSR7 bits */
setbits8(&bcsr[7], BCSR_USB_EN);
/* Wait for a while after reset */
msleep(100);
/* Enable USB here */
clrbits8(&bcsr[7], BCSR_USB_EN);
/*
* Configure multiplexed gpio16 and gpio19 as alternate1 output
* source after USB reset. In this configuration gpio16 will be
* USB2HStop and gpio19 will be USB2DStop. For more details refer to
* table 34-7 of PPC460EX user manual.
*/
setbits32((vaddr + GPIO0_OSRH), 0x42000000);
setbits32((vaddr + GPIO0_TSRH), 0x42000000);
err_gpio:
iounmap(vaddr);
err_bcsr:
iounmap(bcsr);
return ret;
}
machine_device_initcall(canyonlands, ppc460ex_canyonlands_fixup);
define_machine(canyonlands) {
.name = "Canyonlands",
.compatible = "amcc,canyonlands",
.probe = ppc460ex_probe,
.progress = udbg_progress,
.init_IRQ = uic_init_tree,
.get_irq = uic_get_irq,
.restart = ppc4xx_reset_system,
};
| linux-master | arch/powerpc/platforms/44x/canyonlands.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Sam440ep board specific routines based off bamboo.c code
* original copyrights below
*
* Wade Farnsworth <[email protected]>
* Copyright 2004 MontaVista Software Inc.
*
* Rewritten and ported to the merged powerpc tree:
* Josh Boyer <[email protected]>
* Copyright 2007 IBM Corporation
*
* Modified from bamboo.c for sam440ep:
* Copyright 2008 Giuseppe Coviello <[email protected]>
*/
#include <linux/init.h>
#include <linux/of_platform.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/time.h>
#include <asm/uic.h>
#include <asm/pci-bridge.h>
#include <asm/ppc4xx.h>
#include <linux/i2c.h>
static const struct of_device_id sam440ep_of_bus[] __initconst = {
{ .compatible = "ibm,plb4", },
{ .compatible = "ibm,opb", },
{ .compatible = "ibm,ebc", },
{},
};
static int __init sam440ep_device_probe(void)
{
of_platform_bus_probe(NULL, sam440ep_of_bus, NULL);
return 0;
}
machine_device_initcall(sam440ep, sam440ep_device_probe);
static int __init sam440ep_probe(void)
{
pci_set_flags(PCI_REASSIGN_ALL_RSRC);
return 1;
}
define_machine(sam440ep) {
.name = "Sam440ep",
.compatible = "acube,sam440ep",
.probe = sam440ep_probe,
.progress = udbg_progress,
.init_IRQ = uic_init_tree,
.get_irq = uic_get_irq,
.restart = ppc4xx_reset_system,
};
static struct i2c_board_info sam440ep_rtc_info = {
.type = "m41st85",
.addr = 0x68,
.irq = -1,
};
static int __init sam440ep_setup_rtc(void)
{
return i2c_register_board_info(0, &sam440ep_rtc_info, 1);
}
machine_device_initcall(sam440ep, sam440ep_setup_rtc);
| linux-master | arch/powerpc/platforms/44x/sam440ep.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 IBM Corp.
*
* Based on arch/powerpc/platforms/pasemi/idle.c:
* Copyright (C) 2006-2007 PA Semi, Inc
*
* Added by: Jerone Young <[email protected]>
*/
#include <linux/of.h>
#include <linux/kernel.h>
#include <asm/machdep.h>
static int mode_spin;
static void ppc44x_idle(void)
{
unsigned long msr_save;
msr_save = mfmsr();
/* set wait state MSR */
mtmsr(msr_save|MSR_WE|MSR_EE|MSR_CE|MSR_DE);
isync();
/* return to initial state */
mtmsr(msr_save);
isync();
}
int __init ppc44x_idle_init(void)
{
if (!mode_spin) {
/* If we are not setting spin mode
then we set to wait mode */
ppc_md.power_save = &ppc44x_idle;
}
return 0;
}
arch_initcall(ppc44x_idle_init);
static int __init idle_param(char *p)
{
if (!strcmp("spin", p)) {
mode_spin = 1;
ppc_md.power_save = NULL;
}
return 0;
}
early_param("idle", idle_param);
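/*
 * Example: booting with "idle=spin" on the kernel command line leaves the
 * CPU spinning in the generic idle loop; without it, ppc44x_idle_init()
 * installs the MSR[WE] wait-state idle above.
 */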
| linux-master | arch/powerpc/platforms/44x/idle.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
*/
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/ptrace.h>
#include <asm/reg.h>
#include <asm/cacheflush.h>
int machine_check_440A(struct pt_regs *regs)
{
unsigned long reason = regs->esr;
printk("Machine check in kernel mode.\n");
if (reason & ESR_IMCP){
printk("Instruction Synchronous Machine Check exception\n");
mtspr(SPRN_ESR, reason & ~ESR_IMCP);
}
else {
u32 mcsr = mfspr(SPRN_MCSR);
if (mcsr & MCSR_IB)
printk("Instruction Read PLB Error\n");
if (mcsr & MCSR_DRB)
printk("Data Read PLB Error\n");
if (mcsr & MCSR_DWB)
printk("Data Write PLB Error\n");
if (mcsr & MCSR_TLBP)
printk("TLB Parity Error\n");
if (mcsr & MCSR_ICP){
flush_instruction_cache();
printk("I-Cache Parity Error\n");
}
if (mcsr & MCSR_DCSP)
printk("D-Cache Search Parity Error\n");
if (mcsr & MCSR_DCFP)
printk("D-Cache Flush Parity Error\n");
if (mcsr & MCSR_IMPE)
printk("Machine Check exception is imprecise\n");
/* Clear MCSR */
mtspr(SPRN_MCSR, mcsr);
}
return 0;
}
#ifdef CONFIG_PPC_47x
int machine_check_47x(struct pt_regs *regs)
{
unsigned long reason = regs->esr;
u32 mcsr;
printk(KERN_ERR "Machine check in kernel mode.\n");
if (reason & ESR_IMCP) {
printk(KERN_ERR "Instruction Synchronous Machine Check exception\n");
mtspr(SPRN_ESR, reason & ~ESR_IMCP);
return 0;
}
mcsr = mfspr(SPRN_MCSR);
if (mcsr & MCSR_IB)
printk(KERN_ERR "Instruction Read PLB Error\n");
if (mcsr & MCSR_DRB)
printk(KERN_ERR "Data Read PLB Error\n");
if (mcsr & MCSR_DWB)
printk(KERN_ERR "Data Write PLB Error\n");
if (mcsr & MCSR_TLBP)
printk(KERN_ERR "TLB Parity Error\n");
if (mcsr & MCSR_ICP) {
flush_instruction_cache();
printk(KERN_ERR "I-Cache Parity Error\n");
}
if (mcsr & MCSR_DCSP)
printk(KERN_ERR "D-Cache Search Parity Error\n");
if (mcsr & PPC47x_MCSR_GPR)
printk(KERN_ERR "GPR Parity Error\n");
if (mcsr & PPC47x_MCSR_FPR)
printk(KERN_ERR "FPR Parity Error\n");
if (mcsr & PPC47x_MCSR_IPR)
printk(KERN_ERR "Machine Check exception is imprecise\n");
/* Clear MCSR */
mtspr(SPRN_MCSR, mcsr);
return 0;
}
#endif /* CONFIG_PPC_47x */
| linux-master | arch/powerpc/platforms/44x/machine_check.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PPC476 board specific routines
*
* Copyright 2010 Torez Smith, IBM Corporation.
*
* Based on earlier code:
* Matt Porter <[email protected]>
* Copyright 2002-2005 MontaVista Software Inc.
*
* Eugene Surovegin <[email protected]> or <[email protected]>
* Copyright (c) 2003-2005 Zultys Technologies
*
* Rewritten and ported to the merged powerpc tree:
* Copyright 2007 David Gibson <[email protected]>, IBM Corporation.
*/
#include <linux/init.h>
#include <linux/of_platform.h>
#include <linux/rtc.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/time.h>
#include <asm/uic.h>
#include <asm/ppc4xx.h>
#include <asm/mpic.h>
#include <asm/mmu.h>
static const struct of_device_id iss4xx_of_bus[] __initconst = {
{ .compatible = "ibm,plb4", },
{ .compatible = "ibm,plb6", },
{ .compatible = "ibm,opb", },
{ .compatible = "ibm,ebc", },
{},
};
static int __init iss4xx_device_probe(void)
{
of_platform_bus_probe(NULL, iss4xx_of_bus, NULL);
of_instantiate_rtc();
return 0;
}
machine_device_initcall(iss4xx, iss4xx_device_probe);
/* We can have either UICs or MPICs */
static void __init iss4xx_init_irq(void)
{
struct device_node *np;
/* Find top level interrupt controller */
for_each_node_with_property(np, "interrupt-controller") {
if (!of_property_present(np, "interrupts"))
break;
}
if (np == NULL)
panic("Can't find top level interrupt controller");
/* Check type and do appropriate initialization */
if (of_device_is_compatible(np, "ibm,uic")) {
uic_init_tree();
ppc_md.get_irq = uic_get_irq;
#ifdef CONFIG_MPIC
} else if (of_device_is_compatible(np, "chrp,open-pic")) {
/* The MPIC driver will get everything it needs from the
* device-tree, just pass 0 to all arguments
*/
struct mpic *mpic = mpic_alloc(np, 0, MPIC_NO_RESET, 0, 0, " MPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
ppc_md.get_irq = mpic_get_irq;
#endif
} else
panic("Unrecognized top level interrupt controller");
}
#ifdef CONFIG_SMP
static void smp_iss4xx_setup_cpu(int cpu)
{
mpic_setup_this_cpu();
}
static int smp_iss4xx_kick_cpu(int cpu)
{
struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
const u64 *spin_table_addr_prop;
u32 *spin_table;
extern void start_secondary_47x(void);
BUG_ON(cpunode == NULL);
/* Assume spin table. We could test for the enable-method in
* the device-tree but currently there's little point as it's
* our only supported method
*/
spin_table_addr_prop = of_get_property(cpunode, "cpu-release-addr",
NULL);
if (spin_table_addr_prop == NULL) {
pr_err("CPU%d: Can't start, missing cpu-release-addr !\n", cpu);
return -ENOENT;
}
/* Assume it's mapped as part of the linear mapping. This is a bit
* fishy but will work fine for now
*/
spin_table = (u32 *)__va(*spin_table_addr_prop);
pr_debug("CPU%d: Spin table mapped at %p\n", cpu, spin_table);
spin_table[3] = cpu;
smp_wmb();
spin_table[1] = __pa(start_secondary_47x);
mb();
return 0;
}
static struct smp_ops_t iss_smp_ops = {
.probe = smp_mpic_probe,
.message_pass = smp_mpic_message_pass,
.setup_cpu = smp_iss4xx_setup_cpu,
.kick_cpu = smp_iss4xx_kick_cpu,
.give_timebase = smp_generic_give_timebase,
.take_timebase = smp_generic_take_timebase,
};
static void __init iss4xx_smp_init(void)
{
if (mmu_has_feature(MMU_FTR_TYPE_47x))
smp_ops = &iss_smp_ops;
}
#else /* CONFIG_SMP */
static void __init iss4xx_smp_init(void) { }
#endif /* CONFIG_SMP */
static void __init iss4xx_setup_arch(void)
{
iss4xx_smp_init();
}
define_machine(iss4xx) {
.name = "ISS-4xx",
.compatible = "ibm,iss-4xx",
.progress = udbg_progress,
.init_IRQ = iss4xx_init_irq,
.setup_arch = iss4xx_setup_arch,
.restart = ppc4xx_reset_system,
};
| linux-master | arch/powerpc/platforms/44x/iss4xx.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* VAS user space API for its accelerators (Only NX-GZIP is supported now)
* Copyright (C) 2019 Haren Myneni, IBM Corp
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/mmu_context.h>
#include <linux/io.h>
#include <asm/vas.h>
#include <uapi/asm/vas-api.h>
/*
* The driver creates the device node that can be used as follows:
* For NX-GZIP
*
* fd = open("/dev/crypto/nx-gzip", O_RDWR);
* rc = ioctl(fd, VAS_TX_WIN_OPEN, &attr);
* paste_addr = mmap(NULL, PAGE_SIZE, prot, MAP_SHARED, fd, 0ULL).
* vas_copy(&crb, 0, 1);
* vas_paste(paste_addr, 0, 1);
* close(fd) or exit process to close window.
*
* where "vas_copy" and "vas_paste" are defined in copy-paste.h.
* copy/paste returns to the user space directly. So refer NX hardware
* documentation for exact copy/paste usage and completion / error
* conditions.
*/
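/*
 * A minimal user-space sketch of the sequence above (illustrative only;
 * the ioctl argument layout comes from uapi/asm/vas-api.h, and the
 * vas_id = -1 "default instance" and flags = 0 values are assumptions,
 * the only requirement enforced below is version == 1):
 *
 *	struct vas_tx_win_open_attr attr = { .version = 1, .vas_id = -1 };
 *	int fd = open("/dev/crypto/nx-gzip", O_RDWR);
 *	if (ioctl(fd, VAS_TX_WIN_OPEN, &attr) < 0)
 *		err(1, "VAS_TX_WIN_OPEN");
 *	void *paste_addr = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
 *				MAP_SHARED, fd, 0ULL);
 */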
/*
* Wrapper object for the nx-gzip device - there is just one instance of
* this node for the whole system.
*/
static struct coproc_dev {
struct cdev cdev;
struct device *device;
char *name;
dev_t devt;
struct class *class;
enum vas_cop_type cop_type;
const struct vas_user_win_ops *vops;
} coproc_device;
struct coproc_instance {
struct coproc_dev *coproc;
struct vas_window *txwin;
};
static char *coproc_devnode(const struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "crypto/%s", dev_name(dev));
}
/*
* Take reference to pid and mm
*/
int get_vas_user_win_ref(struct vas_user_win_ref *task_ref)
{
/*
* Window opened by a child thread may not be closed when
* it exits. So take reference to its pid and release it
 * when the window is freed by the parent thread.
* Acquire a reference to the task's pid to make sure
* pid will not be re-used - needed only for multithread
* applications.
*/
task_ref->pid = get_task_pid(current, PIDTYPE_PID);
/*
* Acquire a reference to the task's mm.
*/
task_ref->mm = get_task_mm(current);
if (!task_ref->mm) {
put_pid(task_ref->pid);
pr_err("VAS: pid(%d): mm_struct is not found\n",
current->pid);
return -EPERM;
}
mmgrab(task_ref->mm);
mmput(task_ref->mm);
/*
* Process closes window during exit. In the case of
* multithread application, the child thread can open
* window and can exit without closing it. So takes tgid
* reference until window closed to make sure tgid is not
* reused.
*/
task_ref->tgid = find_get_pid(task_tgid_vnr(current));
return 0;
}
/*
* Successful return must release the task reference with
* put_task_struct
*/
static bool ref_get_pid_and_task(struct vas_user_win_ref *task_ref,
struct task_struct **tskp, struct pid **pidp)
{
struct task_struct *tsk;
struct pid *pid;
pid = task_ref->pid;
tsk = get_pid_task(pid, PIDTYPE_PID);
if (!tsk) {
pid = task_ref->tgid;
tsk = get_pid_task(pid, PIDTYPE_PID);
/*
* Parent thread (tgid) will be closing window when it
* exits. So should not get here.
*/
if (WARN_ON_ONCE(!tsk))
return false;
}
/* Return if the task is exiting. */
if (tsk->flags & PF_EXITING) {
put_task_struct(tsk);
return false;
}
*tskp = tsk;
*pidp = pid;
return true;
}
/*
* Update the CSB to indicate a translation error.
*
* User space will be polling on CSB after the request is issued.
* If NX can handle the request without any issues, it updates CSB.
* Whereas if NX encounters page fault, the kernel will handle the
* fault and update CSB with translation error.
*
* If we are unable to update the CSB means copy_to_user failed due to
* invalid csb_addr, send a signal to the process.
*/
void vas_update_csb(struct coprocessor_request_block *crb,
struct vas_user_win_ref *task_ref)
{
struct coprocessor_status_block csb;
struct kernel_siginfo info;
struct task_struct *tsk;
void __user *csb_addr;
struct pid *pid;
int rc;
/*
* NX user space windows can not be opened for task->mm=NULL
* and faults will not be generated for kernel requests.
*/
if (WARN_ON_ONCE(!task_ref->mm))
return;
csb_addr = (void __user *)be64_to_cpu(crb->csb_addr);
memset(&csb, 0, sizeof(csb));
csb.cc = CSB_CC_FAULT_ADDRESS;
csb.ce = CSB_CE_TERMINATION;
csb.cs = 0;
csb.count = 0;
/*
* NX operates and returns in BE format as defined CRB struct.
* So saves fault_storage_addr in BE as NX pastes in FIFO and
* expects user space to convert to CPU format.
*/
csb.address = crb->stamp.nx.fault_storage_addr;
csb.flags = 0;
/*
* Process closes send window after all pending NX requests are
* completed. In multi-thread applications, a child thread can
* open a window and can exit without closing it. May be some
* requests are pending or this window can be used by other
* threads later. We should handle faults if NX encounters
 * page faults on these requests. Update CSB with translation
* error and fault address. If csb_addr passed by user space is
* invalid, send SEGV signal to pid saved in window. If the
* child thread is not running, send the signal to tgid.
* Parent thread (tgid) will close this window upon its exit.
*
* pid and mm references are taken when window is opened by
* process (pid). So tgid is used only when child thread opens
* a window and exits without closing it.
*/
if (!ref_get_pid_and_task(task_ref, &tsk, &pid))
return;
kthread_use_mm(task_ref->mm);
rc = copy_to_user(csb_addr, &csb, sizeof(csb));
/*
* User space polls on csb.flags (first byte). So add barrier
* then copy first byte with csb flags update.
*/
if (!rc) {
csb.flags = CSB_V;
/* Make sure update to csb.flags is visible now */
smp_mb();
rc = copy_to_user(csb_addr, &csb, sizeof(u8));
}
kthread_unuse_mm(task_ref->mm);
put_task_struct(tsk);
/* Success */
if (!rc)
return;
pr_debug("Invalid CSB address 0x%p signalling pid(%d)\n",
csb_addr, pid_vnr(pid));
clear_siginfo(&info);
info.si_signo = SIGSEGV;
info.si_errno = EFAULT;
info.si_code = SEGV_MAPERR;
info.si_addr = csb_addr;
/*
* process will be polling on csb.flags after request is sent to
* NX. So generally CSB update should not fail except when an
* application passes invalid csb_addr. So an error message will
* be displayed and leave it to user space whether to ignore or
* handle this signal.
*/
rcu_read_lock();
rc = kill_pid_info(SIGSEGV, &info, pid);
rcu_read_unlock();
pr_devel("%s(): pid %d kill_proc_info() rc %d\n", __func__,
pid_vnr(pid), rc);
}
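/*
 * The user-space side of this completion protocol is roughly (a sketch,
 * assuming "csb" points at the coprocessor_status_block named by
 * crb->csb_addr; the CSB_V / CSB_CC_* values are the ones used above):
 *
 *	while (!(csb->flags & CSB_V))	// volatile read, poll for valid
 *		;
 *	// read barrier here: flags is deliberately written last above
 *	if (csb->cc == CSB_CC_FAULT_ADDRESS)
 *		// touch csb->address to resolve the fault, then resubmit
 */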
void vas_dump_crb(struct coprocessor_request_block *crb)
{
struct data_descriptor_entry *dde;
struct nx_fault_stamp *nx;
dde = &crb->source;
pr_devel("SrcDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
be64_to_cpu(dde->address), be32_to_cpu(dde->length),
dde->count, dde->index, dde->flags);
dde = &crb->target;
pr_devel("TgtDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
be64_to_cpu(dde->address), be32_to_cpu(dde->length),
dde->count, dde->index, dde->flags);
nx = &crb->stamp.nx;
pr_devel("NX Stamp: PSWID 0x%x, FSA 0x%llx, flags 0x%x, FS 0x%x\n",
be32_to_cpu(nx->pswid),
be64_to_cpu(crb->stamp.nx.fault_storage_addr),
nx->flags, nx->fault_status);
}
static int coproc_open(struct inode *inode, struct file *fp)
{
struct coproc_instance *cp_inst;
cp_inst = kzalloc(sizeof(*cp_inst), GFP_KERNEL);
if (!cp_inst)
return -ENOMEM;
cp_inst->coproc = container_of(inode->i_cdev, struct coproc_dev,
cdev);
fp->private_data = cp_inst;
return 0;
}
static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
{
void __user *uptr = (void __user *)arg;
struct vas_tx_win_open_attr uattr;
struct coproc_instance *cp_inst;
struct vas_window *txwin;
int rc;
cp_inst = fp->private_data;
/*
* One window for file descriptor
*/
if (cp_inst->txwin)
return -EEXIST;
rc = copy_from_user(&uattr, uptr, sizeof(uattr));
if (rc) {
pr_err("%s(): copy_from_user() returns %d\n", __func__, rc);
return -EFAULT;
}
if (uattr.version != 1) {
pr_err("Invalid window open API version\n");
return -EINVAL;
}
if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->open_win) {
pr_err("VAS API is not registered\n");
return -EACCES;
}
txwin = cp_inst->coproc->vops->open_win(uattr.vas_id, uattr.flags,
cp_inst->coproc->cop_type);
if (IS_ERR(txwin)) {
pr_err("%s() VAS window open failed, %ld\n", __func__,
PTR_ERR(txwin));
return PTR_ERR(txwin);
}
mutex_init(&txwin->task_ref.mmap_mutex);
cp_inst->txwin = txwin;
return 0;
}
static int coproc_release(struct inode *inode, struct file *fp)
{
struct coproc_instance *cp_inst = fp->private_data;
int rc;
if (cp_inst->txwin) {
if (cp_inst->coproc->vops &&
cp_inst->coproc->vops->close_win) {
rc = cp_inst->coproc->vops->close_win(cp_inst->txwin);
if (rc)
return rc;
}
cp_inst->txwin = NULL;
}
kfree(cp_inst);
fp->private_data = NULL;
/*
* We don't know here if user has other receive windows
* open, so we can't really call clear_thread_tidr().
* So, once the process calls set_thread_tidr(), the
* TIDR value sticks around until process exits, resulting
* in an extra copy in restore_sprs().
*/
return 0;
}
/*
* If the executed instruction that caused the fault was a paste, then
* clear regs CR0[EQ], advance NIP, and return 0. Else return error code.
*/
static int do_fail_paste(void)
{
struct pt_regs *regs = current->thread.regs;
u32 instword;
if (WARN_ON_ONCE(!regs))
return -EINVAL;
if (WARN_ON_ONCE(!user_mode(regs)))
return -EINVAL;
/*
* If we couldn't translate the instruction, the driver should
* return success without handling the fault; it will be retried
* or the instruction fetch will fault.
*/
if (get_user(instword, (u32 __user *)(regs->nip)))
return -EAGAIN;
/*
* Not a paste instruction, driver may fail the fault.
*/
if ((instword & PPC_INST_PASTE_MASK) != PPC_INST_PASTE)
return -ENOENT;
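/*
 * A successful paste sets CR0[EQ] (see the function comment above), so
 * clearing CR0[LT/GT/EQ] and stepping past the instruction makes user
 * space observe the paste as failed without re-executing it.
 */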
regs->ccr &= ~0xe0000000; /* Clear CR0[0-2] to fail paste */
regs_add_return_ip(regs, 4); /* Emulate the paste */
return 0;
}
/*
* This fault handler is invoked when the core generates page fault on
* the paste address. Happens if the kernel closes window in hypervisor
* (on pseries) due to lost credit or the paste address is not mapped.
*/
static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct file *fp = vma->vm_file;
struct coproc_instance *cp_inst = fp->private_data;
struct vas_window *txwin;
vm_fault_t fault;
u64 paste_addr;
int ret;
/*
* window is not opened. Shouldn't expect this error.
*/
if (!cp_inst || !cp_inst->txwin) {
pr_err("%s(): Unexpected fault on paste address with TX window closed\n",
__func__);
return VM_FAULT_SIGBUS;
}
txwin = cp_inst->txwin;
/*
* When the LPAR lost credits due to core removal or during
* migration, invalidate the existing mapping for the current
* paste addresses and set windows in-active (zap_vma_pages in
* reconfig_close_windows()).
* New mapping will be done later after migration or new credits
* available. So continue to receive faults if the user space
* issues NX requests.
*/
if (txwin->task_ref.vma != vmf->vma) {
pr_err("%s(): No previous mapping with paste address\n",
__func__);
return VM_FAULT_SIGBUS;
}
mutex_lock(&txwin->task_ref.mmap_mutex);
/*
* The window may be inactive due to lost credit (Ex: core
* removal with DLPAR). If the window is active again when
* the credit is available, map the new paste address at the
* window virtual address.
*/
if (txwin->status == VAS_WIN_ACTIVE) {
paste_addr = cp_inst->coproc->vops->paste_addr(txwin);
if (paste_addr) {
fault = vmf_insert_pfn(vma, vma->vm_start,
(paste_addr >> PAGE_SHIFT));
mutex_unlock(&txwin->task_ref.mmap_mutex);
return fault;
}
}
mutex_unlock(&txwin->task_ref.mmap_mutex);
/*
* This fault was received because the actual window was closed.
* It can happen during migration or when credits are lost.
* Since there is no mapping, return the paste instruction failure
* to the user space.
*/
ret = do_fail_paste();
/*
* The user space can retry several times until success (needed
* for migration), fall back to SW compression, or manage with
* the existing open windows if available.
* Looking at the sysfs interface, it can determine whether these
* failures happen during migration or core removal:
* nr_used_credits > nr_total_credits when credits are lost.
*/
if (!ret || (ret == -EAGAIN))
return VM_FAULT_NOPAGE;
return VM_FAULT_SIGBUS;
}
static const struct vm_operations_struct vas_vm_ops = {
.fault = vas_mmap_fault,
};
static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
{
struct coproc_instance *cp_inst = fp->private_data;
struct vas_window *txwin;
unsigned long pfn;
u64 paste_addr;
pgprot_t prot;
int rc;
txwin = cp_inst->txwin;
if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
pr_debug("%s(): size 0x%zx, PAGE_SIZE 0x%zx\n", __func__,
(vma->vm_end - vma->vm_start), PAGE_SIZE);
return -EINVAL;
}
/* Ensure instance has an open send window */
if (!txwin) {
pr_err("%s(): No send window open?\n", __func__);
return -EINVAL;
}
if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->paste_addr) {
pr_err("%s(): VAS API is not registered\n", __func__);
return -EACCES;
}
/*
* The initial mmap is done after the window is opened
* with ioctl. But before mmap(), this window can be closed in
* the hypervisor due to lost credit (core removal on pseries).
* So if the window is not active, return mmap() failure with
* -EACCES and expect the user space to reissue mmap() when it
* is active again or to open a new window when credit is available.
* mmap_mutex protects the paste address mmap() with DLPAR
* close/open event and allows mmap() only when the window is
* active.
*/
mutex_lock(&txwin->task_ref.mmap_mutex);
if (txwin->status != VAS_WIN_ACTIVE) {
pr_err("%s(): Window is not active\n", __func__);
rc = -EACCES;
goto out;
}
paste_addr = cp_inst->coproc->vops->paste_addr(txwin);
if (!paste_addr) {
pr_err("%s(): Window paste address failed\n", __func__);
rc = -EINVAL;
goto out;
}
pfn = paste_addr >> PAGE_SHIFT;
/* flags, page_prot from cxl_mmap(), except we want cacheable */
vm_flags_set(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY);
rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
vma->vm_end - vma->vm_start, prot);
pr_devel("%s(): paste addr %llx at %lx, rc %d\n", __func__,
paste_addr, vma->vm_start, rc);
txwin->task_ref.vma = vma;
vma->vm_ops = &vas_vm_ops;
out:
mutex_unlock(&txwin->task_ref.mmap_mutex);
return rc;
}
static long coproc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case VAS_TX_WIN_OPEN:
return coproc_ioc_tx_win_open(fp, arg);
default:
return -EINVAL;
}
}
static struct file_operations coproc_fops = {
.open = coproc_open,
.release = coproc_release,
.mmap = coproc_mmap,
.unlocked_ioctl = coproc_ioctl,
};
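/*
 * Rough user-space usage sketch for this interface (assumptions, not
 * taken from this file): open the character device registered below,
 * call ioctl(fd, VAS_TX_WIN_OPEN, &attr) with attr.version = 1 and the
 * desired vas_id/flags, then mmap() a single page to obtain the paste
 * address used for copy/paste requests to the coprocessor.
 */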
/*
* Supporting only the nx-gzip coprocessor type now, but this API code
* can be extended to other coprocessor types later.
*/
int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
const char *name,
const struct vas_user_win_ops *vops)
{
int rc = -EINVAL;
dev_t devno;
rc = alloc_chrdev_region(&coproc_device.devt, 1, 1, name);
if (rc) {
pr_err("Unable to allocate coproc major number: %i\n", rc);
return rc;
}
pr_devel("%s device allocated, dev [%i,%i]\n", name,
MAJOR(coproc_device.devt), MINOR(coproc_device.devt));
coproc_device.class = class_create(name);
if (IS_ERR(coproc_device.class)) {
rc = PTR_ERR(coproc_device.class);
pr_err("Unable to create %s class %d\n", name, rc);
goto err_class;
}
coproc_device.class->devnode = coproc_devnode;
coproc_device.cop_type = cop_type;
coproc_device.vops = vops;
coproc_fops.owner = mod;
cdev_init(&coproc_device.cdev, &coproc_fops);
devno = MKDEV(MAJOR(coproc_device.devt), 0);
rc = cdev_add(&coproc_device.cdev, devno, 1);
if (rc) {
pr_err("cdev_add() failed %d\n", rc);
goto err_cdev;
}
coproc_device.device = device_create(coproc_device.class, NULL,
devno, NULL, name, MINOR(devno));
if (IS_ERR(coproc_device.device)) {
rc = PTR_ERR(coproc_device.device);
pr_err("Unable to create coproc-%d %d\n", MINOR(devno), rc);
goto err;
}
pr_devel("%s: Added dev [%d,%d]\n", __func__, MAJOR(devno),
MINOR(devno));
return 0;
err:
cdev_del(&coproc_device.cdev);
err_cdev:
class_destroy(coproc_device.class);
err_class:
unregister_chrdev_region(coproc_device.devt, 1);
return rc;
}
void vas_unregister_coproc_api(void)
{
dev_t devno;
cdev_del(&coproc_device.cdev);
devno = MKDEV(MAJOR(coproc_device.devt), 0);
device_destroy(coproc_device.class, devno);
class_destroy(coproc_device.class);
unregister_chrdev_region(coproc_device.devt, 1);
}
| linux-master | arch/powerpc/platforms/book3s/vas-api.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* GE SBC610 board support
*
* Author: Martyn Welch <[email protected]>
*
* Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
*
* Based on: mpc86xx_hpcn.c (MPC86xx HPCN board specific routines)
* Copyright 2006 Freescale Semiconductor Inc.
*
* NEC fixup adapted from arch/mips/pci/fixup-lm2e.c
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/nvram.h>
#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/ge/ge_pic.h>
#include "mpc86xx.h"
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) do { printk(KERN_ERR "SBC610: " fmt); } while (0)
#else
#define DBG(fmt...) do { } while (0)
#endif
void __iomem *sbc610_regs;
static void __init gef_sbc610_init_irq(void)
{
struct device_node *cascade_node = NULL;
mpc86xx_init_irq();
/*
* There is a simple interrupt handler in the main FPGA; this needs
* to be cascaded into the MPIC
*/
cascade_node = of_find_compatible_node(NULL, NULL, "gef,fpga-pic");
if (!cascade_node) {
printk(KERN_WARNING "SBC610: No FPGA PIC\n");
return;
}
gef_pic_init(cascade_node);
of_node_put(cascade_node);
}
static void __init gef_sbc610_setup_arch(void)
{
struct device_node *regs;
printk(KERN_INFO "GE Intelligent Platforms SBC610 6U VPX SBC\n");
#ifdef CONFIG_SMP
mpc86xx_smp_init();
#endif
fsl_pci_assign_primary();
/* Remap basic board registers */
regs = of_find_compatible_node(NULL, NULL, "gef,fpga-regs");
if (regs) {
sbc610_regs = of_iomap(regs, 0);
if (sbc610_regs == NULL)
printk(KERN_WARNING "Unable to map board registers\n");
of_node_put(regs);
}
#if defined(CONFIG_MMIO_NVRAM)
mmio_nvram_init();
#endif
}
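/*
 * Layout of the first board register, as inferred from the accessors
 * below (not from a datasheet): bits 27:24 FPGA revision, 23:16 board
 * (software) revision, 15:8 PCB revision.
 */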
/* Return the PCB revision */
static unsigned int gef_sbc610_get_pcb_rev(void)
{
unsigned int reg;
reg = ioread32(sbc610_regs);
return (reg >> 8) & 0xff;
}
/* Return the board (software) revision */
static unsigned int gef_sbc610_get_board_rev(void)
{
unsigned int reg;
reg = ioread32(sbc610_regs);
return (reg >> 16) & 0xff;
}
/* Return the FPGA revision */
static unsigned int gef_sbc610_get_fpga_rev(void)
{
unsigned int reg;
reg = ioread32(sbc610_regs);
return (reg >> 24) & 0xf;
}
static void gef_sbc610_show_cpuinfo(struct seq_file *m)
{
uint svid = mfspr(SPRN_SVR);
seq_printf(m, "Vendor\t\t: GE Intelligent Platforms\n");
seq_printf(m, "Revision\t: %u%c\n", gef_sbc610_get_pcb_rev(),
('A' + gef_sbc610_get_board_rev() - 1));
seq_printf(m, "FPGA Revision\t: %u\n", gef_sbc610_get_fpga_rev());
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
}
static void gef_sbc610_nec_fixup(struct pci_dev *pdev)
{
unsigned int val;
/* Do not do the fixup on other platforms! */
if (!machine_is(gef_sbc610))
return;
printk(KERN_INFO "Running NEC uPD720101 Fixup\n");
/* Ensure ports 1, 2, 3, 4 & 5 are enabled */
pci_read_config_dword(pdev, 0xe0, &val);
pci_write_config_dword(pdev, 0xe0, (val & ~7) | 0x5);
/* System clock is 48-MHz Oscillator and EHCI Enabled. */
pci_write_config_dword(pdev, 0xe4, 1 << 5);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
gef_sbc610_nec_fixup);
machine_arch_initcall(gef_sbc610, mpc86xx_common_publish_devices);
define_machine(gef_sbc610) {
.name = "GE SBC610",
.compatible = "gef,sbc610",
.setup_arch = gef_sbc610_setup_arch,
.init_IRQ = gef_sbc610_init_irq,
.show_cpuinfo = gef_sbc610_show_cpuinfo,
.get_irq = mpic_get_irq,
.time_init = mpc86xx_time_init,
.progress = udbg_progress,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
#endif
};
| linux-master | arch/powerpc/platforms/86xx/gef_sbc610.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2008 Freescale Semiconductor, Inc.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <asm/mpic.h>
#include <asm/i8259.h>
#include "mpc86xx.h"
#ifdef CONFIG_PPC_I8259
static void mpc86xx_8259_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = i8259_irq();
if (cascade_irq)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
}
#endif /* CONFIG_PPC_I8259 */
void __init mpc86xx_init_irq(void)
{
#ifdef CONFIG_PPC_I8259
struct device_node *np;
struct device_node *cascade_node = NULL;
int cascade_irq;
#endif
struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU,
0, 256, " MPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
#ifdef CONFIG_PPC_I8259
/* Initialize i8259 controller */
for_each_node_by_type(np, "interrupt-controller")
if (of_device_is_compatible(np, "chrp,iic")) {
cascade_node = np;
break;
}
if (cascade_node == NULL) {
printk(KERN_DEBUG "Could not find i8259 PIC\n");
return;
}
cascade_irq = irq_of_parse_and_map(cascade_node, 0);
if (!cascade_irq) {
printk(KERN_ERR "Failed to map cascade interrupt\n");
return;
}
i8259_init(cascade_node, 0);
of_node_put(cascade_node);
irq_set_chained_handler(cascade_irq, mpc86xx_8259_cascade);
#endif
}
| linux-master | arch/powerpc/platforms/86xx/pic.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Routines common to most mpc86xx-based boards.
*/
#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <linux/of_platform.h>
#include <asm/reg.h>
#include <asm/synch.h>
#include "mpc86xx.h"
static const struct of_device_id mpc86xx_common_ids[] __initconst = {
{ .type = "soc", },
{ .compatible = "soc", },
{ .compatible = "simple-bus", },
{ .name = "localbus", },
{ .compatible = "gianfar", },
{ .compatible = "fsl,mpc8641-pcie", },
{},
};
int __init mpc86xx_common_publish_devices(void)
{
return of_platform_bus_probe(NULL, mpc86xx_common_ids, NULL);
}
long __init mpc86xx_time_init(void)
{
unsigned int temp;
/* Set the time base to zero */
mtspr(SPRN_TBWL, 0);
mtspr(SPRN_TBWU, 0);
temp = mfspr(SPRN_HID0);
temp |= HID0_TBEN;
mtspr(SPRN_HID0, temp);
isync();
return 0;
}
| linux-master | arch/powerpc/platforms/86xx/common.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* GE SBC310 board support
*
* Author: Martyn Welch <[email protected]>
*
* Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
*
* Based on: mpc86xx_hpcn.c (MPC86xx HPCN board specific routines)
* Copyright 2006 Freescale Semiconductor Inc.
*
* NEC fixup adapted from arch/mips/pci/fixup-lm2e.c
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/nvram.h>
#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/ge/ge_pic.h>
#include "mpc86xx.h"
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) do { printk(KERN_ERR "SBC310: " fmt); } while (0)
#else
#define DBG(fmt...) do { } while (0)
#endif
void __iomem *sbc310_regs;
static void __init gef_sbc310_init_irq(void)
{
struct device_node *cascade_node = NULL;
mpc86xx_init_irq();
/*
* There is a simple interrupt handler in the main FPGA; this needs
* to be cascaded into the MPIC
*/
cascade_node = of_find_compatible_node(NULL, NULL, "gef,fpga-pic");
if (!cascade_node) {
printk(KERN_WARNING "SBC310: No FPGA PIC\n");
return;
}
gef_pic_init(cascade_node);
of_node_put(cascade_node);
}
static void __init gef_sbc310_setup_arch(void)
{
struct device_node *regs;
printk(KERN_INFO "GE Intelligent Platforms SBC310 6U VPX SBC\n");
#ifdef CONFIG_SMP
mpc86xx_smp_init();
#endif
fsl_pci_assign_primary();
/* Remap basic board registers */
regs = of_find_compatible_node(NULL, NULL, "gef,fpga-regs");
if (regs) {
sbc310_regs = of_iomap(regs, 0);
if (sbc310_regs == NULL)
printk(KERN_WARNING "Unable to map board registers\n");
of_node_put(regs);
}
#if defined(CONFIG_MMIO_NVRAM)
mmio_nvram_init();
#endif
}
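/*
 * Layout of the first board register, as inferred from the accessors
 * below (not from a datasheet): bits 27:24 FPGA revision, 23:16 board
 * (software) revision, 15:8 PCB revision, 7:0 board ID.
 */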
/* Return the PCB revision */
static unsigned int gef_sbc310_get_board_id(void)
{
unsigned int reg;
reg = ioread32(sbc310_regs);
return reg & 0xff;
}
/* Return the PCB revision */
static unsigned int gef_sbc310_get_pcb_rev(void)
{
unsigned int reg;
reg = ioread32(sbc310_regs);
return (reg >> 8) & 0xff;
}
/* Return the board (software) revision */
static unsigned int gef_sbc310_get_board_rev(void)
{
unsigned int reg;
reg = ioread32(sbc310_regs);
return (reg >> 16) & 0xff;
}
/* Return the FPGA revision */
static unsigned int gef_sbc310_get_fpga_rev(void)
{
unsigned int reg;
reg = ioread32(sbc310_regs);
return (reg >> 24) & 0xf;
}
static void gef_sbc310_show_cpuinfo(struct seq_file *m)
{
uint svid = mfspr(SPRN_SVR);
seq_printf(m, "Vendor\t\t: GE Intelligent Platforms\n");
seq_printf(m, "Board ID\t: 0x%2.2x\n", gef_sbc310_get_board_id());
seq_printf(m, "Revision\t: %u%c\n", gef_sbc310_get_pcb_rev(),
('A' + gef_sbc310_get_board_rev() - 1));
seq_printf(m, "FPGA Revision\t: %u\n", gef_sbc310_get_fpga_rev());
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
}
static void gef_sbc310_nec_fixup(struct pci_dev *pdev)
{
unsigned int val;
/* Do not do the fixup on other platforms! */
if (!machine_is(gef_sbc310))
return;
printk(KERN_INFO "Running NEC uPD720101 Fixup\n");
/* Ensure only ports 1 & 2 are enabled */
pci_read_config_dword(pdev, 0xe0, &val);
pci_write_config_dword(pdev, 0xe0, (val & ~7) | 0x2);
/* System clock is 48-MHz Oscillator and EHCI Enabled. */
pci_write_config_dword(pdev, 0xe4, 1 << 5);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
gef_sbc310_nec_fixup);
machine_arch_initcall(gef_sbc310, mpc86xx_common_publish_devices);
define_machine(gef_sbc310) {
.name = "GE SBC310",
.compatible = "gef,sbc310",
.setup_arch = gef_sbc310_setup_arch,
.init_IRQ = gef_sbc310_init_irq,
.show_cpuinfo = gef_sbc310_show_cpuinfo,
.get_irq = mpic_get_irq,
.time_init = mpc86xx_time_init,
.progress = udbg_progress,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
#endif
};
| linux-master | arch/powerpc/platforms/86xx/gef_sbc310.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* GE PPC9A board support
*
* Author: Martyn Welch <[email protected]>
*
* Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
*
* Based on: mpc86xx_hpcn.c (MPC86xx HPCN board specific routines)
* Copyright 2006 Freescale Semiconductor Inc.
*
* NEC fixup adapted from arch/mips/pci/fixup-lm2e.c
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/nvram.h>
#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/ge/ge_pic.h>
#include "mpc86xx.h"
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) do { printk(KERN_ERR "PPC9A: " fmt); } while (0)
#else
#define DBG(fmt...) do { } while (0)
#endif
void __iomem *ppc9a_regs;
static void __init gef_ppc9a_init_irq(void)
{
struct device_node *cascade_node = NULL;
mpc86xx_init_irq();
/*
* There is a simple interrupt handler in the main FPGA; this needs
* to be cascaded into the MPIC
*/
cascade_node = of_find_compatible_node(NULL, NULL, "gef,fpga-pic-1.00");
if (!cascade_node) {
printk(KERN_WARNING "PPC9A: No FPGA PIC\n");
return;
}
gef_pic_init(cascade_node);
of_node_put(cascade_node);
}
static void __init gef_ppc9a_setup_arch(void)
{
struct device_node *regs;
printk(KERN_INFO "GE Intelligent Platforms PPC9A 6U VME SBC\n");
#ifdef CONFIG_SMP
mpc86xx_smp_init();
#endif
fsl_pci_assign_primary();
/* Remap basic board registers */
regs = of_find_compatible_node(NULL, NULL, "gef,ppc9a-fpga-regs");
if (regs) {
ppc9a_regs = of_iomap(regs, 0);
if (ppc9a_regs == NULL)
printk(KERN_WARNING "Unable to map board registers\n");
of_node_put(regs);
}
#if defined(CONFIG_MMIO_NVRAM)
mmio_nvram_init();
#endif
}
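/*
 * Register layout as inferred from the big-endian accessors below (not
 * from a datasheet): offset 0 holds the PCB revision (23:16), board
 * revision (15:8) and FPGA revision (3:0); offset 4 holds the VME
 * geographical address (4:0) and the system controller flag (bit 9).
 */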
/* Return the PCB revision */
static unsigned int gef_ppc9a_get_pcb_rev(void)
{
unsigned int reg;
reg = ioread32be(ppc9a_regs);
return (reg >> 16) & 0xff;
}
/* Return the board (software) revision */
static unsigned int gef_ppc9a_get_board_rev(void)
{
unsigned int reg;
reg = ioread32be(ppc9a_regs);
return (reg >> 8) & 0xff;
}
/* Return the FPGA revision */
static unsigned int gef_ppc9a_get_fpga_rev(void)
{
unsigned int reg;
reg = ioread32be(ppc9a_regs);
return reg & 0xf;
}
/* Return VME Geographical Address */
static unsigned int gef_ppc9a_get_vme_geo_addr(void)
{
unsigned int reg;
reg = ioread32be(ppc9a_regs + 0x4);
return reg & 0x1f;
}
/* Return VME System Controller Status */
static unsigned int gef_ppc9a_get_vme_is_syscon(void)
{
unsigned int reg;
reg = ioread32be(ppc9a_regs + 0x4);
return (reg >> 9) & 0x1;
}
static void gef_ppc9a_show_cpuinfo(struct seq_file *m)
{
uint svid = mfspr(SPRN_SVR);
seq_printf(m, "Vendor\t\t: GE Intelligent Platforms\n");
seq_printf(m, "Revision\t: %u%c\n", gef_ppc9a_get_pcb_rev(),
('A' + gef_ppc9a_get_board_rev()));
seq_printf(m, "FPGA Revision\t: %u\n", gef_ppc9a_get_fpga_rev());
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
seq_printf(m, "VME geo. addr\t: %u\n", gef_ppc9a_get_vme_geo_addr());
seq_printf(m, "VME syscon\t: %s\n",
gef_ppc9a_get_vme_is_syscon() ? "yes" : "no");
}
static void gef_ppc9a_nec_fixup(struct pci_dev *pdev)
{
unsigned int val;
/* Do not do the fixup on other platforms! */
if (!machine_is(gef_ppc9a))
return;
printk(KERN_INFO "Running NEC uPD720101 Fixup\n");
/* Ensure ports 1, 2, 3, 4 & 5 are enabled */
pci_read_config_dword(pdev, 0xe0, &val);
pci_write_config_dword(pdev, 0xe0, (val & ~7) | 0x5);
/* System clock is 48-MHz Oscillator and EHCI Enabled. */
pci_write_config_dword(pdev, 0xe4, 1 << 5);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
gef_ppc9a_nec_fixup);
machine_arch_initcall(gef_ppc9a, mpc86xx_common_publish_devices);
define_machine(gef_ppc9a) {
.name = "GE PPC9A",
.compatible = "gef,ppc9a",
.setup_arch = gef_ppc9a_setup_arch,
.init_IRQ = gef_ppc9a_init_irq,
.show_cpuinfo = gef_ppc9a_show_cpuinfo,
.get_irq = mpic_get_irq,
.time_init = mpc86xx_time_init,
.progress = udbg_progress,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
#endif
};
| linux-master | arch/powerpc/platforms/86xx/gef_ppc9a.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Board setup routines for the Emerson/Artesyn MVME7100
*
* Copyright 2016 Elettra-Sincrotrone Trieste S.C.p.A.
*
* Author: Alessio Igor Bogani <[email protected]>
*
* Based on earlier code by:
*
* Ajit Prem <[email protected]>
* Copyright 2008 Emerson
*
* USB host fixup is borrowed by:
*
* Martyn Welch <[email protected]>
* Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
*/
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_address.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include "mpc86xx.h"
#define MVME7100_INTERRUPT_REG_2_OFFSET 0x05
#define MVME7100_DS1375_MASK 0x40
#define MVME7100_MAX6649_MASK 0x20
#define MVME7100_ABORT_MASK 0x10
/*
* Setup the architecture
*/
static void __init mvme7100_setup_arch(void)
{
struct device_node *bcsr_node;
void __iomem *mvme7100_regs = NULL;
u8 reg;
if (ppc_md.progress)
ppc_md.progress("mvme7100_setup_arch()", 0);
#ifdef CONFIG_SMP
mpc86xx_smp_init();
#endif
fsl_pci_assign_primary();
/* Remap BCSR registers */
bcsr_node = of_find_compatible_node(NULL, NULL,
"artesyn,mvme7100-bcsr");
if (bcsr_node) {
mvme7100_regs = of_iomap(bcsr_node, 0);
of_node_put(bcsr_node);
}
if (mvme7100_regs) {
/* Disable ds1375, max6649, and abort interrupts */
reg = readb(mvme7100_regs + MVME7100_INTERRUPT_REG_2_OFFSET);
reg |= MVME7100_DS1375_MASK | MVME7100_MAX6649_MASK
| MVME7100_ABORT_MASK;
writeb(reg, mvme7100_regs + MVME7100_INTERRUPT_REG_2_OFFSET);
} else
pr_warn("Unable to map board registers\n");
pr_info("MVME7100 board from Artesyn\n");
}
/*
* Called very early, device-tree isn't unflattened
*/
static int __init mvme7100_probe(void)
{
unsigned long root = of_get_flat_dt_root();
return of_flat_dt_is_compatible(root, "artesyn,MVME7100");
}
static void mvme7100_usb_host_fixup(struct pci_dev *pdev)
{
unsigned int val;
if (!machine_is(mvme7100))
return;
/* Ensure only ports 1 & 2 are enabled */
pci_read_config_dword(pdev, 0xe0, &val);
pci_write_config_dword(pdev, 0xe0, (val & ~7) | 0x2);
/* System clock is 48-MHz Oscillator and EHCI Enabled. */
pci_write_config_dword(pdev, 0xe4, 1 << 5);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
mvme7100_usb_host_fixup);
machine_arch_initcall(mvme7100, mpc86xx_common_publish_devices);
define_machine(mvme7100) {
.name = "MVME7100",
.probe = mvme7100_probe,
.setup_arch = mvme7100_setup_arch,
.init_IRQ = mpc86xx_init_irq,
.get_irq = mpic_get_irq,
.time_init = mpc86xx_time_init,
.progress = udbg_progress,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
#endif
};
| linux-master | arch/powerpc/platforms/86xx/mvme7100.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Author: Xianghua Xiao <[email protected]>
* Zhang Wei <[email protected]>
*
* Copyright 2006 Freescale Semiconductor Inc.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pgtable.h>
#include <asm/code-patching.h>
#include <asm/page.h>
#include <asm/pci-bridge.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
#include <asm/inst.h>
#include <sysdev/fsl_soc.h>
#include "mpc86xx.h"
extern void __secondary_start_mpc86xx(void);
#define MCM_PORT_CONFIG_OFFSET 0x10
/* Offset from CCSRBAR */
#define MPC86xx_MCM_OFFSET (0x1000)
#define MPC86xx_MCM_SIZE (0x1000)
static void __init
smp_86xx_release_core(int nr)
{
__be32 __iomem *mcm_vaddr;
unsigned long pcr;
if (nr < 0 || nr >= NR_CPUS)
return;
/*
* Startup Core #nr.
*/
mcm_vaddr = ioremap(get_immrbase() + MPC86xx_MCM_OFFSET,
MPC86xx_MCM_SIZE);
pcr = in_be32(mcm_vaddr + (MCM_PORT_CONFIG_OFFSET >> 2));
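/* One release bit per core in the top byte of the port config register (assumed layout). */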
pcr |= 1 << (nr + 24);
out_be32(mcm_vaddr + (MCM_PORT_CONFIG_OFFSET >> 2), pcr);
iounmap(mcm_vaddr);
}
static int __init
smp_86xx_kick_cpu(int nr)
{
unsigned int save_vector;
unsigned long target, flags;
int n = 0;
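/* 0x100 is the system reset exception vector the released core will start executing from. */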
unsigned int *vector = (unsigned int *)(KERNELBASE + 0x100);
if (nr < 0 || nr >= NR_CPUS)
return -ENOENT;
pr_debug("smp_86xx_kick_cpu: kick CPU #%d\n", nr);
local_irq_save(flags);
/* Save reset vector */
save_vector = *vector;
/* Setup fake reset vector to call __secondary_start_mpc86xx. */
target = (unsigned long) __secondary_start_mpc86xx;
patch_branch(vector, target, BRANCH_SET_LINK);
/* Kick that CPU */
smp_86xx_release_core(nr);
/* Wait a bit for the CPU to take the exception. */
while ((__secondary_hold_acknowledge != nr) && (n++, n < 1000))
mdelay(1);
/* Restore the exception vector */
patch_instruction(vector, ppc_inst(save_vector));
local_irq_restore(flags);
pr_debug("wait CPU #%d for %d msecs.\n", nr, n);
return 0;
}
static void __init
smp_86xx_setup_cpu(int cpu_nr)
{
mpic_setup_this_cpu();
}
struct smp_ops_t smp_86xx_ops = {
.cause_nmi_ipi = NULL,
.message_pass = smp_mpic_message_pass,
.probe = smp_mpic_probe,
.kick_cpu = smp_86xx_kick_cpu,
.setup_cpu = smp_86xx_setup_cpu,
.take_timebase = smp_generic_take_timebase,
.give_timebase = smp_generic_give_timebase,
};
void __init
mpc86xx_smp_init(void)
{
smp_ops = &smp_86xx_ops;
}
| linux-master | arch/powerpc/platforms/86xx/mpc86xx_smp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic PowerPC 40x platform support
*
* Copyright 2008 IBM Corporation
*
* This implements simple platform support for PowerPC 40x chips. This is
* mostly used for eval boards or other simple and "generic" 40x boards. If
* your board has custom functions or hardware, then you will likely want to
* implement your own board.c file to accommodate it.
*/
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/ppc4xx.h>
#include <asm/time.h>
#include <asm/udbg.h>
#include <asm/uic.h>
#include <linux/init.h>
#include <linux/of_platform.h>
static const struct of_device_id ppc40x_of_bus[] __initconst = {
{ .compatible = "ibm,plb3", },
{ .compatible = "ibm,plb4", },
{ .compatible = "ibm,opb", },
{ .compatible = "ibm,ebc", },
{ .compatible = "simple-bus", },
{},
};
static int __init ppc40x_device_probe(void)
{
of_platform_bus_probe(NULL, ppc40x_of_bus, NULL);
return 0;
}
machine_device_initcall(ppc40x_simple, ppc40x_device_probe);
/* This is the list of boards that can be supported by this simple
* platform code. This does _not_ mean the boards are compatible,
* as they most certainly are not from a device tree perspective.
* However, their differences are handled by the device tree and the
* drivers and therefore they don't need custom board support files.
*
* Again, if your board needs to do things differently then create a
* board.c file for it rather than adding it to this list.
*/
static const char * const board[] __initconst = {
"amcc,acadia",
"amcc,haleakala",
"amcc,kilauea",
"amcc,makalu",
"apm,klondike",
"est,hotfoot",
"plathome,obs600",
NULL
};
static int __init ppc40x_probe(void)
{
if (of_device_compatible_match(of_root, board)) {
pci_set_flags(PCI_REASSIGN_ALL_RSRC);
return 1;
}
return 0;
}
define_machine(ppc40x_simple) {
.name = "PowerPC 40x Platform",
.probe = ppc40x_probe,
.progress = udbg_progress,
.init_IRQ = uic_init_tree,
.get_irq = uic_get_irq,
.restart = ppc4xx_reset_system,
};
| linux-master | arch/powerpc/platforms/40x/ppc40x_simple.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Keymile km82xx support
* Copyright 2008-2011 DENX Software Engineering GmbH
* Author: Heiko Schocher <[email protected]>
*
* based on code from:
* Copyright 2007 Freescale Semiconductor, Inc.
* Author: Scott Wood <[email protected]>
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/fsl_devices.h>
#include <linux/of_platform.h>
#include <linux/io.h>
#include <asm/cpm2.h>
#include <asm/udbg.h>
#include <asm/machdep.h>
#include <linux/time.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/cpm2_pic.h>
#include "pq2.h"
static void __init km82xx_pic_init(void)
{
struct device_node *np = of_find_compatible_node(NULL, NULL,
"fsl,pq2-pic");
if (!np) {
pr_err("PIC init: can not find cpm-pic node\n");
return;
}
cpm2_pic_init(np);
of_node_put(np);
}
struct cpm_pin {
int port, pin, flags;
};
static __initdata struct cpm_pin km82xx_pins[] = {
/* SMC1 */
{2, 4, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{2, 5, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
/* SMC2 */
{0, 8, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{0, 9, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
/* SCC1 */
{2, 21, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{2, 15, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{3, 31, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{3, 30, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
/* SCC4 */
{2, 25, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{2, 24, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{2, 9, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{2, 8, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{3, 22, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{3, 21, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
/* FCC1 */
{0, 14, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{0, 15, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{0, 16, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{0, 17, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{0, 18, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{0, 19, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{0, 20, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{0, 21, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{0, 26, CPM_PIN_INPUT | CPM_PIN_SECONDARY},
{0, 27, CPM_PIN_INPUT | CPM_PIN_SECONDARY},
{0, 28, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{0, 29, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{0, 30, CPM_PIN_INPUT | CPM_PIN_SECONDARY},
{0, 31, CPM_PIN_INPUT | CPM_PIN_SECONDARY},
{2, 22, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{2, 23, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
/* FCC2 */
{1, 18, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{1, 19, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{1, 20, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{1, 21, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{1, 22, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{1, 23, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{1, 24, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{1, 25, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{1, 26, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{1, 27, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{1, 28, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{1, 29, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{1, 30, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{1, 31, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{2, 18, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{2, 19, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
/* MDC */
{0, 13, CPM_PIN_OUTPUT | CPM_PIN_GPIO},
#if defined(CONFIG_I2C_CPM)
/* I2C */
{3, 14, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_OPENDRAIN},
{3, 15, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_OPENDRAIN},
#endif
/* USB */
{0, 10, CPM_PIN_OUTPUT | CPM_PIN_GPIO}, /* FULL_SPEED */
{0, 11, CPM_PIN_OUTPUT | CPM_PIN_GPIO}, /*/SLAVE */
{2, 10, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* RXN */
{2, 11, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* RXP */
{2, 20, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, /* /OE */
{2, 27, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* RXCLK */
{3, 23, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, /* TXP */
{3, 24, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, /* TXN */
{3, 25, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* RXD */
/* SPI */
{3, 16, CPM_PIN_INPUT | CPM_PIN_SECONDARY},/* SPI_MISO PD16 */
{3, 17, CPM_PIN_INPUT | CPM_PIN_SECONDARY},/* SPI_MOSI PD17 */
{3, 18, CPM_PIN_INPUT | CPM_PIN_SECONDARY},/* SPI_CLK PD18 */
};
static void __init init_ioports(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(km82xx_pins); i++) {
const struct cpm_pin *pin = &km82xx_pins[i];
cpm2_set_pin(pin->port, pin->pin, pin->flags);
}
cpm2_smc_clk_setup(CPM_CLK_SMC2, CPM_BRG8);
cpm2_smc_clk_setup(CPM_CLK_SMC1, CPM_BRG7);
cpm2_clk_setup(CPM_CLK_SCC1, CPM_CLK11, CPM_CLK_RX);
cpm2_clk_setup(CPM_CLK_SCC1, CPM_CLK11, CPM_CLK_TX);
cpm2_clk_setup(CPM_CLK_SCC3, CPM_CLK5, CPM_CLK_RTX);
cpm2_clk_setup(CPM_CLK_SCC4, CPM_CLK7, CPM_CLK_RX);
cpm2_clk_setup(CPM_CLK_SCC4, CPM_CLK8, CPM_CLK_TX);
cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK10, CPM_CLK_RX);
cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK9, CPM_CLK_TX);
cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK13, CPM_CLK_RX);
cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK14, CPM_CLK_TX);
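/* Port pins are numbered from the MSB, hence the (31 - pin) shifts below. */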
/* Force USB FULL SPEED bit to '1' */
setbits32(&cpm2_immr->im_ioport.iop_pdata, 1 << (31 - 10));
/* clear USB_SLAVE */
clrbits32(&cpm2_immr->im_ioport.iop_pdata, 1 << (31 - 11));
}
static void __init km82xx_setup_arch(void)
{
if (ppc_md.progress)
ppc_md.progress("km82xx_setup_arch()", 0);
cpm2_reset();
/* When this is set, snooping CPM DMA from RAM causes
* machine checks. See erratum SIU18.
*/
clrbits32(&cpm2_immr->im_siu_conf.siu_82xx.sc_bcr, MPC82XX_BCR_PLDP);
init_ioports();
if (ppc_md.progress)
ppc_md.progress("km82xx_setup_arch(), finish", 0);
}
static const struct of_device_id of_bus_ids[] __initconst = {
{ .compatible = "simple-bus", },
{},
};
static int __init declare_of_platform_devices(void)
{
of_platform_bus_probe(NULL, of_bus_ids, NULL);
return 0;
}
machine_device_initcall(km82xx, declare_of_platform_devices);
define_machine(km82xx)
{
.name = "Keymile km82xx",
.compatible = "keymile,km82xx",
.setup_arch = km82xx_setup_arch,
.init_IRQ = km82xx_pic_init,
.get_irq = cpm2_get_irq,
.restart = pq2_restart,
.progress = udbg_progress,
};
| linux-master | arch/powerpc/platforms/82xx/km82xx.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Embedded Planet EP8248E support
*
* Copyright 2007 Freescale Semiconductor, Inc.
* Author: Scott Wood <[email protected]>
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/fsl_devices.h>
#include <linux/mdio-bitbang.h>
#include <linux/of_mdio.h>
#include <linux/slab.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/cpm2.h>
#include <asm/udbg.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/cpm2_pic.h>
#include "pq2.h"
static u8 __iomem *ep8248e_bcsr;
static struct device_node *ep8248e_bcsr_node;
#define BCSR7_SCC2_ENABLE 0x10
#define BCSR8_PHY1_ENABLE 0x80
#define BCSR8_PHY1_POWER 0x40
#define BCSR8_PHY2_ENABLE 0x20
#define BCSR8_PHY2_POWER 0x10
#define BCSR8_MDIO_READ 0x04
#define BCSR8_MDIO_CLOCK 0x02
#define BCSR8_MDIO_DATA 0x01
#define BCSR9_USB_ENABLE 0x80
#define BCSR9_USB_POWER 0x40
#define BCSR9_USB_HOST 0x20
#define BCSR9_USB_FULL_SPEED_TARGET 0x10
static void __init ep8248e_pic_init(void)
{
struct device_node *np = of_find_compatible_node(NULL, NULL, "fsl,pq2-pic");
if (!np) {
printk(KERN_ERR "PIC init: can not find cpm-pic node\n");
return;
}
cpm2_pic_init(np);
of_node_put(np);
}
static void ep8248e_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
if (level)
setbits8(&ep8248e_bcsr[8], BCSR8_MDIO_CLOCK);
else
clrbits8(&ep8248e_bcsr[8], BCSR8_MDIO_CLOCK);
/* Read back to flush the write. */
in_8(&ep8248e_bcsr[8]);
}
static void ep8248e_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
if (output)
clrbits8(&ep8248e_bcsr[8], BCSR8_MDIO_READ);
else
setbits8(&ep8248e_bcsr[8], BCSR8_MDIO_READ);
/* Read back to flush the write. */
in_8(&ep8248e_bcsr[8]);
}
static void ep8248e_set_mdio_data(struct mdiobb_ctrl *ctrl, int data)
{
if (data)
setbits8(&ep8248e_bcsr[8], BCSR8_MDIO_DATA);
else
clrbits8(&ep8248e_bcsr[8], BCSR8_MDIO_DATA);
/* Read back to flush the write. */
in_8(&ep8248e_bcsr[8]);
}
static int ep8248e_get_mdio_data(struct mdiobb_ctrl *ctrl)
{
return in_8(&ep8248e_bcsr[8]) & BCSR8_MDIO_DATA;
}
static const struct mdiobb_ops ep8248e_mdio_ops = {
.set_mdc = ep8248e_set_mdc,
.set_mdio_dir = ep8248e_set_mdio_dir,
.set_mdio_data = ep8248e_set_mdio_data,
.get_mdio_data = ep8248e_get_mdio_data,
.owner = THIS_MODULE,
};
static struct mdiobb_ctrl ep8248e_mdio_ctrl = {
.ops = &ep8248e_mdio_ops,
};
static int ep8248e_mdio_probe(struct platform_device *ofdev)
{
struct mii_bus *bus;
struct resource res;
struct device_node *node;
int ret;
node = of_get_parent(ofdev->dev.of_node);
of_node_put(node);
if (node != ep8248e_bcsr_node)
return -ENODEV;
ret = of_address_to_resource(ofdev->dev.of_node, 0, &res);
if (ret)
return ret;
bus = alloc_mdio_bitbang(&ep8248e_mdio_ctrl);
if (!bus)
return -ENOMEM;
bus->name = "ep8248e-mdio-bitbang";
bus->parent = &ofdev->dev;
snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
ret = of_mdiobus_register(bus, ofdev->dev.of_node);
if (ret)
goto err_free_bus;
return 0;
err_free_bus:
free_mdio_bitbang(bus);
return ret;
}
static const struct of_device_id ep8248e_mdio_match[] = {
{
.compatible = "fsl,ep8248e-mdio-bitbang",
},
{},
};
static struct platform_driver ep8248e_mdio_driver = {
.driver = {
.name = "ep8248e-mdio-bitbang",
.of_match_table = ep8248e_mdio_match,
.suppress_bind_attrs = true,
},
.probe = ep8248e_mdio_probe,
};
struct cpm_pin {
int port, pin, flags;
};
static __initdata struct cpm_pin ep8248e_pins[] = {
/* SMC1 */
{2, 4, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{2, 5, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
/* SCC1 */
{2, 14, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{2, 15, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{3, 29, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{3, 30, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{3, 31, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
/* FCC1 */
{0, 14, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{0, 15, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{0, 16, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{0, 17, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{0, 18, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{0, 19, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{0, 20, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{0, 21, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{0, 26, CPM_PIN_INPUT | CPM_PIN_SECONDARY},
{0, 27, CPM_PIN_INPUT | CPM_PIN_SECONDARY},
{0, 28, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{0, 29, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{0, 30, CPM_PIN_INPUT | CPM_PIN_SECONDARY},
{0, 31, CPM_PIN_INPUT | CPM_PIN_SECONDARY},
{2, 21, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{2, 22, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
/* FCC2 */
{1, 18, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{1, 19, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{1, 20, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{1, 21, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{1, 22, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{1, 23, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{1, 24, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{1, 25, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{1, 26, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{1, 27, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{1, 28, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{1, 29, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
{1, 30, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{1, 31, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{2, 18, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{2, 19, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
/* I2C */
{4, 14, CPM_PIN_INPUT | CPM_PIN_SECONDARY},
{4, 15, CPM_PIN_INPUT | CPM_PIN_SECONDARY},
/* USB */
{2, 10, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{2, 11, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{2, 20, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{2, 24, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
{3, 23, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{3, 24, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
{3, 25, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
};
static void __init init_ioports(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(ep8248e_pins); i++) {
const struct cpm_pin *pin = &ep8248e_pins[i];
cpm2_set_pin(pin->port, pin->pin, pin->flags);
}
cpm2_smc_clk_setup(CPM_CLK_SMC1, CPM_BRG7);
cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_RX);
cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_TX);
cpm2_clk_setup(CPM_CLK_SCC3, CPM_CLK8, CPM_CLK_TX); /* USB */
cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK11, CPM_CLK_RX);
cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK10, CPM_CLK_TX);
cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK13, CPM_CLK_RX);
cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK14, CPM_CLK_TX);
}
static void __init ep8248e_setup_arch(void)
{
if (ppc_md.progress)
ppc_md.progress("ep8248e_setup_arch()", 0);
cpm2_reset();
/* When this is set, snooping CPM DMA from RAM causes
* machine checks. See erratum SIU18.
*/
clrbits32(&cpm2_immr->im_siu_conf.siu_82xx.sc_bcr, MPC82XX_BCR_PLDP);
ep8248e_bcsr_node =
of_find_compatible_node(NULL, NULL, "fsl,ep8248e-bcsr");
if (!ep8248e_bcsr_node) {
printk(KERN_ERR "No bcsr in device tree\n");
return;
}
ep8248e_bcsr = of_iomap(ep8248e_bcsr_node, 0);
if (!ep8248e_bcsr) {
printk(KERN_ERR "Cannot map BCSR registers\n");
of_node_put(ep8248e_bcsr_node);
ep8248e_bcsr_node = NULL;
return;
}
setbits8(&ep8248e_bcsr[7], BCSR7_SCC2_ENABLE);
setbits8(&ep8248e_bcsr[8], BCSR8_PHY1_ENABLE | BCSR8_PHY1_POWER |
BCSR8_PHY2_ENABLE | BCSR8_PHY2_POWER);
init_ioports();
if (ppc_md.progress)
ppc_md.progress("ep8248e_setup_arch(), finish", 0);
}
static const struct of_device_id of_bus_ids[] __initconst = {
{ .compatible = "simple-bus", },
{ .compatible = "fsl,ep8248e-bcsr", },
{},
};
static int __init declare_of_platform_devices(void)
{
of_platform_bus_probe(NULL, of_bus_ids, NULL);
if (IS_ENABLED(CONFIG_MDIO_BITBANG))
platform_driver_register(&ep8248e_mdio_driver);
return 0;
}
machine_device_initcall(ep8248e, declare_of_platform_devices);
define_machine(ep8248e)
{
.name = "Embedded Planet EP8248E",
.compatible = "fsl,ep8248e",
.setup_arch = ep8248e_setup_arch,
.init_IRQ = ep8248e_pic_init,
.get_irq = cpm2_get_irq,
.restart = pq2_restart,
.progress = udbg_progress,
};
| linux-master | arch/powerpc/platforms/82xx/ep8248e.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Common PowerQUICC II code.
*
* Author: Scott Wood <[email protected]>
* Copyright (c) 2007 Freescale Semiconductor
*
* Based on code by Vitaly Bordug <[email protected]>
* pq2_restart fix by Wade Farnsworth <[email protected]>
* Copyright (c) 2006 MontaVista Software, Inc.
*/
#include <linux/kprobes.h>
#include <asm/cpm2.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <platforms/82xx/pq2.h>
#define RMR_CSRE 0x00000001
void __noreturn pq2_restart(char *cmd)
{
local_irq_disable();
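/* RMR[CSRE]: allow a checkstop to trigger a hardware reset. */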
setbits32(&cpm2_immr->im_clkrst.car_rmr, RMR_CSRE);
/* Clear the ME,EE,IR & DR bits in MSR to cause checkstop */
mtmsr(mfmsr() & ~(MSR_ME | MSR_EE | MSR_IR | MSR_DR));
in_8(&cpm2_immr->im_clkrst.res[0]);
panic("Restart failed\n");
}
NOKPROBE_SYMBOL(pq2_restart)
| linux-master | arch/powerpc/platforms/82xx/pq2.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* AmigaOne platform setup
*
* Copyright 2008 Gerhard Pircher ([email protected])
*
* Based on original amigaone_setup.c source code
* Copyright 2003 by Hans-Joerg Frieden and Thomas Frieden
*/
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/seq_file.h>
#include <generated/utsrelease.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/pci-bridge.h>
#include <asm/i8259.h>
#include <asm/time.h>
#include <asm/udbg.h>
#include <asm/dma.h>
extern void __flush_disable_L1(void);
void amigaone_show_cpuinfo(struct seq_file *m)
{
seq_printf(m, "vendor\t\t: Eyetech Ltd.\n");
}
static int __init amigaone_add_bridge(struct device_node *dev)
{
const u32 *cfg_addr, *cfg_data;
int len;
const int *bus_range;
struct pci_controller *hose;
printk(KERN_INFO "Adding PCI host bridge %pOF\n", dev);
cfg_addr = of_get_address(dev, 0, NULL, NULL);
cfg_data = of_get_address(dev, 1, NULL, NULL);
if ((cfg_addr == NULL) || (cfg_data == NULL))
return -ENODEV;
bus_range = of_get_property(dev, "bus-range", &len);
if ((bus_range == NULL) || (len < 2 * sizeof(int)))
printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
" bus 0\n", dev);
hose = pcibios_alloc_controller(dev);
if (hose == NULL)
return -ENOMEM;
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
setup_indirect_pci(hose, cfg_addr[0], cfg_data[0], 0);
/* Interpret the "ranges" property */
/* This also maps the I/O region and sets isa_io/mem_base */
pci_process_bridge_OF_ranges(hose, dev, 1);
return 0;
}
void __init amigaone_setup_arch(void)
{
if (ppc_md.progress)
ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0);
}
static void __init amigaone_discover_phbs(void)
{
struct device_node *np;
int phb = -ENODEV;
/* Lookup PCI host bridges. */
for_each_compatible_node(np, "pci", "mai-logic,articia-s")
phb = amigaone_add_bridge(np);
BUG_ON(phb != 0);
}
void __init amigaone_init_IRQ(void)
{
struct device_node *pic, *np = NULL;
const unsigned long *prop = NULL;
unsigned long int_ack = 0;
/* Search for ISA interrupt controller. */
pic = of_find_compatible_node(NULL, "interrupt-controller",
"pnpPNP,000");
BUG_ON(pic == NULL);
/* Look for interrupt acknowledge address in the PCI root node. */
np = of_find_compatible_node(NULL, "pci", "mai-logic,articia-s");
if (np) {
prop = of_get_property(np, "8259-interrupt-acknowledge", NULL);
if (prop)
int_ack = prop[0];
of_node_put(np);
}
if (int_ack == 0)
printk(KERN_WARNING "Cannot find PCI interrupt acknowledge"
" address, polling\n");
i8259_init(pic, int_ack);
ppc_md.get_irq = i8259_irq;
irq_set_default_host(i8259_get_host());
}
static int __init request_isa_regions(void)
{
request_region(0x00, 0x20, "dma1");
request_region(0x40, 0x20, "timer");
request_region(0x80, 0x10, "dma page reg");
request_region(0xc0, 0x20, "dma2");
return 0;
}
machine_device_initcall(amigaone, request_isa_regions);
void __noreturn amigaone_restart(char *cmd)
{
local_irq_disable();
/* Flush and disable caches. */
__flush_disable_L1();
/* Set SRR0 to the reset vector and turn on MSR_IP. */
mtspr(SPRN_SRR0, 0xfff00100);
mtspr(SPRN_SRR1, MSR_IP);
/* Do an rfi to jump back to firmware. */
__asm__ __volatile__("rfi" : : : "memory");
/* Not reached. */
while (1);
}
static int __init amigaone_probe(void)
{
/*
* Coherent memory accesses cause a complete system lockup! Thus
* disable this CPU feature, even if the CPU needs it.
*/
cur_cpu_spec->cpu_features &= ~CPU_FTR_NEED_COHERENT;
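/* Standard ISA 8237 single-transfer DMA mode values (I/O-to-memory read, memory-to-I/O write). */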
DMA_MODE_READ = 0x44;
DMA_MODE_WRITE = 0x48;
return 1;
}
define_machine(amigaone) {
.name = "AmigaOne",
.compatible = "eyetech,amigaone",
.probe = amigaone_probe,
.setup_arch = amigaone_setup_arch,
.discover_phbs = amigaone_discover_phbs,
.show_cpuinfo = amigaone_show_cpuinfo,
.init_IRQ = amigaone_init_IRQ,
.restart = amigaone_restart,
.progress = udbg_progress,
};
| linux-master | arch/powerpc/platforms/amigaone/setup.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* pmi driver
*
* (C) Copyright IBM Deutschland Entwicklung GmbH 2005
*
* PMI (Platform Management Interrupt) is a way to communicate
* with the BMC (Baseboard Management Controller) via interrupts.
* Unlike IPMI it is bidirectional and has a low latency.
*
* Author: Christian Krafft <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/workqueue.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/pmi.h>
struct pmi_data {
struct list_head handler;
spinlock_t handler_spinlock;
spinlock_t pmi_spinlock;
struct mutex msg_mutex;
pmi_message_t msg;
struct completion *completion;
struct platform_device *dev;
int irq;
u8 __iomem *pmi_reg;
struct work_struct work;
};
static struct pmi_data *data;
static irqreturn_t pmi_irq_handler(int irq, void *dev_id)
{
u8 type;
int rc;
spin_lock(&data->pmi_spinlock);
type = ioread8(data->pmi_reg + PMI_READ_TYPE);
pr_debug("pmi: got message of type %d\n", type);
if (type & PMI_ACK && !data->completion) {
printk(KERN_WARNING "pmi: got unexpected ACK message.\n");
rc = -EIO;
goto unlock;
}
if (data->completion && !(type & PMI_ACK)) {
printk(KERN_WARNING "pmi: expected ACK, but got %d\n", type);
rc = -EIO;
goto unlock;
}
data->msg.type = type;
data->msg.data0 = ioread8(data->pmi_reg + PMI_READ_DATA0);
data->msg.data1 = ioread8(data->pmi_reg + PMI_READ_DATA1);
data->msg.data2 = ioread8(data->pmi_reg + PMI_READ_DATA2);
rc = 0;
unlock:
spin_unlock(&data->pmi_spinlock);
if (rc == -EIO) {
rc = IRQ_HANDLED;
goto out;
}
if (data->msg.type & PMI_ACK) {
complete(data->completion);
rc = IRQ_HANDLED;
goto out;
}
schedule_work(&data->work);
rc = IRQ_HANDLED;
out:
return rc;
}
static const struct of_device_id pmi_match[] = {
{ .type = "ibm,pmi", .name = "ibm,pmi" },
{ .type = "ibm,pmi" },
{},
};
MODULE_DEVICE_TABLE(of, pmi_match);
static void pmi_notify_handlers(struct work_struct *work)
{
struct pmi_handler *handler;
spin_lock(&data->handler_spinlock);
list_for_each_entry(handler, &data->handler, node) {
pr_debug("pmi: notifying handler %p\n", handler);
if (handler->type == data->msg.type)
handler->handle_pmi_message(data->msg);
}
spin_unlock(&data->handler_spinlock);
}
static int pmi_of_probe(struct platform_device *dev)
{
struct device_node *np = dev->dev.of_node;
int rc;
if (data) {
printk(KERN_ERR "pmi: driver has already been initialized.\n");
rc = -EBUSY;
goto out;
}
data = kzalloc(sizeof(struct pmi_data), GFP_KERNEL);
if (!data) {
printk(KERN_ERR "pmi: could not allocate memory.\n");
rc = -ENOMEM;
goto out;
}
data->pmi_reg = of_iomap(np, 0);
if (!data->pmi_reg) {
printk(KERN_ERR "pmi: invalid register address.\n");
rc = -EFAULT;
goto error_cleanup_data;
}
INIT_LIST_HEAD(&data->handler);
mutex_init(&data->msg_mutex);
spin_lock_init(&data->pmi_spinlock);
spin_lock_init(&data->handler_spinlock);
INIT_WORK(&data->work, pmi_notify_handlers);
data->dev = dev;
data->irq = irq_of_parse_and_map(np, 0);
if (!data->irq) {
printk(KERN_ERR "pmi: invalid interrupt.\n");
rc = -EFAULT;
goto error_cleanup_iomap;
}
rc = request_irq(data->irq, pmi_irq_handler, 0, "pmi", NULL);
if (rc) {
printk(KERN_ERR "pmi: can't request IRQ %d: returned %d\n",
data->irq, rc);
goto error_cleanup_iomap;
}
printk(KERN_INFO "pmi: found pmi device at addr %p.\n", data->pmi_reg);
goto out;
error_cleanup_iomap:
iounmap(data->pmi_reg);
error_cleanup_data:
kfree(data);
out:
return rc;
}
static int pmi_of_remove(struct platform_device *dev)
{
struct pmi_handler *handler, *tmp;
free_irq(data->irq, NULL);
iounmap(data->pmi_reg);
spin_lock(&data->handler_spinlock);
list_for_each_entry_safe(handler, tmp, &data->handler, node)
list_del(&handler->node);
spin_unlock(&data->handler_spinlock);
kfree(data);
data = NULL;
return 0;
}
static struct platform_driver pmi_of_platform_driver = {
.probe = pmi_of_probe,
.remove = pmi_of_remove,
.driver = {
.name = "pmi",
.of_match_table = pmi_match,
},
};
module_platform_driver(pmi_of_platform_driver);
int pmi_send_message(pmi_message_t msg)
{
unsigned long flags;
DECLARE_COMPLETION_ONSTACK(completion);
if (!data)
return -ENODEV;
mutex_lock(&data->msg_mutex);
data->msg = msg;
pr_debug("pmi_send_message: msg is %08x\n", *(u32*)&msg);
data->completion = &completion;
spin_lock_irqsave(&data->pmi_spinlock, flags);
iowrite8(msg.data0, data->pmi_reg + PMI_WRITE_DATA0);
iowrite8(msg.data1, data->pmi_reg + PMI_WRITE_DATA1);
iowrite8(msg.data2, data->pmi_reg + PMI_WRITE_DATA2);
iowrite8(msg.type, data->pmi_reg + PMI_WRITE_TYPE);
spin_unlock_irqrestore(&data->pmi_spinlock, flags);
pr_debug("pmi_send_message: wait for completion\n");
wait_for_completion_interruptible_timeout(data->completion,
PMI_TIMEOUT);
data->completion = NULL;
mutex_unlock(&data->msg_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(pmi_send_message);
int pmi_register_handler(struct pmi_handler *handler)
{
if (!data)
return -ENODEV;
spin_lock(&data->handler_spinlock);
list_add_tail(&handler->node, &data->handler);
spin_unlock(&data->handler_spinlock);
return 0;
}
EXPORT_SYMBOL_GPL(pmi_register_handler);
void pmi_unregister_handler(struct pmi_handler *handler)
{
if (!data)
return;
pr_debug("pmi: unregistering handler %p\n", handler);
spin_lock(&data->handler_spinlock);
list_del(&handler->node);
spin_unlock(&data->handler_spinlock);
}
EXPORT_SYMBOL_GPL(pmi_unregister_handler);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Krafft <[email protected]>");
MODULE_DESCRIPTION("IBM Platform Management Interrupt driver");
| linux-master | arch/powerpc/sysdev/pmi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Freescale MPC85xx/MPC86xx RapidIO support
*
* Copyright 2009 Sysgo AG
* Thomas Moll <[email protected]>
* - fixed maintenance access routines, check for aligned access
*
* Copyright 2009 Integrated Device Technology, Inc.
* Alex Bounine <[email protected]>
* - Added Port-Write message handling
* - Added Machine Check exception handling
*
* Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc.
* Zhang Wei <[email protected]>
*
* Copyright 2005 MontaVista Software, Inc.
* Matt Porter <[email protected]>
*/
#include <linux/init.h>
#include <linux/extable.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/machdep.h>
#include <asm/rio.h>
#include "fsl_rio.h"
#undef DEBUG_PW /* Port-Write debugging */
#define RIO_PORT1_EDCSR 0x0640
#define RIO_PORT2_EDCSR 0x0680
#define RIO_PORT1_IECSR 0x10130
#define RIO_PORT2_IECSR 0x101B0
#define RIO_GCCSR 0x13c
#define RIO_ESCSR 0x158
#define ESCSR_CLEAR 0x07120204
#define RIO_PORT2_ESCSR 0x178
#define RIO_CCSR 0x15c
#define RIO_LTLEDCSR_IER 0x80000000
#define RIO_LTLEDCSR_PRT 0x01000000
#define IECSR_CLEAR 0x80000000
#define RIO_ISR_AACR 0x10120
#define RIO_ISR_AACR_AA 0x1 /* Accept All ID */
#define RIWTAR_TRAD_VAL_SHIFT 12
#define RIWTAR_TRAD_MASK 0x00FFFFFF
#define RIWBAR_BADD_VAL_SHIFT 12
#define RIWBAR_BADD_MASK 0x003FFFFF
#define RIWAR_ENABLE 0x80000000
#define RIWAR_TGINT_LOCAL 0x00F00000
#define RIWAR_RDTYP_NO_SNOOP 0x00040000
#define RIWAR_RDTYP_SNOOP 0x00050000
#define RIWAR_WRTYP_NO_SNOOP 0x00004000
#define RIWAR_WRTYP_SNOOP 0x00005000
#define RIWAR_WRTYP_ALLOC 0x00006000
#define RIWAR_SIZE_MASK 0x0000003F
static DEFINE_SPINLOCK(fsl_rio_config_lock);
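/*
 * Issue a single load from the maintenance window with an exception
 * table fixup: if the access faults (see fsl_rio_mcheck_exception()),
 * the fixup code stores -1 in the data and -EFAULT in err instead of
 * taking the machine down.
 */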
#define ___fsl_read_rio_config(x, addr, err, op, barrier) \
__asm__ __volatile__( \
"1: "op" %1,0(%2)\n" \
" "barrier"\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: li %1,-1\n" \
" li %0,%3\n" \
" b 2b\n" \
".previous\n" \
EX_TABLE(1b, 3b) \
: "=r" (err), "=r" (x) \
: "b" (addr), "i" (-EFAULT), "0" (err))
#ifdef CONFIG_BOOKE
#define __fsl_read_rio_config(x, addr, err, op) \
___fsl_read_rio_config(x, addr, err, op, "mbar")
#else
#define __fsl_read_rio_config(x, addr, err, op) \
___fsl_read_rio_config(x, addr, err, op, "eieio")
#endif
void __iomem *rio_regs_win;
void __iomem *rmu_regs_win;
resource_size_t rio_law_start;
struct fsl_rio_dbell *dbell;
struct fsl_rio_pw *pw;
#ifdef CONFIG_PPC_E500
int fsl_rio_mcheck_exception(struct pt_regs *regs)
{
const struct exception_table_entry *entry;
unsigned long reason;
if (!rio_regs_win)
return 0;
reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR));
if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) {
/* Check if we are prepared to handle this fault */
entry = search_exception_tables(regs->nip);
if (entry) {
pr_debug("RIO: %s - MC Exception handled\n",
__func__);
out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
0);
regs_set_recoverable(regs);
regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(fsl_rio_mcheck_exception);
#endif
/**
* fsl_local_config_read - Generate a MPC85xx local config space read
* @mport: RapidIO master port info
* @index: ID of RapidIO interface
* @offset: Offset into configuration space
* @len: Length (in bytes) of the maintenance transaction
* @data: Value to be read into
*
* Generates a MPC85xx local configuration space read. Returns %0 on
* success or %-EINVAL on failure.
*/
static int fsl_local_config_read(struct rio_mport *mport,
int index, u32 offset, int len, u32 *data)
{
struct rio_priv *priv = mport->priv;
pr_debug("fsl_local_config_read: index %d offset %8.8x\n", index,
offset);
*data = in_be32(priv->regs_win + offset);
return 0;
}
/**
* fsl_local_config_write - Generate a MPC85xx local config space write
* @mport: RapidIO master port info
* @index: ID of RapidIO interface
* @offset: Offset into configuration space
* @len: Length (in bytes) of the maintenance transaction
* @data: Value to be written
*
 * Generates an MPC85xx local configuration space write. Returns %0 on
* success or %-EINVAL on failure.
*/
static int fsl_local_config_write(struct rio_mport *mport,
int index, u32 offset, int len, u32 data)
{
struct rio_priv *priv = mport->priv;
pr_debug
("fsl_local_config_write: index %d offset %8.8x data %8.8x\n",
index, offset, data);
out_be32(priv->regs_win + offset, data);
return 0;
}
/**
* fsl_rio_config_read - Generate a MPC85xx read maintenance transaction
* @mport: RapidIO master port info
 * @index: ID of RapidIO interface
* @destid: Destination ID of transaction
* @hopcount: Number of hops to target device
* @offset: Offset into configuration space
* @len: Length (in bytes) of the maintenance transaction
* @val: Location to be read into
*
 * Generates an MPC85xx read maintenance transaction. Returns %0 on
* success or %-EINVAL on failure.
*/
static int
fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
u8 hopcount, u32 offset, int len, u32 *val)
{
struct rio_priv *priv = mport->priv;
unsigned long flags;
u8 *data;
u32 rval, err = 0;
pr_debug
("fsl_rio_config_read:"
" index %d destid %d hopcount %d offset %8.8x len %d\n",
index, destid, hopcount, offset, len);
/* 16MB maintenance window possible */
/* allow only aligned access to maintenance registers */
if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
return -EINVAL;
spin_lock_irqsave(&fsl_rio_config_lock, flags);
out_be32(&priv->maint_atmu_regs->rowtar,
(destid << 22) | (hopcount << 12) | (offset >> 12));
out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1));
switch (len) {
case 1:
__fsl_read_rio_config(rval, data, err, "lbz");
break;
case 2:
__fsl_read_rio_config(rval, data, err, "lhz");
break;
case 4:
__fsl_read_rio_config(rval, data, err, "lwz");
break;
default:
spin_unlock_irqrestore(&fsl_rio_config_lock, flags);
return -EINVAL;
}
if (err) {
pr_debug("RIO: cfg_read error %d for %x:%x:%x\n",
err, destid, hopcount, offset);
}
spin_unlock_irqrestore(&fsl_rio_config_lock, flags);
*val = rval;
return err;
}
/**
* fsl_rio_config_write - Generate a MPC85xx write maintenance transaction
* @mport: RapidIO master port info
 * @index: ID of RapidIO interface
* @destid: Destination ID of transaction
* @hopcount: Number of hops to target device
* @offset: Offset into configuration space
* @len: Length (in bytes) of the maintenance transaction
* @val: Value to be written
*
* Generates an MPC85xx write maintenance transaction. Returns %0 on
* success or %-EINVAL on failure.
*/
static int
fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
u8 hopcount, u32 offset, int len, u32 val)
{
struct rio_priv *priv = mport->priv;
unsigned long flags;
u8 *data;
int ret = 0;
pr_debug
("fsl_rio_config_write:"
" index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
index, destid, hopcount, offset, len, val);
/* 16MB maintenance window possible */
/* allow only aligned access to maintenance registers */
if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len))
return -EINVAL;
spin_lock_irqsave(&fsl_rio_config_lock, flags);
out_be32(&priv->maint_atmu_regs->rowtar,
(destid << 22) | (hopcount << 12) | (offset >> 12));
out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10));
data = (u8 *) priv->maint_win + (offset & (RIO_MAINT_WIN_SIZE - 1));
switch (len) {
case 1:
out_8((u8 *) data, val);
break;
case 2:
out_be16((u16 *) data, val);
break;
case 4:
out_be32((u32 *) data, val);
break;
default:
ret = -EINVAL;
}
spin_unlock_irqrestore(&fsl_rio_config_lock, flags);
return ret;
}
static void fsl_rio_inbound_mem_init(struct rio_priv *priv)
{
int i;
/* close inbound windows */
for (i = 0; i < RIO_INB_ATMU_COUNT; i++)
out_be32(&priv->inb_atmu_regs[i].riwar, 0);
}
static int fsl_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
u64 rstart, u64 size, u32 flags)
{
struct rio_priv *priv = mport->priv;
u32 base_size;
unsigned int base_size_log;
u64 win_start, win_end;
u32 riwar;
int i;
if ((size & (size - 1)) != 0 || size > 0x400000000ULL)
return -EINVAL;
base_size_log = ilog2(size);
base_size = 1 << base_size_log;
/* check if addresses are aligned with the window size */
if (lstart & (base_size - 1))
return -EINVAL;
if (rstart & (base_size - 1))
return -EINVAL;
/* check for conflicting ranges */
for (i = 0; i < RIO_INB_ATMU_COUNT; i++) {
riwar = in_be32(&priv->inb_atmu_regs[i].riwar);
if ((riwar & RIWAR_ENABLE) == 0)
continue;
win_start = ((u64)(in_be32(&priv->inb_atmu_regs[i].riwbar) & RIWBAR_BADD_MASK))
<< RIWBAR_BADD_VAL_SHIFT;
win_end = win_start + ((1 << ((riwar & RIWAR_SIZE_MASK) + 1)) - 1);
if (rstart < win_end && (rstart + size) > win_start)
return -EINVAL;
}
/* find unused atmu */
for (i = 0; i < RIO_INB_ATMU_COUNT; i++) {
riwar = in_be32(&priv->inb_atmu_regs[i].riwar);
if ((riwar & RIWAR_ENABLE) == 0)
break;
}
if (i >= RIO_INB_ATMU_COUNT)
return -ENOMEM;
out_be32(&priv->inb_atmu_regs[i].riwtar, lstart >> RIWTAR_TRAD_VAL_SHIFT);
out_be32(&priv->inb_atmu_regs[i].riwbar, rstart >> RIWBAR_BADD_VAL_SHIFT);
out_be32(&priv->inb_atmu_regs[i].riwar, RIWAR_ENABLE | RIWAR_TGINT_LOCAL |
RIWAR_RDTYP_SNOOP | RIWAR_WRTYP_SNOOP | (base_size_log - 1));
return 0;
}
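/*
 * Example (sketch, not called from this file): drivers reach the window
 * setup above through the generic RapidIO inbound mapping API. Assuming the
 * usual rio_map_inb_region() entry point, exposing a 1 MiB local DMA buffer
 * at RapidIO address 0x10000000 could look like:
 *
 *	err = rio_map_inb_region(mport, dma_handle, 0x10000000, 0x100000, 0);
 *
 * Both addresses must be aligned to the (power-of-two) window size, or the
 * checks above return -EINVAL.
 */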
static void fsl_unmap_inb_mem(struct rio_mport *mport, dma_addr_t lstart)
{
u32 win_start_shift, base_start_shift;
struct rio_priv *priv = mport->priv;
u32 riwar, riwtar;
int i;
/* skip default window */
base_start_shift = lstart >> RIWTAR_TRAD_VAL_SHIFT;
for (i = 0; i < RIO_INB_ATMU_COUNT; i++) {
riwar = in_be32(&priv->inb_atmu_regs[i].riwar);
if ((riwar & RIWAR_ENABLE) == 0)
continue;
riwtar = in_be32(&priv->inb_atmu_regs[i].riwtar);
win_start_shift = riwtar & RIWTAR_TRAD_MASK;
if (win_start_shift == base_start_shift) {
out_be32(&priv->inb_atmu_regs[i].riwar, riwar & ~RIWAR_ENABLE);
return;
}
}
}
void fsl_rio_port_error_handler(int offset)
{
/* XXX: Error recovery is not implemented, we just clear errors */
out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
if (offset == 0) {
out_be32((u32 *)(rio_regs_win + RIO_PORT1_EDCSR), 0);
out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), IECSR_CLEAR);
out_be32((u32 *)(rio_regs_win + RIO_ESCSR), ESCSR_CLEAR);
} else {
out_be32((u32 *)(rio_regs_win + RIO_PORT2_EDCSR), 0);
out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), IECSR_CLEAR);
out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR);
}
}
static inline void fsl_rio_info(struct device *dev, u32 ccsr)
{
const char *str;
if (ccsr & 1) {
/* Serial phy */
switch (ccsr >> 30) {
case 0:
str = "1";
break;
case 1:
str = "4";
break;
default:
str = "Unknown";
break;
}
dev_info(dev, "Hardware port width: %s\n", str);
switch ((ccsr >> 27) & 7) {
case 0:
str = "Single-lane 0";
break;
case 1:
str = "Single-lane 2";
break;
case 2:
str = "Four-lane";
break;
default:
str = "Unknown";
break;
}
dev_info(dev, "Training connection status: %s\n", str);
} else {
/* Parallel phy */
if (!(ccsr & 0x80000000))
dev_info(dev, "Output port operating in 8-bit mode\n");
if (!(ccsr & 0x08000000))
dev_info(dev, "Input port operating in 8-bit mode\n");
}
}
/**
* fsl_rio_setup - Setup Freescale PowerPC RapidIO interface
* @dev: platform_device pointer
*
* Initializes MPC85xx RapidIO hardware interface, configures
* master port with system-specific info, and registers the
* master port with the RapidIO subsystem.
*/
static int fsl_rio_setup(struct platform_device *dev)
{
struct rio_ops *ops;
struct rio_mport *port;
struct rio_priv *priv;
int rc = 0;
const u32 *port_index;
u32 active_ports = 0;
struct device_node *np, *rmu_node;
u32 ccsr;
u64 range_start;
u32 i;
static int tmp;
struct device_node *rmu_np[MAX_MSG_UNIT_NUM] = {NULL};
if (!dev->dev.of_node) {
dev_err(&dev->dev, "Device OF-Node is NULL");
return -ENODEV;
}
rio_regs_win = of_iomap(dev->dev.of_node, 0);
if (!rio_regs_win) {
dev_err(&dev->dev, "Unable to map rio register window\n");
rc = -ENOMEM;
goto err_rio_regs;
}
ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL);
if (!ops) {
rc = -ENOMEM;
goto err_ops;
}
ops->lcread = fsl_local_config_read;
ops->lcwrite = fsl_local_config_write;
ops->cread = fsl_rio_config_read;
ops->cwrite = fsl_rio_config_write;
ops->dsend = fsl_rio_doorbell_send;
ops->pwenable = fsl_rio_pw_enable;
ops->open_outb_mbox = fsl_open_outb_mbox;
ops->open_inb_mbox = fsl_open_inb_mbox;
ops->close_outb_mbox = fsl_close_outb_mbox;
ops->close_inb_mbox = fsl_close_inb_mbox;
ops->add_outb_message = fsl_add_outb_message;
ops->add_inb_buffer = fsl_add_inb_buffer;
ops->get_inb_message = fsl_get_inb_message;
ops->map_inb = fsl_map_inb_mem;
ops->unmap_inb = fsl_unmap_inb_mem;
rmu_node = of_parse_phandle(dev->dev.of_node, "fsl,srio-rmu-handle", 0);
if (!rmu_node) {
dev_err(&dev->dev, "No valid fsl,srio-rmu-handle property\n");
rc = -ENOENT;
goto err_rmu;
}
rmu_regs_win = of_iomap(rmu_node, 0);
of_node_put(rmu_node);
if (!rmu_regs_win) {
dev_err(&dev->dev, "Unable to map rmu register window\n");
rc = -ENOMEM;
goto err_rmu;
}
for_each_compatible_node(np, NULL, "fsl,srio-msg-unit") {
rmu_np[tmp] = np;
tmp++;
}
/* set up doorbell node */
np = of_find_compatible_node(NULL, NULL, "fsl,srio-dbell-unit");
if (!np) {
dev_err(&dev->dev, "No fsl,srio-dbell-unit node\n");
rc = -ENODEV;
goto err_dbell;
}
dbell = kzalloc(sizeof(struct fsl_rio_dbell), GFP_KERNEL);
if (!(dbell)) {
dev_err(&dev->dev, "Can't alloc memory for 'fsl_rio_dbell'\n");
rc = -ENOMEM;
goto err_dbell;
}
dbell->dev = &dev->dev;
dbell->bellirq = irq_of_parse_and_map(np, 1);
dev_info(&dev->dev, "bellirq: %d\n", dbell->bellirq);
if (of_property_read_reg(np, 0, &range_start, NULL)) {
pr_err("%pOF: unable to find 'reg' property\n",
np);
rc = -ENOMEM;
goto err_pw;
}
dbell->dbell_regs = (struct rio_dbell_regs *)(rmu_regs_win +
(u32)range_start);
/* set up port write node */
np = of_find_compatible_node(NULL, NULL, "fsl,srio-port-write-unit");
if (!np) {
dev_err(&dev->dev, "No fsl,srio-port-write-unit node\n");
rc = -ENODEV;
goto err_pw;
}
pw = kzalloc(sizeof(struct fsl_rio_pw), GFP_KERNEL);
if (!(pw)) {
dev_err(&dev->dev, "Can't alloc memory for 'fsl_rio_pw'\n");
rc = -ENOMEM;
goto err_pw;
}
pw->dev = &dev->dev;
pw->pwirq = irq_of_parse_and_map(np, 0);
dev_info(&dev->dev, "pwirq: %d\n", pw->pwirq);
if (of_property_read_reg(np, 0, &range_start, NULL)) {
pr_err("%pOF: unable to find 'reg' property\n",
np);
rc = -ENOMEM;
goto err;
}
pw->pw_regs = (struct rio_pw_regs *)(rmu_regs_win + (u32)range_start);
/* set up ports node */
for_each_child_of_node(dev->dev.of_node, np) {
struct resource res;
port_index = of_get_property(np, "cell-index", NULL);
if (!port_index) {
dev_err(&dev->dev, "Can't get %pOF property 'cell-index'\n",
np);
continue;
}
if (of_range_to_resource(np, 0, &res)) {
dev_err(&dev->dev, "Can't get %pOF property 'ranges'\n",
np);
continue;
}
dev_info(&dev->dev, "%pOF: LAW %pR\n",
np, &res);
port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
if (!port)
continue;
rc = rio_mport_initialize(port);
if (rc) {
kfree(port);
continue;
}
i = *port_index - 1;
port->index = (unsigned char)i;
priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL);
if (!priv) {
dev_err(&dev->dev, "Can't alloc memory for 'priv'\n");
kfree(port);
continue;
}
INIT_LIST_HEAD(&port->dbells);
port->iores = res; /* struct copy */
port->iores.name = "rio_io_win";
if (request_resource(&iomem_resource, &port->iores) < 0) {
dev_err(&dev->dev, "RIO: Error requesting master port region"
" 0x%016llx-0x%016llx\n",
(u64)port->iores.start, (u64)port->iores.end);
kfree(priv);
kfree(port);
continue;
}
sprintf(port->name, "RIO mport %d", i);
priv->dev = &dev->dev;
port->dev.parent = &dev->dev;
port->ops = ops;
port->priv = priv;
port->phys_efptr = 0x100;
port->phys_rmap = 1;
priv->regs_win = rio_regs_win;
ccsr = in_be32(priv->regs_win + RIO_CCSR + i*0x20);
/* Checking the port training status */
if (in_be32((priv->regs_win + RIO_ESCSR + i*0x20)) & 1) {
dev_err(&dev->dev, "Port %d is not ready. "
"Try to restart connection...\n", i);
/* Disable ports */
out_be32(priv->regs_win
+ RIO_CCSR + i*0x20, 0);
/* Set 1x lane */
setbits32(priv->regs_win
+ RIO_CCSR + i*0x20, 0x02000000);
/* Enable ports */
setbits32(priv->regs_win
+ RIO_CCSR + i*0x20, 0x00600000);
msleep(100);
if (in_be32((priv->regs_win
+ RIO_ESCSR + i*0x20)) & 1) {
dev_err(&dev->dev,
"Port %d restart failed.\n", i);
release_resource(&port->iores);
kfree(priv);
kfree(port);
continue;
}
dev_info(&dev->dev, "Port %d restart success!\n", i);
}
fsl_rio_info(&dev->dev, ccsr);
port->sys_size = (in_be32((priv->regs_win + RIO_PEF_CAR))
& RIO_PEF_CTLS) >> 4;
dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n",
port->sys_size ? 65536 : 256);
if (port->host_deviceid >= 0)
out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST |
RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED);
else
out_be32(priv->regs_win + RIO_GCCSR,
RIO_PORT_GEN_MASTER);
priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win
+ ((i == 0) ? RIO_ATMU_REGS_PORT1_OFFSET :
RIO_ATMU_REGS_PORT2_OFFSET));
priv->maint_atmu_regs = priv->atmu_regs + 1;
priv->inb_atmu_regs = (struct rio_inb_atmu_regs __iomem *)
(priv->regs_win +
((i == 0) ? RIO_INB_ATMU_REGS_PORT1_OFFSET :
RIO_INB_ATMU_REGS_PORT2_OFFSET));
/* Set to receive packets with any dest ID */
out_be32((priv->regs_win + RIO_ISR_AACR + i*0x80),
RIO_ISR_AACR_AA);
/* Configure maintenance transaction window */
out_be32(&priv->maint_atmu_regs->rowbar,
port->iores.start >> 12);
out_be32(&priv->maint_atmu_regs->rowar,
0x80077000 | (ilog2(RIO_MAINT_WIN_SIZE) - 1));
priv->maint_win = ioremap(port->iores.start,
RIO_MAINT_WIN_SIZE);
rio_law_start = range_start;
fsl_rio_setup_rmu(port, rmu_np[i]);
fsl_rio_inbound_mem_init(priv);
dbell->mport[i] = port;
pw->mport[i] = port;
if (rio_register_mport(port)) {
release_resource(&port->iores);
kfree(priv);
kfree(port);
continue;
}
active_ports++;
}
if (!active_ports) {
rc = -ENOLINK;
goto err;
}
fsl_rio_doorbell_init(dbell);
fsl_rio_port_write_init(pw);
return 0;
err:
kfree(pw);
pw = NULL;
err_pw:
kfree(dbell);
dbell = NULL;
err_dbell:
iounmap(rmu_regs_win);
rmu_regs_win = NULL;
err_rmu:
kfree(ops);
err_ops:
iounmap(rio_regs_win);
rio_regs_win = NULL;
err_rio_regs:
return rc;
}
/* The probe function for RapidIO peer-to-peer network.
*/
static int fsl_of_rio_rpn_probe(struct platform_device *dev)
{
printk(KERN_INFO "Setting up RapidIO peer-to-peer network %pOF\n",
dev->dev.of_node);
return fsl_rio_setup(dev);
};
static const struct of_device_id fsl_of_rio_rpn_ids[] = {
{
.compatible = "fsl,srio",
},
{},
};
static struct platform_driver fsl_of_rio_rpn_driver = {
.driver = {
.name = "fsl-of-rio",
.of_match_table = fsl_of_rio_rpn_ids,
},
.probe = fsl_of_rio_rpn_probe,
};
static __init int fsl_of_rio_rpn_init(void)
{
return platform_driver_register(&fsl_of_rio_rpn_driver);
}
subsys_initcall(fsl_of_rio_rpn_init);
| linux-master | arch/powerpc/sysdev/fsl_rio.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* i8259 interrupt controller driver.
*/
#undef DEBUG
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/i8259.h>
static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */
static unsigned char cached_8259[2] = { 0xff, 0xff };
#define cached_A1 (cached_8259[0])
#define cached_21 (cached_8259[1])
static DEFINE_RAW_SPINLOCK(i8259_lock);
static struct irq_domain *i8259_host;
/*
* Acknowledge the IRQ using either the PCI host bridge's interrupt
 * acknowledge feature or by polling. How i8259_init() is called determines
 * which method is used. It should be noted that polling is broken on some
* IBM and Motorola PReP boxes so we must use the int-ack feature on them.
*/
unsigned int i8259_irq(void)
{
int irq;
int lock = 0;
/* Either int-ack or poll for the IRQ */
if (pci_intack)
irq = readb(pci_intack);
else {
raw_spin_lock(&i8259_lock);
lock = 1;
/* Perform an interrupt acknowledge cycle on controller 1. */
outb(0x0C, 0x20); /* prepare for poll */
irq = inb(0x20) & 7;
		if (irq == 2) {
/*
* Interrupt is cascaded so perform interrupt
* acknowledge on controller 2.
*/
outb(0x0C, 0xA0); /* prepare for poll */
irq = (inb(0xA0) & 7) + 8;
}
}
if (irq == 7) {
/*
* This may be a spurious interrupt.
*
* Read the interrupt status register (ISR). If the most
* significant bit is not set then there is no valid
* interrupt.
*/
if (!pci_intack)
outb(0x0B, 0x20); /* ISR register */
		if (~inb(0x20) & 0x80)
irq = 0;
} else if (irq == 0xff)
irq = 0;
if (lock)
raw_spin_unlock(&i8259_lock);
return irq;
}
static void i8259_mask_and_ack_irq(struct irq_data *d)
{
unsigned long flags;
raw_spin_lock_irqsave(&i8259_lock, flags);
if (d->irq > 7) {
cached_A1 |= 1 << (d->irq-8);
inb(0xA1); /* DUMMY */
outb(cached_A1, 0xA1);
outb(0x20, 0xA0); /* Non-specific EOI */
outb(0x20, 0x20); /* Non-specific EOI to cascade */
} else {
cached_21 |= 1 << d->irq;
inb(0x21); /* DUMMY */
outb(cached_21, 0x21);
outb(0x20, 0x20); /* Non-specific EOI */
}
raw_spin_unlock_irqrestore(&i8259_lock, flags);
}
static void i8259_set_irq_mask(int irq_nr)
{
	outb(cached_A1, 0xA1);
	outb(cached_21, 0x21);
}
static void i8259_mask_irq(struct irq_data *d)
{
unsigned long flags;
pr_debug("i8259_mask_irq(%d)\n", d->irq);
raw_spin_lock_irqsave(&i8259_lock, flags);
if (d->irq < 8)
cached_21 |= 1 << d->irq;
else
cached_A1 |= 1 << (d->irq-8);
i8259_set_irq_mask(d->irq);
raw_spin_unlock_irqrestore(&i8259_lock, flags);
}
static void i8259_unmask_irq(struct irq_data *d)
{
unsigned long flags;
pr_debug("i8259_unmask_irq(%d)\n", d->irq);
raw_spin_lock_irqsave(&i8259_lock, flags);
if (d->irq < 8)
cached_21 &= ~(1 << d->irq);
else
cached_A1 &= ~(1 << (d->irq-8));
i8259_set_irq_mask(d->irq);
raw_spin_unlock_irqrestore(&i8259_lock, flags);
}
static struct irq_chip i8259_pic = {
.name = "i8259",
.irq_mask = i8259_mask_irq,
.irq_disable = i8259_mask_irq,
.irq_unmask = i8259_unmask_irq,
.irq_mask_ack = i8259_mask_and_ack_irq,
};
static struct resource pic1_iores = {
.name = "8259 (master)",
.start = 0x20,
.end = 0x21,
.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};
static struct resource pic2_iores = {
.name = "8259 (slave)",
.start = 0xa0,
.end = 0xa1,
.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};
static struct resource pic_edgectrl_iores = {
.name = "8259 edge control",
.start = 0x4d0,
.end = 0x4d1,
.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};
static int i8259_host_match(struct irq_domain *h, struct device_node *node,
enum irq_domain_bus_token bus_token)
{
struct device_node *of_node = irq_domain_get_of_node(h);
return of_node == NULL || of_node == node;
}
static int i8259_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw);
/* We block the internal cascade */
if (hw == 2)
irq_set_status_flags(virq, IRQ_NOREQUEST);
/* We use the level handler only for now, we might want to
* be more cautious here but that works for now
*/
irq_set_status_flags(virq, IRQ_LEVEL);
irq_set_chip_and_handler(virq, &i8259_pic, handle_level_irq);
return 0;
}
static int i8259_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
static unsigned char map_isa_senses[4] = {
IRQ_TYPE_LEVEL_LOW,
IRQ_TYPE_LEVEL_HIGH,
IRQ_TYPE_EDGE_FALLING,
IRQ_TYPE_EDGE_RISING,
};
*out_hwirq = intspec[0];
if (intsize > 1 && intspec[1] < 4)
*out_flags = map_isa_senses[intspec[1]];
else
*out_flags = IRQ_TYPE_NONE;
return 0;
}
static const struct irq_domain_ops i8259_host_ops = {
.match = i8259_host_match,
.map = i8259_host_map,
.xlate = i8259_host_xlate,
};
struct irq_domain *__init i8259_get_host(void)
{
return i8259_host;
}
/**
* i8259_init - Initialize the legacy controller
* @node: device node of the legacy PIC (can be NULL, but then, it will match
* all interrupts, so beware)
* @intack_addr: PCI interrupt acknowledge (real) address which will return
* the active irq from the 8259
*/
void i8259_init(struct device_node *node, unsigned long intack_addr)
{
unsigned long flags;
/* initialize the controller */
raw_spin_lock_irqsave(&i8259_lock, flags);
/* Mask all first */
outb(0xff, 0xA1);
outb(0xff, 0x21);
/* init master interrupt controller */
outb(0x11, 0x20); /* Start init sequence */
outb(0x00, 0x21); /* Vector base */
outb(0x04, 0x21); /* edge triggered, Cascade (slave) on IRQ2 */
outb(0x01, 0x21); /* Select 8086 mode */
/* init slave interrupt controller */
outb(0x11, 0xA0); /* Start init sequence */
outb(0x08, 0xA1); /* Vector base */
outb(0x02, 0xA1); /* edge triggered, Cascade (slave) on IRQ2 */
outb(0x01, 0xA1); /* Select 8086 mode */
/* That thing is slow */
udelay(100);
/* always read ISR */
outb(0x0B, 0x20);
outb(0x0B, 0xA0);
/* Unmask the internal cascade */
cached_21 &= ~(1 << 2);
/* Set interrupt masks */
outb(cached_A1, 0xA1);
outb(cached_21, 0x21);
raw_spin_unlock_irqrestore(&i8259_lock, flags);
/* create a legacy host */
i8259_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0,
&i8259_host_ops, NULL);
if (i8259_host == NULL) {
printk(KERN_ERR "i8259: failed to allocate irq host !\n");
return;
}
/* reserve our resources */
/* XXX should we continue doing that ? it seems to cause problems
* with further requesting of PCI IO resources for that range...
* need to look into it.
*/
request_resource(&ioport_resource, &pic1_iores);
request_resource(&ioport_resource, &pic2_iores);
request_resource(&ioport_resource, &pic_edgectrl_iores);
if (intack_addr != 0)
pci_intack = ioremap(intack_addr, 1);
printk(KERN_INFO "i8259 legacy interrupt controller initialized\n");
}
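/*
 * Usage sketch (assumption, not part of this file): a board setup with an
 * ISA-style 8259 typically initializes the driver from its init_IRQ hook and
 * then either uses it as the primary PIC or cascades it off another
 * controller, e.g.:
 *
 *	i8259_init(pic_node, intack_addr);	// intack_addr may be 0 to poll
 *	ppc_md.get_irq = i8259_irq;		// when the 8259 is the primary PIC
 *
 * The exact hook-up (primary PIC vs. cascade) is platform specific.
 */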
| linux-master | arch/powerpc/sysdev/i8259.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* A udbg backend which logs messages and reads input from in memory
* buffers.
*
* The console output can be read from memcons_output which is a
* circular buffer whose next write position is stored in memcons.output_pos.
*
* Input may be passed by writing into the memcons_input buffer when it is
* empty. The input buffer is empty when both input_pos == input_start and
* *input_start == '\0'.
*
* Copyright (C) 2003-2005 Anton Blanchard and Milton Miller, IBM Corp
* Copyright (C) 2013 Alistair Popple, IBM Corp
*/
#include <linux/kernel.h>
#include <asm/barrier.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/udbg.h>
struct memcons {
char *output_start;
char *output_pos;
char *output_end;
char *input_start;
char *input_pos;
char *input_end;
};
static char memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE];
static char memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE];
struct memcons memcons = {
.output_start = memcons_output,
.output_pos = memcons_output,
.output_end = &memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE],
.input_start = memcons_input,
.input_pos = memcons_input,
.input_end = &memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE],
};
void memcons_putc(char c)
{
char *new_output_pos;
*memcons.output_pos = c;
wmb();
new_output_pos = memcons.output_pos + 1;
if (new_output_pos >= memcons.output_end)
new_output_pos = memcons.output_start;
memcons.output_pos = new_output_pos;
}
int memcons_getc_poll(void)
{
char c;
char *new_input_pos;
if (*memcons.input_pos) {
c = *memcons.input_pos;
new_input_pos = memcons.input_pos + 1;
if (new_input_pos >= memcons.input_end)
new_input_pos = memcons.input_start;
else if (*new_input_pos == '\0')
new_input_pos = memcons.input_start;
*memcons.input_pos = '\0';
wmb();
memcons.input_pos = new_input_pos;
return c;
}
return -1;
}
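/*
 * Host-side sketch (assumption): a debugger injecting console input only has
 * to honour the "buffer empty" condition described at the top of this file.
 * In pseudo-C, operating on the target's memory:
 *
 *	if (memcons.input_pos == memcons.input_start &&
 *	    *memcons.input_start == '\0')
 *		strcpy(memcons.input_start, "help\r");	// queue new input
 *
 * memcons_getc_poll() then consumes the string one character at a time,
 * clearing each byte behind it.
 */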
int memcons_getc(void)
{
int c;
while (1) {
c = memcons_getc_poll();
if (c == -1)
cpu_relax();
else
break;
}
return c;
}
void __init udbg_init_memcons(void)
{
udbg_putc = memcons_putc;
udbg_getc = memcons_getc;
udbg_getc_poll = memcons_getc_poll;
}
| linux-master | arch/powerpc/sysdev/udbg_memcons.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Common CPM GPIO wrapper for the CPM GPIO ports
*
* Author: Christophe Leroy <[email protected]>
*
* Copyright 2017 CS Systemes d'Information.
*
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <asm/cpm.h>
#ifdef CONFIG_8xx_GPIO
#include <asm/cpm1.h>
#endif
static int cpm_gpio_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
int (*gp_add)(struct device *dev) = of_device_get_match_data(dev);
if (!gp_add)
return -ENODEV;
return gp_add(dev);
}
static const struct of_device_id cpm_gpio_match[] = {
#ifdef CONFIG_8xx_GPIO
{
.compatible = "fsl,cpm1-pario-bank-a",
.data = cpm1_gpiochip_add16,
},
{
.compatible = "fsl,cpm1-pario-bank-b",
.data = cpm1_gpiochip_add32,
},
{
.compatible = "fsl,cpm1-pario-bank-c",
.data = cpm1_gpiochip_add16,
},
{
.compatible = "fsl,cpm1-pario-bank-d",
.data = cpm1_gpiochip_add16,
},
/* Port E uses CPM2 layout */
{
.compatible = "fsl,cpm1-pario-bank-e",
.data = cpm2_gpiochip_add32,
},
#endif
{
.compatible = "fsl,cpm2-pario-bank",
.data = cpm2_gpiochip_add32,
},
{},
};
MODULE_DEVICE_TABLE(of, cpm_gpio_match);
static struct platform_driver cpm_gpio_driver = {
.probe = cpm_gpio_probe,
.driver = {
.name = "cpm-gpio",
.of_match_table = cpm_gpio_match,
},
};
static int __init cpm_gpio_init(void)
{
return platform_driver_register(&cpm_gpio_driver);
}
arch_initcall(cpm_gpio_init);
MODULE_AUTHOR("Christophe Leroy <[email protected]>");
MODULE_DESCRIPTION("Driver for CPM GPIO");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cpm-gpio");
| linux-master | arch/powerpc/sysdev/cpm_gpio.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp.
* <[email protected]>
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/of_address.h>
#include <asm/dcr.h>
#ifdef CONFIG_PPC_DCR_MMIO
static struct device_node *find_dcr_parent(struct device_node *node)
{
struct device_node *par, *tmp;
const u32 *p;
for (par = of_node_get(node); par;) {
if (of_property_read_bool(par, "dcr-controller"))
break;
p = of_get_property(par, "dcr-parent", NULL);
tmp = par;
if (p == NULL)
par = of_get_parent(par);
else
par = of_find_node_by_phandle(*p);
of_node_put(tmp);
}
return par;
}
#endif
#if defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO)
bool dcr_map_ok_generic(dcr_host_t host)
{
if (host.type == DCR_HOST_NATIVE)
return dcr_map_ok_native(host.host.native);
else if (host.type == DCR_HOST_MMIO)
return dcr_map_ok_mmio(host.host.mmio);
else
return false;
}
EXPORT_SYMBOL_GPL(dcr_map_ok_generic);
dcr_host_t dcr_map_generic(struct device_node *dev,
unsigned int dcr_n,
unsigned int dcr_c)
{
dcr_host_t host;
struct device_node *dp;
const char *prop;
host.type = DCR_HOST_INVALID;
dp = find_dcr_parent(dev);
if (dp == NULL)
return host;
prop = of_get_property(dp, "dcr-access-method", NULL);
pr_debug("dcr_map_generic(dcr-access-method = %s)\n", prop);
if (!strcmp(prop, "native")) {
host.type = DCR_HOST_NATIVE;
host.host.native = dcr_map_native(dev, dcr_n, dcr_c);
} else if (!strcmp(prop, "mmio")) {
host.type = DCR_HOST_MMIO;
host.host.mmio = dcr_map_mmio(dev, dcr_n, dcr_c);
}
of_node_put(dp);
return host;
}
EXPORT_SYMBOL_GPL(dcr_map_generic);
void dcr_unmap_generic(dcr_host_t host, unsigned int dcr_c)
{
if (host.type == DCR_HOST_NATIVE)
dcr_unmap_native(host.host.native, dcr_c);
else if (host.type == DCR_HOST_MMIO)
dcr_unmap_mmio(host.host.mmio, dcr_c);
else /* host.type == DCR_HOST_INVALID */
WARN_ON(true);
}
EXPORT_SYMBOL_GPL(dcr_unmap_generic);
u32 dcr_read_generic(dcr_host_t host, unsigned int dcr_n)
{
if (host.type == DCR_HOST_NATIVE)
return dcr_read_native(host.host.native, dcr_n);
else if (host.type == DCR_HOST_MMIO)
return dcr_read_mmio(host.host.mmio, dcr_n);
else /* host.type == DCR_HOST_INVALID */
WARN_ON(true);
return 0;
}
EXPORT_SYMBOL_GPL(dcr_read_generic);
void dcr_write_generic(dcr_host_t host, unsigned int dcr_n, u32 value)
{
if (host.type == DCR_HOST_NATIVE)
dcr_write_native(host.host.native, dcr_n, value);
else if (host.type == DCR_HOST_MMIO)
dcr_write_mmio(host.host.mmio, dcr_n, value);
else /* host.type == DCR_HOST_INVALID */
WARN_ON(true);
}
EXPORT_SYMBOL_GPL(dcr_write_generic);
#endif /* defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) */
unsigned int dcr_resource_start(const struct device_node *np,
unsigned int index)
{
unsigned int ds;
const u32 *dr = of_get_property(np, "dcr-reg", &ds);
if (dr == NULL || ds & 1 || index >= (ds / 8))
return 0;
return dr[index * 2];
}
EXPORT_SYMBOL_GPL(dcr_resource_start);
unsigned int dcr_resource_len(const struct device_node *np, unsigned int index)
{
unsigned int ds;
const u32 *dr = of_get_property(np, "dcr-reg", &ds);
if (dr == NULL || ds & 1 || index >= (ds / 8))
return 0;
return dr[index * 2 + 1];
}
EXPORT_SYMBOL_GPL(dcr_resource_len);
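/*
 * Usage sketch (assumption, based on the dcr_map()/dcr_read() wrappers in
 * asm/dcr.h): a driver normally pulls its DCR range out of the device tree
 * with the two helpers above and maps it without caring whether the backend
 * is native or MMIO:
 *
 *	unsigned int base = dcr_resource_start(np, 0);
 *	unsigned int len = dcr_resource_len(np, 0);
 *	dcr_host_t host = dcr_map(np, base, len);
 *
 *	if (!DCR_MAP_OK(host))
 *		return -ENODEV;
 *	val = dcr_read(host, 0);	// first DCR of the mapped range
 *	dcr_unmap(host, len);
 */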
#ifdef CONFIG_PPC_DCR_MMIO
static u64 of_translate_dcr_address(struct device_node *dev,
unsigned int dcr_n,
unsigned int *out_stride)
{
struct device_node *dp;
const u32 *p;
unsigned int stride;
u64 ret = OF_BAD_ADDR;
dp = find_dcr_parent(dev);
if (dp == NULL)
return OF_BAD_ADDR;
/* Stride is not properly defined yet, default to 0x10 for Axon */
p = of_get_property(dp, "dcr-mmio-stride", NULL);
stride = (p == NULL) ? 0x10 : *p;
/* XXX FIXME: Which property name is to use of the 2 following ? */
p = of_get_property(dp, "dcr-mmio-range", NULL);
if (p == NULL)
p = of_get_property(dp, "dcr-mmio-space", NULL);
if (p == NULL)
goto done;
/* Maybe could do some better range checking here */
ret = of_translate_address(dp, p);
if (ret != OF_BAD_ADDR)
ret += (u64)(stride) * (u64)dcr_n;
if (out_stride)
*out_stride = stride;
done:
of_node_put(dp);
return ret;
}
dcr_host_mmio_t dcr_map_mmio(struct device_node *dev,
unsigned int dcr_n,
unsigned int dcr_c)
{
dcr_host_mmio_t ret = { .token = NULL, .stride = 0, .base = dcr_n };
u64 addr;
pr_debug("dcr_map(%pOF, 0x%x, 0x%x)\n",
dev, dcr_n, dcr_c);
addr = of_translate_dcr_address(dev, dcr_n, &ret.stride);
pr_debug("translates to addr: 0x%llx, stride: 0x%x\n",
(unsigned long long) addr, ret.stride);
if (addr == OF_BAD_ADDR)
return ret;
pr_debug("mapping 0x%x bytes\n", dcr_c * ret.stride);
ret.token = ioremap(addr, dcr_c * ret.stride);
if (ret.token == NULL)
return ret;
pr_debug("mapped at 0x%p -> base is 0x%p\n",
ret.token, ret.token - dcr_n * ret.stride);
ret.token -= dcr_n * ret.stride;
return ret;
}
EXPORT_SYMBOL_GPL(dcr_map_mmio);
void dcr_unmap_mmio(dcr_host_mmio_t host, unsigned int dcr_c)
{
dcr_host_mmio_t h = host;
if (h.token == NULL)
return;
h.token += host.base * h.stride;
iounmap(h.token);
h.token = NULL;
}
EXPORT_SYMBOL_GPL(dcr_unmap_mmio);
#endif /* defined(CONFIG_PPC_DCR_MMIO) */
#ifdef CONFIG_PPC_DCR_NATIVE
DEFINE_SPINLOCK(dcr_ind_lock);
EXPORT_SYMBOL_GPL(dcr_ind_lock);
#endif /* defined(CONFIG_PPC_DCR_NATIVE) */
| linux-master | arch/powerpc/sysdev/dcr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Common routines for Tundra Semiconductor TSI108 host bridge.
*
* 2004-2005 (c) Tundra Semiconductor Corp.
* Author: Alex Bounine ([email protected])
* Author: Roy Zang ([email protected])
* Add pci interrupt router host
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/tsi108.h>
#include <asm/tsi108_pci.h>
#include <asm/tsi108_irq.h>
#undef DEBUG
#ifdef DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif
#define tsi_mk_config_addr(bus, devfunc, offset) \
((((bus)<<16) | ((devfunc)<<8) | (offset & 0xfc)) + tsi108_pci_cfg_base)
u32 tsi108_pci_cfg_base;
static u32 tsi108_pci_cfg_phys;
u32 tsi108_csr_vir_base;
static struct irq_domain *pci_irq_host;
extern u32 get_vir_csrbase(void);
extern u32 tsi108_read_reg(u32 reg_offset);
extern void tsi108_write_reg(u32 reg_offset, u32 val);
int
tsi108_direct_write_config(struct pci_bus *bus, unsigned int devfunc,
int offset, int len, u32 val)
{
volatile unsigned char *cfg_addr;
struct pci_controller *hose = pci_bus_to_host(bus);
if (ppc_md.pci_exclude_device)
if (ppc_md.pci_exclude_device(hose, bus->number, devfunc))
return PCIBIOS_DEVICE_NOT_FOUND;
cfg_addr = (unsigned char *)(tsi_mk_config_addr(bus->number,
devfunc, offset) |
(offset & 0x03));
#ifdef DEBUG
printk("PCI CFG write : ");
printk("%d:0x%x:0x%x ", bus->number, devfunc, offset);
printk("%d ADDR=0x%08x ", len, (uint) cfg_addr);
printk("data = 0x%08x\n", val);
#endif
switch (len) {
case 1:
out_8((u8 *) cfg_addr, val);
break;
case 2:
out_le16((u16 *) cfg_addr, val);
break;
default:
out_le32((u32 *) cfg_addr, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
void tsi108_clear_pci_error(u32 pci_cfg_base)
{
u32 err_stat, err_addr, pci_stat;
/*
* Quietly clear PB and PCI error flags set as result
* of PCI/X configuration read requests.
*/
/* Read PB Error Log Registers */
err_stat = tsi108_read_reg(TSI108_PB_OFFSET + TSI108_PB_ERRCS);
err_addr = tsi108_read_reg(TSI108_PB_OFFSET + TSI108_PB_AERR);
if (err_stat & TSI108_PB_ERRCS_ES) {
/* Clear error flag */
tsi108_write_reg(TSI108_PB_OFFSET + TSI108_PB_ERRCS,
TSI108_PB_ERRCS_ES);
/* Clear read error reported in PB_ISR */
tsi108_write_reg(TSI108_PB_OFFSET + TSI108_PB_ISR,
TSI108_PB_ISR_PBS_RD_ERR);
/* Clear PCI/X bus cfg errors if applicable */
if ((err_addr & 0xFF000000) == pci_cfg_base) {
pci_stat =
tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_CSR);
tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_CSR,
pci_stat);
}
}
return;
}
#define __tsi108_read_pci_config(x, addr, op) \
__asm__ __volatile__( \
" "op" %0,0,%1\n" \
"1: eieio\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: li %0,-1\n" \
" b 2b\n" \
".previous\n" \
EX_TABLE(1b, 3b) \
: "=r"(x) : "r"(addr))
int
tsi108_direct_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
int len, u32 * val)
{
volatile unsigned char *cfg_addr;
struct pci_controller *hose = pci_bus_to_host(bus);
u32 temp;
if (ppc_md.pci_exclude_device)
if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
cfg_addr = (unsigned char *)(tsi_mk_config_addr(bus->number,
devfn,
offset) | (offset &
0x03));
switch (len) {
case 1:
__tsi108_read_pci_config(temp, cfg_addr, "lbzx");
break;
case 2:
__tsi108_read_pci_config(temp, cfg_addr, "lhbrx");
break;
default:
__tsi108_read_pci_config(temp, cfg_addr, "lwbrx");
break;
}
*val = temp;
#ifdef DEBUG
if ((0xFFFFFFFF != temp) && (0xFFFF != temp) && (0xFF != temp)) {
printk("PCI CFG read : ");
printk("%d:0x%x:0x%x ", bus->number, devfn, offset);
printk("%d ADDR=0x%08x ", len, (uint) cfg_addr);
printk("data = 0x%x\n", *val);
}
#endif
return PCIBIOS_SUCCESSFUL;
}
void tsi108_clear_pci_cfg_error(void)
{
tsi108_clear_pci_error(tsi108_pci_cfg_phys);
}
static struct pci_ops tsi108_direct_pci_ops = {
.read = tsi108_direct_read_config,
.write = tsi108_direct_write_config,
};
int __init tsi108_setup_pci(struct device_node *dev, u32 cfg_phys, int primary)
{
int len;
struct pci_controller *hose;
struct resource rsrc;
const int *bus_range;
int has_address = 0;
/* PCI Config mapping */
tsi108_pci_cfg_base = (u32)ioremap(cfg_phys, TSI108_PCI_CFG_SIZE);
tsi108_pci_cfg_phys = cfg_phys;
DBG("TSI_PCI: %s tsi108_pci_cfg_base=0x%x\n", __func__,
tsi108_pci_cfg_base);
/* Fetch host bridge registers address */
has_address = (of_address_to_resource(dev, 0, &rsrc) == 0);
/* Get bus range if any */
bus_range = of_get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
" bus 0\n", dev);
}
hose = pcibios_alloc_controller(dev);
if (!hose) {
printk("PCI Host bridge init failed\n");
return -ENOMEM;
}
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
(hose)->ops = &tsi108_direct_pci_ops;
pr_info("Found tsi108 PCI host bridge at 0x%pa. Firmware bus number: %d->%d\n",
&rsrc.start, hose->first_busno, hose->last_busno);
/* Interpret the "ranges" property */
/* This also maps the I/O region and sets isa_io/mem_base */
pci_process_bridge_OF_ranges(hose, dev, primary);
return 0;
}
/*
* Low level utility functions
*/
static void tsi108_pci_int_mask(u_int irq)
{
u_int irp_cfg;
int int_line = (irq - IRQ_PCI_INTAD_BASE);
irp_cfg = tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL);
mb();
irp_cfg |= (1 << int_line); /* INTx_DIR = output */
irp_cfg &= ~(3 << (8 + (int_line * 2))); /* INTx_TYPE = unused */
tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL, irp_cfg);
mb();
irp_cfg = tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL);
}
static void tsi108_pci_int_unmask(u_int irq)
{
u_int irp_cfg;
int int_line = (irq - IRQ_PCI_INTAD_BASE);
irp_cfg = tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL);
mb();
irp_cfg &= ~(1 << int_line);
irp_cfg |= (3 << (8 + (int_line * 2)));
tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL, irp_cfg);
mb();
}
static void __init init_pci_source(void)
{
tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL,
0x0000ff00);
tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE,
TSI108_PCI_IRP_ENABLE_P_INT);
mb();
}
static inline unsigned int get_pci_source(void)
{
u_int temp = 0;
int irq = -1;
int i;
u_int pci_irp_stat;
static int mask = 0;
/* Read PCI/X block interrupt status register */
pci_irp_stat = tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_STAT);
mb();
if (pci_irp_stat & TSI108_PCI_IRP_STAT_P_INT) {
/* Process Interrupt from PCI bus INTA# - INTD# lines */
temp =
tsi108_read_reg(TSI108_PCI_OFFSET +
TSI108_PCI_IRP_INTAD) & 0xf;
mb();
for (i = 0; i < 4; i++, mask++) {
if (temp & (1 << mask % 4)) {
irq = IRQ_PCI_INTA + mask % 4;
mask++;
break;
}
}
/* Disable interrupts from PCI block */
temp = tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE);
tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE,
temp & ~TSI108_PCI_IRP_ENABLE_P_INT);
mb();
(void)tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE);
mb();
}
#ifdef DEBUG
else {
printk("TSI108_PIC: error in TSI108_PCI_IRP_STAT\n");
pci_irp_stat =
tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_STAT);
temp =
tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_INTAD);
mb();
printk(">> stat=0x%08x intad=0x%08x ", pci_irp_stat, temp);
temp =
tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL);
mb();
printk("cfg_ctl=0x%08x ", temp);
temp =
tsi108_read_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE);
mb();
printk("irp_enable=0x%08x\n", temp);
}
#endif /* end of DEBUG */
return irq;
}
/*
* Linux descriptor level callbacks
*/
static void tsi108_pci_irq_unmask(struct irq_data *d)
{
tsi108_pci_int_unmask(d->irq);
/* Enable interrupts from PCI block */
tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE,
tsi108_read_reg(TSI108_PCI_OFFSET +
TSI108_PCI_IRP_ENABLE) |
TSI108_PCI_IRP_ENABLE_P_INT);
mb();
}
static void tsi108_pci_irq_mask(struct irq_data *d)
{
tsi108_pci_int_mask(d->irq);
}
static void tsi108_pci_irq_ack(struct irq_data *d)
{
tsi108_pci_int_mask(d->irq);
}
/*
* Interrupt controller descriptor for cascaded PCI interrupt controller.
*/
static struct irq_chip tsi108_pci_irq = {
.name = "tsi108_PCI_int",
.irq_mask = tsi108_pci_irq_mask,
.irq_ack = tsi108_pci_irq_ack,
.irq_unmask = tsi108_pci_irq_unmask,
};
static int pci_irq_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
*out_hwirq = intspec[0];
*out_flags = IRQ_TYPE_LEVEL_HIGH;
return 0;
}
static int pci_irq_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
	unsigned int irq;
DBG("%s(%d, 0x%lx)\n", __func__, virq, hw);
if ((virq >= 1) && (virq <= 4)){
irq = virq + IRQ_PCI_INTAD_BASE - 1;
irq_set_status_flags(irq, IRQ_LEVEL);
irq_set_chip(irq, &tsi108_pci_irq);
}
return 0;
}
static const struct irq_domain_ops pci_irq_domain_ops = {
.map = pci_irq_host_map,
.xlate = pci_irq_host_xlate,
};
/*
* Exported functions
*/
/*
* The Tsi108 PCI interrupts initialization routine.
*
* The INTA# - INTD# interrupts on the PCI bus are reported by the PCI block
* to the MPIC using single interrupt source (IRQ_TSI108_PCI). Therefore the
* PCI block has to be treated as a cascaded interrupt controller connected
* to the MPIC.
*/
void __init tsi108_pci_int_init(struct device_node *node)
{
DBG("Tsi108_pci_int_init: initializing PCI interrupts\n");
pci_irq_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0,
&pci_irq_domain_ops, NULL);
if (pci_irq_host == NULL) {
printk(KERN_ERR "pci_irq_host: failed to allocate irq domain!\n");
return;
}
init_pci_source();
}
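/*
 * Hook-up sketch (assumption, the platform code is not part of this file):
 * board setup maps the single MPIC source used by the PCI block and chains
 * it to the cascade handler below, roughly:
 *
 *	tsi108_pci_int_init(pci_int_node);
 *	cascade = irq_of_parse_and_map(pci_int_node, 0);
 *	irq_set_chained_handler(cascade, tsi108_irq_cascade);
 */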
void tsi108_irq_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq = get_pci_source();
if (cascade_irq)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
}
| linux-master | arch/powerpc/sysdev/tsi108_pci.c |
/*
* Driver for ePAPR Embedded Hypervisor PIC
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
*
* Author: Ashish Kalra <[email protected]>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/ehv_pic.h>
#include <asm/fsl_hcalls.h>
static struct ehv_pic *global_ehv_pic;
static DEFINE_SPINLOCK(ehv_pic_lock);
static u32 hwirq_intspec[NR_EHV_PIC_INTS];
static u32 __iomem *mpic_percpu_base_vaddr;
#define IRQ_TYPE_MPIC_DIRECT 4
#define MPIC_EOI 0x00B0
/*
* Linux descriptor level callbacks
*/
static void ehv_pic_unmask_irq(struct irq_data *d)
{
unsigned int src = virq_to_hw(d->irq);
ev_int_set_mask(src, 0);
}
static void ehv_pic_mask_irq(struct irq_data *d)
{
unsigned int src = virq_to_hw(d->irq);
ev_int_set_mask(src, 1);
}
static void ehv_pic_end_irq(struct irq_data *d)
{
unsigned int src = virq_to_hw(d->irq);
ev_int_eoi(src);
}
static void ehv_pic_direct_end_irq(struct irq_data *d)
{
out_be32(mpic_percpu_base_vaddr + MPIC_EOI / 4, 0);
}
static int ehv_pic_set_affinity(struct irq_data *d, const struct cpumask *dest,
bool force)
{
unsigned int src = virq_to_hw(d->irq);
unsigned int config, prio, cpu_dest;
int cpuid = irq_choose_cpu(dest);
unsigned long flags;
spin_lock_irqsave(&ehv_pic_lock, flags);
ev_int_get_config(src, &config, &prio, &cpu_dest);
ev_int_set_config(src, config, prio, cpuid);
spin_unlock_irqrestore(&ehv_pic_lock, flags);
return IRQ_SET_MASK_OK;
}
static unsigned int ehv_pic_type_to_vecpri(unsigned int type)
{
/* Now convert sense value */
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_RISING:
return EHV_PIC_INFO(VECPRI_SENSE_EDGE) |
EHV_PIC_INFO(VECPRI_POLARITY_POSITIVE);
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_EDGE_BOTH:
return EHV_PIC_INFO(VECPRI_SENSE_EDGE) |
EHV_PIC_INFO(VECPRI_POLARITY_NEGATIVE);
case IRQ_TYPE_LEVEL_HIGH:
return EHV_PIC_INFO(VECPRI_SENSE_LEVEL) |
EHV_PIC_INFO(VECPRI_POLARITY_POSITIVE);
case IRQ_TYPE_LEVEL_LOW:
default:
return EHV_PIC_INFO(VECPRI_SENSE_LEVEL) |
EHV_PIC_INFO(VECPRI_POLARITY_NEGATIVE);
}
}
static int ehv_pic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
unsigned int src = virq_to_hw(d->irq);
unsigned int vecpri, vold, vnew, prio, cpu_dest;
unsigned long flags;
if (flow_type == IRQ_TYPE_NONE)
flow_type = IRQ_TYPE_LEVEL_LOW;
irqd_set_trigger_type(d, flow_type);
vecpri = ehv_pic_type_to_vecpri(flow_type);
spin_lock_irqsave(&ehv_pic_lock, flags);
ev_int_get_config(src, &vold, &prio, &cpu_dest);
vnew = vold & ~(EHV_PIC_INFO(VECPRI_POLARITY_MASK) |
EHV_PIC_INFO(VECPRI_SENSE_MASK));
vnew |= vecpri;
/*
* TODO : Add specific interface call for platform to set
* individual interrupt priorities.
* platform currently using static/default priority for all ints
*/
prio = 8;
ev_int_set_config(src, vecpri, prio, cpu_dest);
spin_unlock_irqrestore(&ehv_pic_lock, flags);
return IRQ_SET_MASK_OK_NOCOPY;
}
static struct irq_chip ehv_pic_irq_chip = {
.irq_mask = ehv_pic_mask_irq,
.irq_unmask = ehv_pic_unmask_irq,
.irq_eoi = ehv_pic_end_irq,
.irq_set_type = ehv_pic_set_irq_type,
};
static struct irq_chip ehv_pic_direct_eoi_irq_chip = {
.irq_mask = ehv_pic_mask_irq,
.irq_unmask = ehv_pic_unmask_irq,
.irq_eoi = ehv_pic_direct_end_irq,
.irq_set_type = ehv_pic_set_irq_type,
};
/* Return an interrupt vector or 0 if no interrupt is pending. */
unsigned int ehv_pic_get_irq(void)
{
int irq;
BUG_ON(global_ehv_pic == NULL);
if (global_ehv_pic->coreint_flag)
irq = mfspr(SPRN_EPR); /* if core int mode */
else
ev_int_iack(0, &irq); /* legacy mode */
if (irq == 0xFFFF) /* 0xFFFF --> no irq is pending */
return 0;
/*
 * this will also set up revmap[] in the slow path for the first
 * time, subsequent calls will always use the fast path by indexing revmap
*/
return irq_linear_revmap(global_ehv_pic->irqhost, irq);
}
static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node,
enum irq_domain_bus_token bus_token)
{
/* Exact match, unless ehv_pic node is NULL */
struct device_node *of_node = irq_domain_get_of_node(h);
return of_node == NULL || of_node == node;
}
static int ehv_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct ehv_pic *ehv_pic = h->host_data;
struct irq_chip *chip;
/* Default chip */
chip = &ehv_pic->hc_irq;
if (mpic_percpu_base_vaddr)
if (hwirq_intspec[hw] & IRQ_TYPE_MPIC_DIRECT)
chip = &ehv_pic_direct_eoi_irq_chip;
irq_set_chip_data(virq, chip);
/*
* using handle_fasteoi_irq as our irq handler, this will
* only call the eoi callback and suitable for the MPIC
* controller which set ISR/IPR automatically and clear the
* highest priority active interrupt in ISR/IPR when we do
* a specific eoi
*/
irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);
/* Set default irq type */
irq_set_irq_type(virq, IRQ_TYPE_NONE);
return 0;
}
static int ehv_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
/*
 * Interrupt sense values coming from the guest device tree
 * interrupt specifiers use one of four possible sense and
 * level encodings, which need to be translated between the
 * firmware encoding and the Linux irq type.
*/
static unsigned char map_of_senses_to_linux_irqtype[4] = {
IRQ_TYPE_EDGE_FALLING,
IRQ_TYPE_EDGE_RISING,
IRQ_TYPE_LEVEL_LOW,
IRQ_TYPE_LEVEL_HIGH,
};
*out_hwirq = intspec[0];
if (intsize > 1) {
hwirq_intspec[intspec[0]] = intspec[1];
*out_flags = map_of_senses_to_linux_irqtype[intspec[1] &
~IRQ_TYPE_MPIC_DIRECT];
} else {
*out_flags = IRQ_TYPE_NONE;
}
return 0;
}
static const struct irq_domain_ops ehv_pic_host_ops = {
.match = ehv_pic_host_match,
.map = ehv_pic_host_map,
.xlate = ehv_pic_host_xlate,
};
void __init ehv_pic_init(void)
{
struct device_node *np, *np2;
struct ehv_pic *ehv_pic;
np = of_find_compatible_node(NULL, NULL, "epapr,hv-pic");
if (!np) {
pr_err("ehv_pic_init: could not find epapr,hv-pic node\n");
return;
}
ehv_pic = kzalloc(sizeof(struct ehv_pic), GFP_KERNEL);
if (!ehv_pic) {
of_node_put(np);
return;
}
ehv_pic->irqhost = irq_domain_add_linear(np, NR_EHV_PIC_INTS,
&ehv_pic_host_ops, ehv_pic);
if (!ehv_pic->irqhost) {
of_node_put(np);
kfree(ehv_pic);
return;
}
np2 = of_find_compatible_node(NULL, NULL, "fsl,hv-mpic-per-cpu");
if (np2) {
mpic_percpu_base_vaddr = of_iomap(np2, 0);
if (!mpic_percpu_base_vaddr)
pr_err("ehv_pic_init: of_iomap failed\n");
of_node_put(np2);
}
ehv_pic->hc_irq = ehv_pic_irq_chip;
ehv_pic->hc_irq.irq_set_affinity = ehv_pic_set_affinity;
ehv_pic->coreint_flag = of_property_read_bool(np, "has-external-proxy");
global_ehv_pic = ehv_pic;
irq_set_default_host(global_ehv_pic->irqhost);
}
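/*
 * Usage sketch (assumption): a hypervisor guest platform typically calls
 * ehv_pic_init() from its init_IRQ hook and lets the core fetch vectors
 * through ehv_pic_get_irq(), e.g.:
 *
 *	ehv_pic_init();
 *	ppc_md.get_irq = ehv_pic_get_irq;
 */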
| linux-master | arch/powerpc/sysdev/ehv_pic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Freescale LBC and UPM routines.
*
* Copyright © 2007-2008 MontaVista Software, Inc.
* Copyright © 2010 Freescale Semiconductor
*
* Author: Anton Vorontsov <[email protected]>
* Author: Jack Lan <[email protected]>
* Author: Roy Zang <[email protected]>
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/mod_devicetable.h>
#include <linux/syscore_ops.h>
#include <asm/fsl_lbc.h>
static DEFINE_SPINLOCK(fsl_lbc_lock);
struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev;
EXPORT_SYMBOL(fsl_lbc_ctrl_dev);
/**
* fsl_lbc_addr - convert the base address
* @addr_base: base address of the memory bank
*
 * This function converts an LBC base address into the right format for the
 * BR register. If the SoC has an eLBC it returns the 32-bit physical address
 * unchanged, otherwise it converts a 34-bit local bus physical address into
 * the correct 32-bit BR address format (example: MPC8641).
*/
u32 fsl_lbc_addr(phys_addr_t addr_base)
{
struct device_node *np = fsl_lbc_ctrl_dev->dev->of_node;
u32 addr = addr_base & 0xffff8000;
if (of_device_is_compatible(np, "fsl,elbc"))
return addr;
return addr | ((addr_base & 0x300000000ull) >> 19);
}
EXPORT_SYMBOL(fsl_lbc_addr);
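/*
 * Worked example (derived from the conversion above): on a non-eLBC part a
 * 34-bit local bus address such as 0x3ff800000 is folded into the 32-bit BR
 * format by shifting its two high bits down next to the base:
 *
 *	(0x3ff800000 & 0xffff8000) | ((0x3ff800000 & 0x300000000) >> 19)
 *		== 0xff800000 | 0x6000 == 0xff806000
 *
 * i.e. physical address bits 33-32 land in the BR extended base address
 * bits.
 */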
/**
* fsl_lbc_find - find Localbus bank
* @addr_base: base address of the memory bank
*
* This function walks LBC banks comparing "Base address" field of the BR
* registers with the supplied addr_base argument. When bases match this
 * function returns the bank number (starting with 0), otherwise it returns
 * an appropriate errno value.
*/
int fsl_lbc_find(phys_addr_t addr_base)
{
int i;
struct fsl_lbc_regs __iomem *lbc;
if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
return -ENODEV;
lbc = fsl_lbc_ctrl_dev->regs;
for (i = 0; i < ARRAY_SIZE(lbc->bank); i++) {
u32 br = in_be32(&lbc->bank[i].br);
u32 or = in_be32(&lbc->bank[i].or);
if (br & BR_V && (br & or & BR_BA) == fsl_lbc_addr(addr_base))
return i;
}
return -ENOENT;
}
EXPORT_SYMBOL(fsl_lbc_find);
/**
* fsl_upm_find - find pre-programmed UPM via base address
* @addr_base: base address of the memory bank controlled by the UPM
* @upm: pointer to the allocated fsl_upm structure
*
 * This function fills the fsl_upm structure so you can use it with the rest
 * of the UPM API. On success this function returns 0, otherwise it returns
 * an appropriate errno value.
*/
int fsl_upm_find(phys_addr_t addr_base, struct fsl_upm *upm)
{
int bank;
u32 br;
struct fsl_lbc_regs __iomem *lbc;
bank = fsl_lbc_find(addr_base);
if (bank < 0)
return bank;
if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
return -ENODEV;
lbc = fsl_lbc_ctrl_dev->regs;
br = in_be32(&lbc->bank[bank].br);
switch (br & BR_MSEL) {
case BR_MS_UPMA:
upm->mxmr = &lbc->mamr;
break;
case BR_MS_UPMB:
upm->mxmr = &lbc->mbmr;
break;
case BR_MS_UPMC:
upm->mxmr = &lbc->mcmr;
break;
default:
return -EINVAL;
}
switch (br & BR_PS) {
case BR_PS_8:
upm->width = 8;
break;
case BR_PS_16:
upm->width = 16;
break;
case BR_PS_32:
upm->width = 32;
break;
default:
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(fsl_upm_find);
/**
* fsl_upm_run_pattern - actually run an UPM pattern
* @upm: pointer to the fsl_upm structure obtained via fsl_upm_find
* @io_base: remapped pointer to where memory access should happen
* @mar: MAR register content during pattern execution
*
 * This function triggers a dummy write to the memory specified by io_base,
 * which causes the UPM pattern to actually be executed. Note that mar usage
 * depends on the pre-programmed AMX bits in the UPM RAM.
*/
int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, u32 mar)
{
int ret = 0;
unsigned long flags;
if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
return -ENODEV;
spin_lock_irqsave(&fsl_lbc_lock, flags);
out_be32(&fsl_lbc_ctrl_dev->regs->mar, mar);
switch (upm->width) {
case 8:
out_8(io_base, 0x0);
break;
case 16:
out_be16(io_base, 0x0);
break;
case 32:
out_be32(io_base, 0x0);
break;
default:
ret = -EINVAL;
break;
}
spin_unlock_irqrestore(&fsl_lbc_lock, flags);
return ret;
}
EXPORT_SYMBOL(fsl_upm_run_pattern);
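/*
 * Usage sketch (assumption): a flash/NAND driver sitting on a UPM-driven
 * chip select resolves the UPM once from its bank base address and then
 * fires patterns with the helper above. Selecting which pattern the UPM runs
 * (via the MxMR OP/MAD fields) is assumed to be done by the caller through
 * the helpers in asm/fsl_lbc.h:
 *
 *	struct fsl_upm upm;
 *
 *	if (fsl_upm_find(res->start, &upm))
 *		return -ENODEV;
 *	fsl_upm_run_pattern(&upm, io_base, mar);
 */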
static int fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl,
struct device_node *node)
{
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
/* clear event registers */
setbits32(&lbc->ltesr, LTESR_CLEAR);
out_be32(&lbc->lteatr, 0);
out_be32(&lbc->ltear, 0);
out_be32(&lbc->lteccr, LTECCR_CLEAR);
out_be32(&lbc->ltedr, LTEDR_ENABLE);
/* Set the monitor timeout value to the maximum for erratum A001 */
if (of_device_is_compatible(node, "fsl,elbc"))
clrsetbits_be32(&lbc->lbcr, LBCR_BMT, LBCR_BMTPS);
return 0;
}
/*
* NOTE: This interrupt is used to report localbus events of various kinds,
* such as transaction errors on the chipselects.
*/
static irqreturn_t fsl_lbc_ctrl_irq(int irqno, void *data)
{
struct fsl_lbc_ctrl *ctrl = data;
struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
u32 status;
unsigned long flags;
spin_lock_irqsave(&fsl_lbc_lock, flags);
status = in_be32(&lbc->ltesr);
if (!status) {
spin_unlock_irqrestore(&fsl_lbc_lock, flags);
return IRQ_NONE;
}
out_be32(&lbc->ltesr, LTESR_CLEAR);
out_be32(&lbc->lteatr, 0);
out_be32(&lbc->ltear, 0);
ctrl->irq_status = status;
if (status & LTESR_BM)
dev_err(ctrl->dev, "Local bus monitor time-out: "
"LTESR 0x%08X\n", status);
if (status & LTESR_WP)
dev_err(ctrl->dev, "Write protect error: "
"LTESR 0x%08X\n", status);
if (status & LTESR_ATMW)
dev_err(ctrl->dev, "Atomic write error: "
"LTESR 0x%08X\n", status);
if (status & LTESR_ATMR)
dev_err(ctrl->dev, "Atomic read error: "
"LTESR 0x%08X\n", status);
if (status & LTESR_CS)
dev_err(ctrl->dev, "Chip select error: "
"LTESR 0x%08X\n", status);
if (status & LTESR_FCT) {
dev_err(ctrl->dev, "FCM command time-out: "
"LTESR 0x%08X\n", status);
smp_wmb();
wake_up(&ctrl->irq_wait);
}
if (status & LTESR_PAR) {
dev_err(ctrl->dev, "Parity or Uncorrectable ECC error: "
"LTESR 0x%08X\n", status);
smp_wmb();
wake_up(&ctrl->irq_wait);
}
if (status & LTESR_CC) {
smp_wmb();
wake_up(&ctrl->irq_wait);
}
if (status & ~LTESR_MASK)
dev_err(ctrl->dev, "Unknown error: "
"LTESR 0x%08X\n", status);
spin_unlock_irqrestore(&fsl_lbc_lock, flags);
return IRQ_HANDLED;
}
/*
* fsl_lbc_ctrl_probe
*
* called by device layer when it finds a device matching
 * one our driver can handle. This code allocates all of
* the resources needed for the controller only. The
* resources for the NAND banks themselves are allocated
* in the chip probe function.
*/
static int fsl_lbc_ctrl_probe(struct platform_device *dev)
{
int ret;
if (!dev->dev.of_node) {
dev_err(&dev->dev, "Device OF-Node is NULL");
return -EFAULT;
}
fsl_lbc_ctrl_dev = kzalloc(sizeof(*fsl_lbc_ctrl_dev), GFP_KERNEL);
if (!fsl_lbc_ctrl_dev)
return -ENOMEM;
dev_set_drvdata(&dev->dev, fsl_lbc_ctrl_dev);
spin_lock_init(&fsl_lbc_ctrl_dev->lock);
init_waitqueue_head(&fsl_lbc_ctrl_dev->irq_wait);
fsl_lbc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0);
if (!fsl_lbc_ctrl_dev->regs) {
dev_err(&dev->dev, "failed to get memory region\n");
ret = -ENODEV;
goto err;
}
fsl_lbc_ctrl_dev->irq[0] = irq_of_parse_and_map(dev->dev.of_node, 0);
if (!fsl_lbc_ctrl_dev->irq[0]) {
dev_err(&dev->dev, "failed to get irq resource\n");
ret = -ENODEV;
goto err;
}
fsl_lbc_ctrl_dev->dev = &dev->dev;
ret = fsl_lbc_ctrl_init(fsl_lbc_ctrl_dev, dev->dev.of_node);
if (ret < 0)
goto err;
ret = request_irq(fsl_lbc_ctrl_dev->irq[0], fsl_lbc_ctrl_irq, 0,
"fsl-lbc", fsl_lbc_ctrl_dev);
if (ret != 0) {
dev_err(&dev->dev, "failed to install irq (%d)\n",
fsl_lbc_ctrl_dev->irq[0]);
ret = fsl_lbc_ctrl_dev->irq[0];
goto err;
}
fsl_lbc_ctrl_dev->irq[1] = irq_of_parse_and_map(dev->dev.of_node, 1);
if (fsl_lbc_ctrl_dev->irq[1]) {
ret = request_irq(fsl_lbc_ctrl_dev->irq[1], fsl_lbc_ctrl_irq,
IRQF_SHARED, "fsl-lbc-err", fsl_lbc_ctrl_dev);
if (ret) {
dev_err(&dev->dev, "failed to install irq (%d)\n",
fsl_lbc_ctrl_dev->irq[1]);
ret = fsl_lbc_ctrl_dev->irq[1];
goto err1;
}
}
/* Enable interrupts for any detected events */
out_be32(&fsl_lbc_ctrl_dev->regs->lteir, LTEIR_ENABLE);
return 0;
err1:
free_irq(fsl_lbc_ctrl_dev->irq[0], fsl_lbc_ctrl_dev);
err:
iounmap(fsl_lbc_ctrl_dev->regs);
kfree(fsl_lbc_ctrl_dev);
fsl_lbc_ctrl_dev = NULL;
return ret;
}
#ifdef CONFIG_SUSPEND
/* save lbc registers */
static int fsl_lbc_syscore_suspend(void)
{
struct fsl_lbc_ctrl *ctrl;
struct fsl_lbc_regs __iomem *lbc;
ctrl = fsl_lbc_ctrl_dev;
if (!ctrl)
goto out;
lbc = ctrl->regs;
if (!lbc)
goto out;
ctrl->saved_regs = kmalloc(sizeof(struct fsl_lbc_regs), GFP_KERNEL);
if (!ctrl->saved_regs)
return -ENOMEM;
_memcpy_fromio(ctrl->saved_regs, lbc, sizeof(struct fsl_lbc_regs));
out:
return 0;
}
/* restore lbc registers */
static void fsl_lbc_syscore_resume(void)
{
struct fsl_lbc_ctrl *ctrl;
struct fsl_lbc_regs __iomem *lbc;
ctrl = fsl_lbc_ctrl_dev;
if (!ctrl)
goto out;
lbc = ctrl->regs;
if (!lbc)
goto out;
if (ctrl->saved_regs) {
_memcpy_toio(lbc, ctrl->saved_regs,
sizeof(struct fsl_lbc_regs));
kfree(ctrl->saved_regs);
ctrl->saved_regs = NULL;
}
out:
return;
}
#endif /* CONFIG_SUSPEND */
static const struct of_device_id fsl_lbc_match[] = {
{ .compatible = "fsl,elbc", },
{ .compatible = "fsl,pq3-localbus", },
{ .compatible = "fsl,pq2-localbus", },
{ .compatible = "fsl,pq2pro-localbus", },
{},
};
#ifdef CONFIG_SUSPEND
static struct syscore_ops lbc_syscore_pm_ops = {
.suspend = fsl_lbc_syscore_suspend,
.resume = fsl_lbc_syscore_resume,
};
#endif
static struct platform_driver fsl_lbc_ctrl_driver = {
.driver = {
.name = "fsl-lbc",
.of_match_table = fsl_lbc_match,
},
.probe = fsl_lbc_ctrl_probe,
};
static int __init fsl_lbc_init(void)
{
#ifdef CONFIG_SUSPEND
register_syscore_ops(&lbc_syscore_pm_ops);
#endif
return platform_driver_register(&fsl_lbc_ctrl_driver);
}
subsys_initcall(fsl_lbc_init);
| linux-master | arch/powerpc/sysdev/fsl_lbc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2006-2008, Michael Ellerman, IBM Corporation.
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <asm/msi_bitmap.h>
#include <asm/setup.h>
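/*
 * Allocate @num contiguous hwirqs from the bitmap. The search is aligned
 * to the next power of two of @num (see the mask passed to
 * bitmap_find_next_zero_area()), which matches the alignment multi-MSI
 * blocks require. Returns the first hwirq on success or -ENOMEM.
 */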
int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num)
{
unsigned long flags;
int offset, order = get_count_order(num);
spin_lock_irqsave(&bmp->lock, flags);
offset = bitmap_find_next_zero_area(bmp->bitmap, bmp->irq_count, 0,
num, (1 << order) - 1);
if (offset > bmp->irq_count)
goto err;
bitmap_set(bmp->bitmap, offset, num);
spin_unlock_irqrestore(&bmp->lock, flags);
pr_debug("msi_bitmap: allocated 0x%x at offset 0x%x\n", num, offset);
return offset;
err:
spin_unlock_irqrestore(&bmp->lock, flags);
return -ENOMEM;
}
EXPORT_SYMBOL(msi_bitmap_alloc_hwirqs);
void msi_bitmap_free_hwirqs(struct msi_bitmap *bmp, unsigned int offset,
unsigned int num)
{
unsigned long flags;
pr_debug("msi_bitmap: freeing 0x%x at offset 0x%x\n",
num, offset);
spin_lock_irqsave(&bmp->lock, flags);
bitmap_clear(bmp->bitmap, offset, num);
spin_unlock_irqrestore(&bmp->lock, flags);
}
EXPORT_SYMBOL(msi_bitmap_free_hwirqs);
void msi_bitmap_reserve_hwirq(struct msi_bitmap *bmp, unsigned int hwirq)
{
unsigned long flags;
pr_debug("msi_bitmap: reserving hwirq 0x%x\n", hwirq);
spin_lock_irqsave(&bmp->lock, flags);
bitmap_allocate_region(bmp->bitmap, hwirq, 0);
spin_unlock_irqrestore(&bmp->lock, flags);
}
/**
* msi_bitmap_reserve_dt_hwirqs - Reserve irqs specified in the device tree.
* @bmp: pointer to the MSI bitmap.
*
* Looks in the device tree to see if there is a property specifying which
* irqs can be used for MSI. If found, all irqs not listed as available in
* the device tree are reserved in the bitmap.
*
* Returns 0 for success, < 0 if there was an error, and > 0 if no property
* was found in the device tree.
**/
int msi_bitmap_reserve_dt_hwirqs(struct msi_bitmap *bmp)
{
int i, j, len;
const u32 *p;
if (!bmp->of_node)
return 1;
p = of_get_property(bmp->of_node, "msi-available-ranges", &len);
if (!p) {
pr_debug("msi_bitmap: no msi-available-ranges property " \
"found on %pOF\n", bmp->of_node);
return 1;
}
if (len % (2 * sizeof(u32)) != 0) {
printk(KERN_WARNING "msi_bitmap: Malformed msi-available-ranges"
" property on %pOF\n", bmp->of_node);
return -EINVAL;
}
bitmap_allocate_region(bmp->bitmap, 0, get_count_order(bmp->irq_count));
spin_lock(&bmp->lock);
/* Format is: (<u32 start> <u32 count>)+ */
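/*
 * For example, msi-available-ranges = <0x40 0x20 0xe0 0x10> would release
 * hwirqs 64-95 and 224-239 while everything else stays reserved
 * (illustrative values only).
 */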
len /= 2 * sizeof(u32);
for (i = 0; i < len; i++, p += 2) {
for (j = 0; j < *(p + 1); j++)
bitmap_release_region(bmp->bitmap, *p + j, 0);
}
spin_unlock(&bmp->lock);
return 0;
}
int __ref msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count,
struct device_node *of_node)
{
int size;
if (!irq_count)
return -EINVAL;
size = BITS_TO_LONGS(irq_count) * sizeof(long);
pr_debug("msi_bitmap: allocator bitmap size is 0x%x bytes\n", size);
bmp->bitmap_from_slab = slab_is_available();
if (bmp->bitmap_from_slab)
bmp->bitmap = kzalloc(size, GFP_KERNEL);
else {
bmp->bitmap = memblock_alloc(size, SMP_CACHE_BYTES);
if (!bmp->bitmap)
panic("%s: Failed to allocate %u bytes\n", __func__,
size);
/* the bitmap won't be freed from memblock allocator */
kmemleak_not_leak(bmp->bitmap);
}
if (!bmp->bitmap) {
pr_debug("msi_bitmap: ENOMEM allocating allocator bitmap!\n");
return -ENOMEM;
}
/* We zalloc'ed the bitmap, so all irqs are free by default */
spin_lock_init(&bmp->lock);
bmp->of_node = of_node_get(of_node);
bmp->irq_count = irq_count;
return 0;
}
void msi_bitmap_free(struct msi_bitmap *bmp)
{
if (bmp->bitmap_from_slab)
kfree(bmp->bitmap);
of_node_put(bmp->of_node);
bmp->bitmap = NULL;
}
#ifdef CONFIG_MSI_BITMAP_SELFTEST
static void __init test_basics(void)
{
struct msi_bitmap bmp;
int rc, i, size = 512;
/* Can't allocate a bitmap of 0 irqs */
WARN_ON(msi_bitmap_alloc(&bmp, 0, NULL) == 0);
/* of_node may be NULL */
WARN_ON(msi_bitmap_alloc(&bmp, size, NULL));
/* Should all be free by default */
WARN_ON(bitmap_find_free_region(bmp.bitmap, size, get_count_order(size)));
bitmap_release_region(bmp.bitmap, 0, get_count_order(size));
/* With no node, there's no msi-available-ranges, so expect > 0 */
WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp) <= 0);
/* Should all still be free */
WARN_ON(bitmap_find_free_region(bmp.bitmap, size, get_count_order(size)));
bitmap_release_region(bmp.bitmap, 0, get_count_order(size));
/* Check we can fill it up and then no more */
for (i = 0; i < size; i++)
WARN_ON(msi_bitmap_alloc_hwirqs(&bmp, 1) < 0);
WARN_ON(msi_bitmap_alloc_hwirqs(&bmp, 1) >= 0);
/* Should all be allocated */
WARN_ON(bitmap_find_free_region(bmp.bitmap, size, 0) >= 0);
/* And if we free one we can then allocate another */
msi_bitmap_free_hwirqs(&bmp, size / 2, 1);
WARN_ON(msi_bitmap_alloc_hwirqs(&bmp, 1) != size / 2);
/* Free most of them for the alignment tests */
msi_bitmap_free_hwirqs(&bmp, 3, size - 3);
/* Check we get a naturally aligned offset */
rc = msi_bitmap_alloc_hwirqs(&bmp, 2);
WARN_ON(rc < 0 && rc % 2 != 0);
rc = msi_bitmap_alloc_hwirqs(&bmp, 4);
WARN_ON(rc < 0 && rc % 4 != 0);
rc = msi_bitmap_alloc_hwirqs(&bmp, 8);
WARN_ON(rc < 0 && rc % 8 != 0);
rc = msi_bitmap_alloc_hwirqs(&bmp, 9);
WARN_ON(rc < 0 && rc % 16 != 0);
rc = msi_bitmap_alloc_hwirqs(&bmp, 3);
WARN_ON(rc < 0 && rc % 4 != 0);
rc = msi_bitmap_alloc_hwirqs(&bmp, 7);
WARN_ON(rc < 0 && rc % 8 != 0);
rc = msi_bitmap_alloc_hwirqs(&bmp, 121);
WARN_ON(rc < 0 && rc % 128 != 0);
msi_bitmap_free(&bmp);
/* Clients may WARN_ON bitmap == NULL for "not-allocated" */
WARN_ON(bmp.bitmap != NULL);
}
static void __init test_of_node(void)
{
u32 prop_data[] = { 10, 10, 25, 3, 40, 1, 100, 100, 200, 20 };
const char *expected_str = "0-9,20-24,28-39,41-99,220-255";
char *prop_name = "msi-available-ranges";
char *node_name = "/fakenode";
struct device_node of_node;
struct property prop;
struct msi_bitmap bmp;
#define SIZE_EXPECTED 256
DECLARE_BITMAP(expected, SIZE_EXPECTED);
/* There should really be a struct device_node allocator */
memset(&of_node, 0, sizeof(of_node));
of_node_init(&of_node);
of_node.full_name = node_name;
WARN_ON(msi_bitmap_alloc(&bmp, SIZE_EXPECTED, &of_node));
/* No msi-available-ranges, so expect > 0 */
WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp) <= 0);
/* Should all still be free */
WARN_ON(bitmap_find_free_region(bmp.bitmap, SIZE_EXPECTED,
get_count_order(SIZE_EXPECTED)));
bitmap_release_region(bmp.bitmap, 0, get_count_order(SIZE_EXPECTED));
/* Now create a fake msi-available-ranges property */
/* There should really .. oh whatever */
memset(&prop, 0, sizeof(prop));
prop.name = prop_name;
prop.value = &prop_data;
prop.length = sizeof(prop_data);
of_node.properties = ∝
/* msi-available-ranges, so expect == 0 */
WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp));
/* Check we got the expected result */
WARN_ON(bitmap_parselist(expected_str, expected, SIZE_EXPECTED));
WARN_ON(!bitmap_equal(expected, bmp.bitmap, SIZE_EXPECTED));
msi_bitmap_free(&bmp);
kfree(bmp.bitmap);
}
static int __init msi_bitmap_selftest(void)
{
printk(KERN_DEBUG "Running MSI bitmap self-tests ...\n");
test_basics();
test_of_node();
return 0;
}
late_initcall(msi_bitmap_selftest);
#endif /* CONFIG_MSI_BITMAP_SELFTEST */
| linux-master | arch/powerpc/sysdev/msi_bitmap.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* FSL SoC setup code
*
* Maintained by Kumar Gala (see MAINTAINERS for contact information)
*
* 2006 (c) MontaVista Software, Inc.
* Vitaly Bordug <[email protected]>
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/spi/spi.h>
#include <linux/fsl_devices.h>
#include <linux/reboot.h>
#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <sysdev/fsl_soc.h>
#include <mm/mmu_decl.h>
#include <asm/cpm2.h>
#include <asm/fsl_hcalls.h> /* For the Freescale hypervisor */
static phys_addr_t immrbase = -1;
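/*
 * Return the physical base address of the SoC internal register block
 * (IMMR/CCSR), taken from the "ranges" of the first soc node in the
 * device tree. The value is cached after the first successful lookup.
 */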
phys_addr_t get_immrbase(void)
{
struct device_node *soc;
if (immrbase != -1)
return immrbase;
soc = of_find_node_by_type(NULL, "soc");
if (soc) {
struct resource res;
if (!of_range_to_resource(soc, 0, &res))
immrbase = res.start;
of_node_put(soc);
}
return immrbase;
}
EXPORT_SYMBOL(get_immrbase);
u32 fsl_get_sys_freq(void)
{
static u32 sysfreq = -1;
struct device_node *soc;
if (sysfreq != -1)
return sysfreq;
soc = of_find_node_by_type(NULL, "soc");
if (!soc)
return -1;
of_property_read_u32(soc, "clock-frequency", &sysfreq);
if (sysfreq == -1 || !sysfreq)
of_property_read_u32(soc, "bus-frequency", &sysfreq);
of_node_put(soc);
return sysfreq;
}
EXPORT_SYMBOL(fsl_get_sys_freq);
#if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE)
u32 get_brgfreq(void)
{
static u32 brgfreq = -1;
struct device_node *node;
if (brgfreq != -1)
return brgfreq;
node = of_find_compatible_node(NULL, NULL, "fsl,cpm-brg");
if (node) {
of_property_read_u32(node, "clock-frequency", &brgfreq);
of_node_put(node);
return brgfreq;
}
/* Legacy device binding -- will go away when no users are left. */
node = of_find_node_by_type(NULL, "cpm");
if (!node)
node = of_find_compatible_node(NULL, NULL, "fsl,qe");
if (!node)
node = of_find_node_by_type(NULL, "qe");
if (node) {
of_property_read_u32(node, "brg-frequency", &brgfreq);
if (brgfreq == -1 || !brgfreq)
if (!of_property_read_u32(node, "bus-frequency",
&brgfreq))
brgfreq /= 2;
of_node_put(node);
}
return brgfreq;
}
EXPORT_SYMBOL(get_brgfreq);
u32 get_baudrate(void)
{
static u32 fs_baudrate = -1;
struct device_node *node;
if (fs_baudrate != -1)
return fs_baudrate;
node = of_find_node_by_type(NULL, "serial");
if (node) {
of_property_read_u32(node, "current-speed", &fs_baudrate);
of_node_put(node);
}
return fs_baudrate;
}
EXPORT_SYMBOL(get_baudrate);
#endif /* CONFIG_CPM || CONFIG_QUICC_ENGINE */
#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
static __be32 __iomem *rstcr;
static int fsl_rstcr_restart(struct notifier_block *this,
unsigned long mode, void *cmd)
{
local_irq_disable();
/* set reset control register */
out_be32(rstcr, 0x2); /* HRESET_REQ */
return NOTIFY_DONE;
}
static int __init setup_rstcr(void)
{
struct device_node *np;
static struct notifier_block restart_handler = {
.notifier_call = fsl_rstcr_restart,
.priority = 128,
};
for_each_node_by_name(np, "global-utilities") {
if (of_property_read_bool(np, "fsl,has-rstcr")) {
rstcr = of_iomap(np, 0) + 0xb0;
if (!rstcr) {
printk (KERN_ERR "Error: reset control "
"register not mapped!\n");
} else {
register_restart_handler(&restart_handler);
}
break;
}
}
of_node_put(np);
return 0;
}
arch_initcall(setup_rstcr);
#endif
#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
struct platform_diu_data_ops diu_ops;
EXPORT_SYMBOL(diu_ops);
#endif
#ifdef CONFIG_EPAPR_PARAVIRT
/*
* Restart the current partition
*
* This function should be assigned to the ppc_md.restart function pointer,
* to initiate a partition restart when we're running under the Freescale
* hypervisor.
*/
void __noreturn fsl_hv_restart(char *cmd)
{
pr_info("hv restart\n");
fh_partition_restart(-1);
while (1) ;
}
/*
* Halt the current partition
*
* This function should be assigned to the pm_power_off and ppc_md.halt
* function pointers, to shut down the partition when we're running under
* the Freescale hypervisor.
*/
void __noreturn fsl_hv_halt(void)
{
pr_info("hv exit\n");
fh_partition_stop(-1);
while (1) ;
}
#endif
| linux-master | arch/powerpc/sysdev/fsl_soc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Support for indirect PCI bridges.
*
* Copyright (C) 1998 Gabriel Paubert.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
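/*
 * Indirect configuration access: the host bridge exposes a CFG_ADDR
 * register that selects bus/devfn/offset and a CFG_DATA window through
 * which the selected config dword is then read or written.
 */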
int __indirect_read_config(struct pci_controller *hose,
unsigned char bus_number, unsigned int devfn,
int offset, int len, u32 *val)
{
volatile void __iomem *cfg_data;
u8 cfg_type = 0;
u32 bus_no, reg;
if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK) {
if (bus_number != hose->first_busno)
return PCIBIOS_DEVICE_NOT_FOUND;
if (devfn != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
}
if (ppc_md.pci_exclude_device)
if (ppc_md.pci_exclude_device(hose, bus_number, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
if (hose->indirect_type & PPC_INDIRECT_TYPE_SET_CFG_TYPE)
if (bus_number != hose->first_busno)
cfg_type = 1;
bus_no = (bus_number == hose->first_busno) ?
hose->self_busno : bus_number;
if (hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG)
reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
else
reg = offset & 0xfc;
if (hose->indirect_type & PPC_INDIRECT_TYPE_BIG_ENDIAN)
out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
(devfn << 8) | reg | cfg_type));
else
out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
(devfn << 8) | reg | cfg_type));
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
cfg_data = hose->cfg_data + (offset & 3);
switch (len) {
case 1:
*val = in_8(cfg_data);
break;
case 2:
*val = in_le16(cfg_data);
break;
default:
*val = in_le32(cfg_data);
break;
}
return PCIBIOS_SUCCESSFUL;
}
int indirect_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
return __indirect_read_config(hose, bus->number, devfn, offset, len,
val);
}
int indirect_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
volatile void __iomem *cfg_data;
u8 cfg_type = 0;
u32 bus_no, reg;
if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK) {
if (bus->number != hose->first_busno)
return PCIBIOS_DEVICE_NOT_FOUND;
if (devfn != 0)
return PCIBIOS_DEVICE_NOT_FOUND;
}
if (ppc_md.pci_exclude_device)
if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
if (hose->indirect_type & PPC_INDIRECT_TYPE_SET_CFG_TYPE)
if (bus->number != hose->first_busno)
cfg_type = 1;
bus_no = (bus->number == hose->first_busno) ?
hose->self_busno : bus->number;
if (hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG)
reg = ((offset & 0xf00) << 16) | (offset & 0xfc);
else
reg = offset & 0xfc;
if (hose->indirect_type & PPC_INDIRECT_TYPE_BIG_ENDIAN)
out_be32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
(devfn << 8) | reg | cfg_type));
else
out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) |
(devfn << 8) | reg | cfg_type));
/* suppress setting of PCI_PRIMARY_BUS */
if (hose->indirect_type & PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS)
if ((offset == PCI_PRIMARY_BUS) &&
(bus->number == hose->first_busno))
val &= 0xffffff00;
/* Workaround for PCI_28 Errata in 440EPx/GRx */
if ((hose->indirect_type & PPC_INDIRECT_TYPE_BROKEN_MRM) &&
offset == PCI_CACHE_LINE_SIZE) {
val = 0;
}
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
*/
cfg_data = hose->cfg_data + (offset & 3);
switch (len) {
case 1:
out_8(cfg_data, val);
break;
case 2:
out_le16(cfg_data, val);
break;
default:
out_le32(cfg_data, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops indirect_pci_ops =
{
.read = indirect_read_config,
.write = indirect_write_config,
};
void setup_indirect_pci(struct pci_controller *hose, resource_size_t cfg_addr,
resource_size_t cfg_data, u32 flags)
{
resource_size_t base = cfg_addr & PAGE_MASK;
void __iomem *mbase;
mbase = ioremap(base, PAGE_SIZE);
hose->cfg_addr = mbase + (cfg_addr & ~PAGE_MASK);
if ((cfg_data & PAGE_MASK) != base)
mbase = ioremap(cfg_data & PAGE_MASK, PAGE_SIZE);
hose->cfg_data = mbase + (cfg_data & ~PAGE_MASK);
hose->ops = &indirect_pci_ops;
hose->indirect_type = flags;
}
| linux-master | arch/powerpc/sysdev/indirect_pci.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Freescale Semiconductor, Inc.
*
* Author: Varun Sethi <[email protected]>
*/
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mpic.h>
#include "mpic.h"
#define MPIC_ERR_INT_BASE 0x3900
#define MPIC_ERR_INT_EISR 0x0000
#define MPIC_ERR_INT_EIMR 0x0010
static inline u32 mpic_fsl_err_read(u32 __iomem *base, unsigned int err_reg)
{
return in_be32(base + (err_reg >> 2));
}
static inline void mpic_fsl_err_write(u32 __iomem *base, u32 value)
{
out_be32(base + (MPIC_ERR_INT_EIMR >> 2), value);
}
static void fsl_mpic_mask_err(struct irq_data *d)
{
u32 eimr;
struct mpic *mpic = irq_data_get_irq_chip_data(d);
unsigned int src = virq_to_hw(d->irq) - mpic->err_int_vecs[0];
eimr = mpic_fsl_err_read(mpic->err_regs, MPIC_ERR_INT_EIMR);
eimr |= (1 << (31 - src));
mpic_fsl_err_write(mpic->err_regs, eimr);
}
static void fsl_mpic_unmask_err(struct irq_data *d)
{
u32 eimr;
struct mpic *mpic = irq_data_get_irq_chip_data(d);
unsigned int src = virq_to_hw(d->irq) - mpic->err_int_vecs[0];
eimr = mpic_fsl_err_read(mpic->err_regs, MPIC_ERR_INT_EIMR);
eimr &= ~(1 << (31 - src));
mpic_fsl_err_write(mpic->err_regs, eimr);
}
static struct irq_chip fsl_mpic_err_chip = {
.irq_disable = fsl_mpic_mask_err,
.irq_mask = fsl_mpic_mask_err,
.irq_unmask = fsl_mpic_unmask_err,
};
int __init mpic_setup_error_int(struct mpic *mpic, int intvec)
{
int i;
mpic->err_regs = ioremap(mpic->paddr + MPIC_ERR_INT_BASE, 0x1000);
if (!mpic->err_regs) {
pr_err("could not map mpic error registers\n");
return -ENOMEM;
}
mpic->hc_err = fsl_mpic_err_chip;
mpic->hc_err.name = mpic->name;
mpic->flags |= MPIC_FSL_HAS_EIMR;
/* allocate interrupt vectors for error interrupts */
for (i = MPIC_MAX_ERR - 1; i >= 0; i--)
mpic->err_int_vecs[i] = intvec--;
return 0;
}
int mpic_map_error_int(struct mpic *mpic, unsigned int virq, irq_hw_number_t hw)
{
if ((mpic->flags & MPIC_FSL_HAS_EIMR) &&
(hw >= mpic->err_int_vecs[0] &&
hw <= mpic->err_int_vecs[MPIC_MAX_ERR - 1])) {
WARN_ON(mpic->flags & MPIC_SECONDARY);
pr_debug("mpic: mapping as Error Interrupt\n");
irq_set_chip_data(virq, mpic);
irq_set_chip_and_handler(virq, &mpic->hc_err,
handle_level_irq);
return 1;
}
return 0;
}
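/*
 * Demultiplex the shared error interrupt: each bit set in EISR identifies
 * one error source, which is handed off to its virtual irq. Sources whose
 * handler fails are masked in EIMR to avoid an interrupt storm.
 */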
static irqreturn_t fsl_error_int_handler(int irq, void *data)
{
struct mpic *mpic = (struct mpic *) data;
u32 eisr, eimr;
int errint;
eisr = mpic_fsl_err_read(mpic->err_regs, MPIC_ERR_INT_EISR);
eimr = mpic_fsl_err_read(mpic->err_regs, MPIC_ERR_INT_EIMR);
if (!(eisr & ~eimr))
return IRQ_NONE;
while (eisr) {
int ret;
errint = __builtin_clz(eisr);
ret = generic_handle_domain_irq(mpic->irqhost,
mpic->err_int_vecs[errint]);
if (WARN_ON(ret)) {
eimr |= 1 << (31 - errint);
mpic_fsl_err_write(mpic->err_regs, eimr);
}
eisr &= ~(1 << (31 - errint));
}
return IRQ_HANDLED;
}
void __init mpic_err_int_init(struct mpic *mpic, irq_hw_number_t irqnum)
{
unsigned int virq;
int ret;
virq = irq_create_mapping(mpic->irqhost, irqnum);
if (!virq) {
pr_err("Error interrupt setup failed\n");
return;
}
/* Mask all error interrupts */
mpic_fsl_err_write(mpic->err_regs, ~0);
ret = request_irq(virq, fsl_error_int_handler, IRQF_NO_THREAD,
"mpic-error-int", mpic);
if (ret)
pr_err("Failed to register error interrupt handler\n");
}
| linux-master | arch/powerpc/sysdev/fsl_mpic_err.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Instantiate mmio-mapped RTC chips based on device tree information
*
* Copyright 2007 David Gibson <[email protected]>, IBM Corporation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/prom.h>
static __initdata struct {
const char *compatible;
char *plat_name;
} of_rtc_table[] = {
{ "ds1743-nvram", "rtc-ds1742" },
};
void __init of_instantiate_rtc(void)
{
struct device_node *node;
int err;
int i;
for (i = 0; i < ARRAY_SIZE(of_rtc_table); i++) {
char *plat_name = of_rtc_table[i].plat_name;
for_each_compatible_node(node, NULL,
of_rtc_table[i].compatible) {
struct resource *res;
res = kmalloc(sizeof(*res), GFP_KERNEL);
if (!res) {
printk(KERN_ERR "OF RTC: Out of memory "
"allocating resource structure for %pOF\n",
node);
continue;
}
err = of_address_to_resource(node, 0, res);
if (err) {
printk(KERN_ERR "OF RTC: Error "
"translating resources for %pOF\n",
node);
continue;
}
printk(KERN_INFO "OF_RTC: %pOF is a %s @ 0x%llx-0x%llx\n",
node, plat_name,
(unsigned long long)res->start,
(unsigned long long)res->end);
platform_device_register_simple(plat_name, -1, res, 1);
}
}
}
| linux-master | arch/powerpc/sysdev/of_rtc.c |
/*
* Setup code for PC-style Real-Time Clock.
*
* Author: Wade Farnsworth <[email protected]>
*
* 2007 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mc146818rtc.h>
#include <linux/of_address.h>
static int __init add_rtc(void)
{
struct device_node *np;
struct platform_device *pd;
struct resource res[2];
unsigned int num_res = 1;
int ret;
memset(&res, 0, sizeof(res));
np = of_find_compatible_node(NULL, NULL, "pnpPNP,b00");
if (!np)
return -ENODEV;
ret = of_address_to_resource(np, 0, &res[0]);
of_node_put(np);
if (ret)
return ret;
/*
* RTC_PORT(x) is hardcoded in asm/mc146818rtc.h. Verify that the
* address provided by the device node matches.
*/
if (res[0].start != RTC_PORT(0))
return -EINVAL;
np = of_find_compatible_node(NULL, NULL, "chrp,iic");
if (!np)
np = of_find_compatible_node(NULL, NULL, "pnpPNP,000");
if (np) {
of_node_put(np);
/*
* Use a fixed interrupt value of 8 since on PPC if we are
* using this its off an i8259 which we ensure has interrupt
* numbers 0..15.
*/
res[1].start = 8;
res[1].end = 8;
res[1].flags = IORESOURCE_IRQ;
num_res++;
}
pd = platform_device_register_simple("rtc_cmos", -1,
&res[0], num_res);
return PTR_ERR_OR_ZERO(pd);
}
fs_initcall(add_rtc);
MODULE_LICENSE("GPL");
| linux-master | arch/powerpc/sysdev/rtc_cmos_setup.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* memory mapped NVRAM
*
* (C) Copyright IBM Corp. 2005
*
* Authors : Utz Bacher <[email protected]>
*/
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/machdep.h>
#include <asm/nvram.h>
static void __iomem *mmio_nvram_start;
static long mmio_nvram_len;
static DEFINE_SPINLOCK(mmio_nvram_lock);
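/* ppc_md NVRAM accessors: clamp to the mapped window and serialize MMIO */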
static ssize_t mmio_nvram_read(char *buf, size_t count, loff_t *index)
{
unsigned long flags;
if (*index >= mmio_nvram_len)
return 0;
if (*index + count > mmio_nvram_len)
count = mmio_nvram_len - *index;
spin_lock_irqsave(&mmio_nvram_lock, flags);
memcpy_fromio(buf, mmio_nvram_start + *index, count);
spin_unlock_irqrestore(&mmio_nvram_lock, flags);
*index += count;
return count;
}
static unsigned char mmio_nvram_read_val(int addr)
{
unsigned long flags;
unsigned char val;
if (addr >= mmio_nvram_len)
return 0xff;
spin_lock_irqsave(&mmio_nvram_lock, flags);
val = ioread8(mmio_nvram_start + addr);
spin_unlock_irqrestore(&mmio_nvram_lock, flags);
return val;
}
static ssize_t mmio_nvram_write(char *buf, size_t count, loff_t *index)
{
unsigned long flags;
if (*index >= mmio_nvram_len)
return 0;
if (*index + count > mmio_nvram_len)
count = mmio_nvram_len - *index;
spin_lock_irqsave(&mmio_nvram_lock, flags);
memcpy_toio(mmio_nvram_start + *index, buf, count);
spin_unlock_irqrestore(&mmio_nvram_lock, flags);
*index += count;
return count;
}
static void mmio_nvram_write_val(int addr, unsigned char val)
{
unsigned long flags;
if (addr < mmio_nvram_len) {
spin_lock_irqsave(&mmio_nvram_lock, flags);
iowrite8(val, mmio_nvram_start + addr);
spin_unlock_irqrestore(&mmio_nvram_lock, flags);
}
}
static ssize_t mmio_nvram_get_size(void)
{
return mmio_nvram_len;
}
int __init mmio_nvram_init(void)
{
struct device_node *nvram_node;
unsigned long nvram_addr;
struct resource r;
int ret;
nvram_node = of_find_node_by_type(NULL, "nvram");
if (!nvram_node)
nvram_node = of_find_compatible_node(NULL, NULL, "nvram");
if (!nvram_node) {
printk(KERN_WARNING "nvram: no node found in device-tree\n");
return -ENODEV;
}
ret = of_address_to_resource(nvram_node, 0, &r);
if (ret) {
printk(KERN_WARNING "nvram: failed to get address (err %d)\n",
ret);
goto out;
}
nvram_addr = r.start;
mmio_nvram_len = resource_size(&r);
if ( (!mmio_nvram_len) || (!nvram_addr) ) {
printk(KERN_WARNING "nvram: address or length is 0\n");
ret = -EIO;
goto out;
}
mmio_nvram_start = ioremap(nvram_addr, mmio_nvram_len);
if (!mmio_nvram_start) {
printk(KERN_WARNING "nvram: failed to ioremap\n");
ret = -ENOMEM;
goto out;
}
printk(KERN_INFO "mmio NVRAM, %luk at 0x%lx mapped to %p\n",
mmio_nvram_len >> 10, nvram_addr, mmio_nvram_start);
ppc_md.nvram_read_val = mmio_nvram_read_val;
ppc_md.nvram_write_val = mmio_nvram_write_val;
ppc_md.nvram_read = mmio_nvram_read;
ppc_md.nvram_write = mmio_nvram_write;
ppc_md.nvram_size = mmio_nvram_get_size;
out:
of_node_put(nvram_node);
return ret;
}
| linux-master | arch/powerpc/sysdev/mmio_nvram.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* arch/powerpc/sysdev/ipic.c
*
* IPIC routines implementations.
*
* Copyright 2005 Freescale Semiconductor, Inc.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/syscore_ops.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/fsl_devices.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/ipic.h>
#include "ipic.h"
static struct ipic * primary_ipic;
static struct irq_chip ipic_level_irq_chip, ipic_edge_irq_chip;
static DEFINE_RAW_SPINLOCK(ipic_lock);
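/*
 * ipic_info is indexed by hardware interrupt (vector) number. Each entry
 * names the mask/priority/force registers that control the source and the
 * bit position within them; .ack is only set for external interrupts.
 */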
static struct ipic_info ipic_info[] = {
[1] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_C,
.force = IPIC_SIFCR_H,
.bit = 16,
.prio_mask = 0,
},
[2] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_C,
.force = IPIC_SIFCR_H,
.bit = 17,
.prio_mask = 1,
},
[3] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_C,
.force = IPIC_SIFCR_H,
.bit = 18,
.prio_mask = 2,
},
[4] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_C,
.force = IPIC_SIFCR_H,
.bit = 19,
.prio_mask = 3,
},
[5] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_C,
.force = IPIC_SIFCR_H,
.bit = 20,
.prio_mask = 4,
},
[6] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_C,
.force = IPIC_SIFCR_H,
.bit = 21,
.prio_mask = 5,
},
[7] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_C,
.force = IPIC_SIFCR_H,
.bit = 22,
.prio_mask = 6,
},
[8] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_C,
.force = IPIC_SIFCR_H,
.bit = 23,
.prio_mask = 7,
},
[9] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_D,
.force = IPIC_SIFCR_H,
.bit = 24,
.prio_mask = 0,
},
[10] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_D,
.force = IPIC_SIFCR_H,
.bit = 25,
.prio_mask = 1,
},
[11] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_D,
.force = IPIC_SIFCR_H,
.bit = 26,
.prio_mask = 2,
},
[12] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_D,
.force = IPIC_SIFCR_H,
.bit = 27,
.prio_mask = 3,
},
[13] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_D,
.force = IPIC_SIFCR_H,
.bit = 28,
.prio_mask = 4,
},
[14] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_D,
.force = IPIC_SIFCR_H,
.bit = 29,
.prio_mask = 5,
},
[15] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_D,
.force = IPIC_SIFCR_H,
.bit = 30,
.prio_mask = 6,
},
[16] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_D,
.force = IPIC_SIFCR_H,
.bit = 31,
.prio_mask = 7,
},
[17] = {
.ack = IPIC_SEPNR,
.mask = IPIC_SEMSR,
.prio = IPIC_SMPRR_A,
.force = IPIC_SEFCR,
.bit = 1,
.prio_mask = 5,
},
[18] = {
.ack = IPIC_SEPNR,
.mask = IPIC_SEMSR,
.prio = IPIC_SMPRR_A,
.force = IPIC_SEFCR,
.bit = 2,
.prio_mask = 6,
},
[19] = {
.ack = IPIC_SEPNR,
.mask = IPIC_SEMSR,
.prio = IPIC_SMPRR_A,
.force = IPIC_SEFCR,
.bit = 3,
.prio_mask = 7,
},
[20] = {
.ack = IPIC_SEPNR,
.mask = IPIC_SEMSR,
.prio = IPIC_SMPRR_B,
.force = IPIC_SEFCR,
.bit = 4,
.prio_mask = 4,
},
[21] = {
.ack = IPIC_SEPNR,
.mask = IPIC_SEMSR,
.prio = IPIC_SMPRR_B,
.force = IPIC_SEFCR,
.bit = 5,
.prio_mask = 5,
},
[22] = {
.ack = IPIC_SEPNR,
.mask = IPIC_SEMSR,
.prio = IPIC_SMPRR_B,
.force = IPIC_SEFCR,
.bit = 6,
.prio_mask = 6,
},
[23] = {
.ack = IPIC_SEPNR,
.mask = IPIC_SEMSR,
.prio = IPIC_SMPRR_B,
.force = IPIC_SEFCR,
.bit = 7,
.prio_mask = 7,
},
[32] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_A,
.force = IPIC_SIFCR_H,
.bit = 0,
.prio_mask = 0,
},
[33] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_A,
.force = IPIC_SIFCR_H,
.bit = 1,
.prio_mask = 1,
},
[34] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_A,
.force = IPIC_SIFCR_H,
.bit = 2,
.prio_mask = 2,
},
[35] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_A,
.force = IPIC_SIFCR_H,
.bit = 3,
.prio_mask = 3,
},
[36] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_A,
.force = IPIC_SIFCR_H,
.bit = 4,
.prio_mask = 4,
},
[37] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_A,
.force = IPIC_SIFCR_H,
.bit = 5,
.prio_mask = 5,
},
[38] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_A,
.force = IPIC_SIFCR_H,
.bit = 6,
.prio_mask = 6,
},
[39] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_A,
.force = IPIC_SIFCR_H,
.bit = 7,
.prio_mask = 7,
},
[40] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_B,
.force = IPIC_SIFCR_H,
.bit = 8,
.prio_mask = 0,
},
[41] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_B,
.force = IPIC_SIFCR_H,
.bit = 9,
.prio_mask = 1,
},
[42] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_B,
.force = IPIC_SIFCR_H,
.bit = 10,
.prio_mask = 2,
},
[43] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_B,
.force = IPIC_SIFCR_H,
.bit = 11,
.prio_mask = 3,
},
[44] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_B,
.force = IPIC_SIFCR_H,
.bit = 12,
.prio_mask = 4,
},
[45] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_B,
.force = IPIC_SIFCR_H,
.bit = 13,
.prio_mask = 5,
},
[46] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_B,
.force = IPIC_SIFCR_H,
.bit = 14,
.prio_mask = 6,
},
[47] = {
.mask = IPIC_SIMSR_H,
.prio = IPIC_SIPRR_B,
.force = IPIC_SIFCR_H,
.bit = 15,
.prio_mask = 7,
},
[48] = {
.ack = IPIC_SEPNR,
.mask = IPIC_SEMSR,
.prio = IPIC_SMPRR_A,
.force = IPIC_SEFCR,
.bit = 0,
.prio_mask = 4,
},
[64] = {
.mask = IPIC_SIMSR_L,
.prio = IPIC_SMPRR_A,
.force = IPIC_SIFCR_L,
.bit = 0,
.prio_mask = 0,
},
[65] = {
.mask = IPIC_SIMSR_L,
.prio = IPIC_SMPRR_A,
.force = IPIC_SIFCR_L,
.bit = 1,
.prio_mask = 1,
},
[66] = {
.mask = IPIC_SIMSR_L,
.prio = IPIC_SMPRR_A,
.force = IPIC_SIFCR_L,
.bit = 2,
.prio_mask = 2,
},
[67] = {
.mask = IPIC_SIMSR_L,
.prio = IPIC_SMPRR_A,
.force = IPIC_SIFCR_L,
.bit = 3,
.prio_mask = 3,
},
[68] = {
.mask = IPIC_SIMSR_L,
.prio = IPIC_SMPRR_B,
.force = IPIC_SIFCR_L,
.bit = 4,
.prio_mask = 0,
},
[69] = {
.mask = IPIC_SIMSR_L,
.prio = IPIC_SMPRR_B,
.force = IPIC_SIFCR_L,
.bit = 5,
.prio_mask = 1,
},
[70] = {
.mask = IPIC_SIMSR_L,
.prio = IPIC_SMPRR_B,
.force = IPIC_SIFCR_L,
.bit = 6,
.prio_mask = 2,
},
[71] = {
.mask = IPIC_SIMSR_L,
.prio = IPIC_SMPRR_B,
.force = IPIC_SIFCR_L,
.bit = 7,
.prio_mask = 3,
},
[72] = {
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 8,
},
[73] = {
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 9,
},
[74] = {
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 10,
},
[75] = {
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 11,
},
[76] = {
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 12,
},
[77] = {
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 13,
},
[78] = {
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 14,
},
[79] = {
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 15,
},
[80] = {
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 16,
},
[81] = {
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 17,
},
[82] = {
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 18,
},
[83] = {
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 19,
},
[84] = {
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 20,
},
[85] = {
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 21,
},
[86] = {
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 22,
},
[87] = {
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 23,
},
[88] = {
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 24,
},
[89] = {
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 25,
},
[90] = {
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 26,
},
[91] = {
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 27,
},
[94] = {
.mask = IPIC_SIMSR_L,
.prio = 0,
.force = IPIC_SIFCR_L,
.bit = 30,
},
};
static inline u32 ipic_read(volatile u32 __iomem *base, unsigned int reg)
{
return in_be32(base + (reg >> 2));
}
static inline void ipic_write(volatile u32 __iomem *base, unsigned int reg, u32 value)
{
out_be32(base + (reg >> 2), value);
}
static inline struct ipic * ipic_from_irq(unsigned int virq)
{
return primary_ipic;
}
static void ipic_unmask_irq(struct irq_data *d)
{
struct ipic *ipic = ipic_from_irq(d->irq);
unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
u32 temp;
raw_spin_lock_irqsave(&ipic_lock, flags);
temp = ipic_read(ipic->regs, ipic_info[src].mask);
temp |= (1 << (31 - ipic_info[src].bit));
ipic_write(ipic->regs, ipic_info[src].mask, temp);
raw_spin_unlock_irqrestore(&ipic_lock, flags);
}
static void ipic_mask_irq(struct irq_data *d)
{
struct ipic *ipic = ipic_from_irq(d->irq);
unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
u32 temp;
raw_spin_lock_irqsave(&ipic_lock, flags);
temp = ipic_read(ipic->regs, ipic_info[src].mask);
temp &= ~(1 << (31 - ipic_info[src].bit));
ipic_write(ipic->regs, ipic_info[src].mask, temp);
/* mb() can't guarantee that masking is finished. But it does finish
* for nearly all cases. */
mb();
raw_spin_unlock_irqrestore(&ipic_lock, flags);
}
static void ipic_ack_irq(struct irq_data *d)
{
struct ipic *ipic = ipic_from_irq(d->irq);
unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
u32 temp;
raw_spin_lock_irqsave(&ipic_lock, flags);
temp = 1 << (31 - ipic_info[src].bit);
ipic_write(ipic->regs, ipic_info[src].ack, temp);
/* mb() can't guarantee that ack is finished. But it does finish
* for nearly all cases. */
mb();
raw_spin_unlock_irqrestore(&ipic_lock, flags);
}
static void ipic_mask_irq_and_ack(struct irq_data *d)
{
struct ipic *ipic = ipic_from_irq(d->irq);
unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
u32 temp;
raw_spin_lock_irqsave(&ipic_lock, flags);
temp = ipic_read(ipic->regs, ipic_info[src].mask);
temp &= ~(1 << (31 - ipic_info[src].bit));
ipic_write(ipic->regs, ipic_info[src].mask, temp);
temp = 1 << (31 - ipic_info[src].bit);
ipic_write(ipic->regs, ipic_info[src].ack, temp);
/* mb() can't guarantee that ack is finished. But it does finish
* for nearly all cases. */
mb();
raw_spin_unlock_irqrestore(&ipic_lock, flags);
}
static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
struct ipic *ipic = ipic_from_irq(d->irq);
unsigned int src = irqd_to_hwirq(d);
unsigned int vold, vnew, edibit;
if (flow_type == IRQ_TYPE_NONE)
flow_type = IRQ_TYPE_LEVEL_LOW;
/* ipic supports only low assertion and high-to-low change senses
*/
if (!(flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))) {
printk(KERN_ERR "ipic: sense type 0x%x not supported\n",
flow_type);
return -EINVAL;
}
/* ipic supports only edge mode on external interrupts */
if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !ipic_info[src].ack) {
printk(KERN_ERR "ipic: edge sense not supported on internal "
"interrupts\n");
return -EINVAL;
}
irqd_set_trigger_type(d, flow_type);
if (flow_type & IRQ_TYPE_LEVEL_LOW) {
irq_set_handler_locked(d, handle_level_irq);
d->chip = &ipic_level_irq_chip;
} else {
irq_set_handler_locked(d, handle_edge_irq);
d->chip = &ipic_edge_irq_chip;
}
/* only EXT IRQ senses are programmable on ipic
* internal IRQ senses are LEVEL_LOW
*/
if (src == IPIC_IRQ_EXT0)
edibit = 15;
else
if (src >= IPIC_IRQ_EXT1 && src <= IPIC_IRQ_EXT7)
edibit = (14 - (src - IPIC_IRQ_EXT1));
else
return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 0 : -EINVAL;
vold = ipic_read(ipic->regs, IPIC_SECNR);
if ((flow_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_FALLING) {
vnew = vold | (1 << edibit);
} else {
vnew = vold & ~(1 << edibit);
}
if (vold != vnew)
ipic_write(ipic->regs, IPIC_SECNR, vnew);
return IRQ_SET_MASK_OK_NOCOPY;
}
/* level interrupts and edge interrupts have different ack operations */
static struct irq_chip ipic_level_irq_chip = {
.name = "IPIC",
.irq_unmask = ipic_unmask_irq,
.irq_mask = ipic_mask_irq,
.irq_mask_ack = ipic_mask_irq,
.irq_set_type = ipic_set_irq_type,
};
static struct irq_chip ipic_edge_irq_chip = {
.name = "IPIC",
.irq_unmask = ipic_unmask_irq,
.irq_mask = ipic_mask_irq,
.irq_mask_ack = ipic_mask_irq_and_ack,
.irq_ack = ipic_ack_irq,
.irq_set_type = ipic_set_irq_type,
};
static int ipic_host_match(struct irq_domain *h, struct device_node *node,
enum irq_domain_bus_token bus_token)
{
/* Exact match, unless ipic node is NULL */
struct device_node *of_node = irq_domain_get_of_node(h);
return of_node == NULL || of_node == node;
}
static int ipic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct ipic *ipic = h->host_data;
irq_set_chip_data(virq, ipic);
irq_set_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq);
/* Set default irq type */
irq_set_irq_type(virq, IRQ_TYPE_NONE);
return 0;
}
static const struct irq_domain_ops ipic_host_ops = {
.match = ipic_host_match,
.map = ipic_host_map,
.xlate = irq_domain_xlate_onetwocell,
};
struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
{
struct ipic *ipic;
struct resource res;
u32 temp = 0, ret;
ret = of_address_to_resource(node, 0, &res);
if (ret)
return NULL;
ipic = kzalloc(sizeof(*ipic), GFP_KERNEL);
if (ipic == NULL)
return NULL;
ipic->irqhost = irq_domain_add_linear(node, NR_IPIC_INTS,
&ipic_host_ops, ipic);
if (ipic->irqhost == NULL) {
kfree(ipic);
return NULL;
}
ipic->regs = ioremap(res.start, resource_size(&res));
/* init hw */
ipic_write(ipic->regs, IPIC_SICNR, 0x0);
/* default priority scheme is grouped. If spread mode is required
* configure SICFR accordingly */
if (flags & IPIC_SPREADMODE_GRP_A)
temp |= SICFR_IPSA;
if (flags & IPIC_SPREADMODE_GRP_B)
temp |= SICFR_IPSB;
if (flags & IPIC_SPREADMODE_GRP_C)
temp |= SICFR_IPSC;
if (flags & IPIC_SPREADMODE_GRP_D)
temp |= SICFR_IPSD;
if (flags & IPIC_SPREADMODE_MIX_A)
temp |= SICFR_MPSA;
if (flags & IPIC_SPREADMODE_MIX_B)
temp |= SICFR_MPSB;
ipic_write(ipic->regs, IPIC_SICFR, temp);
/* handle MCP route */
temp = 0;
if (flags & IPIC_DISABLE_MCP_OUT)
temp = SERCR_MCPR;
ipic_write(ipic->regs, IPIC_SERCR, temp);
/* handle routing of IRQ0 to MCP */
temp = ipic_read(ipic->regs, IPIC_SEMSR);
if (flags & IPIC_IRQ0_MCP)
temp |= SEMSR_SIRQ0;
else
temp &= ~SEMSR_SIRQ0;
ipic_write(ipic->regs, IPIC_SEMSR, temp);
primary_ipic = ipic;
irq_set_default_host(primary_ipic->irqhost);
ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
ipic_write(ipic->regs, IPIC_SIMSR_L, 0);
printk ("IPIC (%d IRQ sources) at %p\n", NR_IPIC_INTS,
primary_ipic->regs);
return ipic;
}
void __init ipic_set_default_priority(void)
{
ipic_write(primary_ipic->regs, IPIC_SIPRR_A, IPIC_PRIORITY_DEFAULT);
ipic_write(primary_ipic->regs, IPIC_SIPRR_B, IPIC_PRIORITY_DEFAULT);
ipic_write(primary_ipic->regs, IPIC_SIPRR_C, IPIC_PRIORITY_DEFAULT);
ipic_write(primary_ipic->regs, IPIC_SIPRR_D, IPIC_PRIORITY_DEFAULT);
ipic_write(primary_ipic->regs, IPIC_SMPRR_A, IPIC_PRIORITY_DEFAULT);
ipic_write(primary_ipic->regs, IPIC_SMPRR_B, IPIC_PRIORITY_DEFAULT);
}
u32 ipic_get_mcp_status(void)
{
return primary_ipic ? ipic_read(primary_ipic->regs, IPIC_SERSR) : 0;
}
void ipic_clear_mcp_status(u32 mask)
{
ipic_write(primary_ipic->regs, IPIC_SERSR, mask);
}
/* Return an interrupt vector or 0 if no interrupt is pending. */
unsigned int ipic_get_irq(void)
{
int irq;
BUG_ON(primary_ipic == NULL);
#define IPIC_SIVCR_VECTOR_MASK 0x7f
irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & IPIC_SIVCR_VECTOR_MASK;
if (irq == 0) /* 0 --> no irq is pending */
return 0;
return irq_linear_revmap(primary_ipic->irqhost, irq);
}
#ifdef CONFIG_SUSPEND
static struct {
u32 sicfr;
u32 siprr[2];
u32 simsr[2];
u32 sicnr;
u32 smprr[2];
u32 semsr;
u32 secnr;
u32 sermr;
u32 sercr;
} ipic_saved_state;
static int ipic_suspend(void)
{
struct ipic *ipic = primary_ipic;
ipic_saved_state.sicfr = ipic_read(ipic->regs, IPIC_SICFR);
ipic_saved_state.siprr[0] = ipic_read(ipic->regs, IPIC_SIPRR_A);
ipic_saved_state.siprr[1] = ipic_read(ipic->regs, IPIC_SIPRR_D);
ipic_saved_state.simsr[0] = ipic_read(ipic->regs, IPIC_SIMSR_H);
ipic_saved_state.simsr[1] = ipic_read(ipic->regs, IPIC_SIMSR_L);
ipic_saved_state.sicnr = ipic_read(ipic->regs, IPIC_SICNR);
ipic_saved_state.smprr[0] = ipic_read(ipic->regs, IPIC_SMPRR_A);
ipic_saved_state.smprr[1] = ipic_read(ipic->regs, IPIC_SMPRR_B);
ipic_saved_state.semsr = ipic_read(ipic->regs, IPIC_SEMSR);
ipic_saved_state.secnr = ipic_read(ipic->regs, IPIC_SECNR);
ipic_saved_state.sermr = ipic_read(ipic->regs, IPIC_SERMR);
ipic_saved_state.sercr = ipic_read(ipic->regs, IPIC_SERCR);
if (fsl_deep_sleep()) {
/* In deep sleep, make sure there can be no
* pending interrupts, as this can cause
* problems on 831x.
*/
ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
ipic_write(ipic->regs, IPIC_SIMSR_L, 0);
ipic_write(ipic->regs, IPIC_SEMSR, 0);
ipic_write(ipic->regs, IPIC_SERMR, 0);
}
return 0;
}
static void ipic_resume(void)
{
struct ipic *ipic = primary_ipic;
ipic_write(ipic->regs, IPIC_SICFR, ipic_saved_state.sicfr);
ipic_write(ipic->regs, IPIC_SIPRR_A, ipic_saved_state.siprr[0]);
ipic_write(ipic->regs, IPIC_SIPRR_D, ipic_saved_state.siprr[1]);
ipic_write(ipic->regs, IPIC_SIMSR_H, ipic_saved_state.simsr[0]);
ipic_write(ipic->regs, IPIC_SIMSR_L, ipic_saved_state.simsr[1]);
ipic_write(ipic->regs, IPIC_SICNR, ipic_saved_state.sicnr);
ipic_write(ipic->regs, IPIC_SMPRR_A, ipic_saved_state.smprr[0]);
ipic_write(ipic->regs, IPIC_SMPRR_B, ipic_saved_state.smprr[1]);
ipic_write(ipic->regs, IPIC_SEMSR, ipic_saved_state.semsr);
ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr);
ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr);
ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr);
}
#else
#define ipic_suspend NULL
#define ipic_resume NULL
#endif
static struct syscore_ops ipic_syscore_ops = {
.suspend = ipic_suspend,
.resume = ipic_resume,
};
static int __init init_ipic_syscore(void)
{
if (!primary_ipic || !primary_ipic->regs)
return -ENODEV;
printk(KERN_DEBUG "Registering ipic system core operations\n");
register_syscore_ops(&ipic_syscore_ops);
return 0;
}
subsys_initcall(init_ipic_syscore);
| linux-master | arch/powerpc/sysdev/ipic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Suspend/resume support
*
* Copyright 2009 MontaVista Software, Inc.
*
* Author: Anton Vorontsov <[email protected]>
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/mod_devicetable.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
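/* Minimal view of the PMC register block: DEVDISR/DEVDISR2 followed by
 * two reserved words and PMCSR, whose SLP bit requests sleep mode. */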
struct pmc_regs {
__be32 devdisr;
__be32 devdisr2;
__be32 :32;
__be32 :32;
__be32 pmcsr;
#define PMCSR_SLP (1 << 17)
};
static struct device *pmc_dev;
static struct pmc_regs __iomem *pmc_regs;
static int pmc_suspend_enter(suspend_state_t state)
{
int ret;
setbits32(&pmc_regs->pmcsr, PMCSR_SLP);
/* At this point, the CPU is asleep. */
/* Upon resume, wait for SLP bit to be clear. */
ret = spin_event_timeout((in_be32(&pmc_regs->pmcsr) & PMCSR_SLP) == 0,
10000, 10) ? 0 : -ETIMEDOUT;
if (ret)
dev_err(pmc_dev, "tired waiting for SLP bit to clear\n");
return ret;
}
static int pmc_suspend_valid(suspend_state_t state)
{
if (state != PM_SUSPEND_STANDBY)
return 0;
return 1;
}
static const struct platform_suspend_ops pmc_suspend_ops = {
.valid = pmc_suspend_valid,
.enter = pmc_suspend_enter,
};
static int pmc_probe(struct platform_device *ofdev)
{
pmc_regs = of_iomap(ofdev->dev.of_node, 0);
if (!pmc_regs)
return -ENOMEM;
pmc_dev = &ofdev->dev;
suspend_set_ops(&pmc_suspend_ops);
return 0;
}
static const struct of_device_id pmc_ids[] = {
{ .compatible = "fsl,mpc8548-pmc", },
{ .compatible = "fsl,mpc8641d-pmc", },
{ },
};
static struct platform_driver pmc_driver = {
.driver = {
.name = "fsl-pmc",
.of_match_table = pmc_ids,
},
.probe = pmc_probe,
};
builtin_platform_driver(pmc_driver);
| linux-master | arch/powerpc/sysdev/fsl_pmc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Functions for setting up and using a MPC106 northbridge
* Extracted from arch/powerpc/platforms/powermac/pci.c.
*
* Copyright (C) 2003 Benjamin Herrenschmidt ([email protected])
* Copyright (C) 1997 Paul Mackerras ([email protected])
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/of.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/grackle.h>
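/*
 * Compose a CONFIG_ADDR value for the MPC106: enable bit, bus number,
 * devfn and dword-aligned offset, laid out byte-swapped so that the
 * big-endian out_be32() below lands them in the order the bridge expects.
 */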
#define GRACKLE_CFA(b, d, o) (0x80 | ((b) << 8) | ((d) << 16) \
| (((o) & ~3) << 24))
#define GRACKLE_PICR1_STG 0x00000040
#define GRACKLE_PICR1_LOOPSNOOP 0x00000010
/* N.B. this is called before bridges is initialized, so we can't
use grackle_pcibios_{read,write}_config_dword. */
static inline void grackle_set_stg(struct pci_controller* bp, int enable)
{
unsigned int val;
out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
val = in_le32(bp->cfg_data);
val = enable? (val | GRACKLE_PICR1_STG) :
(val & ~GRACKLE_PICR1_STG);
out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
out_le32(bp->cfg_data, val);
(void)in_le32(bp->cfg_data);
}
static inline void grackle_set_loop_snoop(struct pci_controller *bp, int enable)
{
unsigned int val;
out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
val = in_le32(bp->cfg_data);
val = enable? (val | GRACKLE_PICR1_LOOPSNOOP) :
(val & ~GRACKLE_PICR1_LOOPSNOOP);
out_be32(bp->cfg_addr, GRACKLE_CFA(0, 0, 0xa8));
out_le32(bp->cfg_data, val);
(void)in_le32(bp->cfg_data);
}
void __init setup_grackle(struct pci_controller *hose)
{
setup_indirect_pci(hose, 0xfec00000, 0xfee00000, 0);
if (of_machine_is_compatible("PowerMac1,1"))
pci_add_flags(PCI_REASSIGN_ALL_BUS);
if (of_machine_is_compatible("AAPL,PowerBook1998"))
grackle_set_loop_snoop(hose, 1);
#if 0 /* Disabled for now, HW problems ??? */
grackle_set_stg(hose, 1);
#endif
}
| linux-master | arch/powerpc/sysdev/grackle.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2006, Segher Boessenkool, IBM Corporation.
* Copyright 2006-2007, Michael Ellerman, IBM Corporation.
*/
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <asm/mpic.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/msi_bitmap.h>
#include "mpic.h"
/* A bit ugly, can we get this from the pci_dev somehow? */
static struct mpic *msi_mpic;
static void mpic_u3msi_mask_irq(struct irq_data *data)
{
pci_msi_mask_irq(data);
mpic_mask_irq(data);
}
static void mpic_u3msi_unmask_irq(struct irq_data *data)
{
mpic_unmask_irq(data);
pci_msi_unmask_irq(data);
}
static struct irq_chip mpic_u3msi_chip = {
.irq_shutdown = mpic_u3msi_mask_irq,
.irq_mask = mpic_u3msi_mask_irq,
.irq_unmask = mpic_u3msi_unmask_irq,
.irq_eoi = mpic_end_irq,
.irq_set_type = mpic_set_irq_type,
.irq_set_affinity = mpic_set_affinity,
.name = "MPIC-U3MSI",
};
static u64 read_ht_magic_addr(struct pci_dev *pdev, unsigned int pos)
{
u8 flags;
u32 tmp;
u64 addr;
pci_read_config_byte(pdev, pos + HT_MSI_FLAGS, &flags);
if (flags & HT_MSI_FLAGS_FIXED)
return HT_MSI_FIXED_ADDR;
pci_read_config_dword(pdev, pos + HT_MSI_ADDR_LO, &tmp);
addr = tmp & HT_MSI_ADDR_LO_MASK;
pci_read_config_dword(pdev, pos + HT_MSI_ADDR_HI, &tmp);
addr = addr | ((u64)tmp << 32);
return addr;
}
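/*
 * Walk up the bus hierarchy looking for a bridge with a HyperTransport
 * MSI mapping capability and return its magic address, or 0 if none.
 */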
static u64 find_ht_magic_addr(struct pci_dev *pdev, unsigned int hwirq)
{
struct pci_bus *bus;
unsigned int pos;
for (bus = pdev->bus; bus && bus->self; bus = bus->parent) {
pos = pci_find_ht_capability(bus->self, HT_CAPTYPE_MSI_MAPPING);
if (pos)
return read_ht_magic_addr(bus->self, pos);
}
return 0;
}
static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
/* U4 PCIe MSIs need to write to the special register in
* the bridge that generates interrupts. There should be
* theoretically a register at 0xf8005000 where you just write
* the MSI number and that triggers the right interrupt, but
* unfortunately, this is busted in HW, the bridge endian swaps
* the value and hits the wrong nibble in the register.
*
* So instead we use another register set which is used normally
* for converting HT interrupts to MPIC interrupts, which decodes
* the interrupt number as part of the low address bits
*
* This will not work if we ever use more than one legacy MSI in
* a block but we never do. For one MSI or multiple MSI-X where
* each interrupt address can be specified separately, it works
* just fine.
*/
if (of_device_is_compatible(hose->dn, "u4-pcie") ||
of_device_is_compatible(hose->dn, "U4-pcie"))
return 0xf8004000 | (hwirq << 4);
return 0;
}
static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
irq_hw_number_t hwirq;
msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
irq_dispose_mapping(entry->irq);
entry->irq = 0;
msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
}
}
static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
unsigned int virq;
struct msi_desc *entry;
struct msi_msg msg;
u64 addr;
int hwirq;
if (type == PCI_CAP_ID_MSIX)
pr_debug("u3msi: MSI-X untested, trying anyway.\n");
/* If we can't find a magic address then MSI ain't gonna work */
if (find_ht_magic_addr(pdev, 0) == 0 &&
find_u4_magic_addr(pdev, 0) == 0) {
pr_debug("u3msi: no magic address found for %s\n",
pci_name(pdev));
return -ENXIO;
}
msi_for_each_desc(entry, &pdev->dev, MSI_DESC_NOTASSOCIATED) {
hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap, 1);
if (hwirq < 0) {
pr_debug("u3msi: failed allocating hwirq\n");
return hwirq;
}
addr = find_ht_magic_addr(pdev, hwirq);
if (addr == 0)
addr = find_u4_magic_addr(pdev, hwirq);
msg.address_lo = addr & 0xFFFFFFFF;
msg.address_hi = addr >> 32;
virq = irq_create_mapping(msi_mpic->irqhost, hwirq);
if (!virq) {
pr_debug("u3msi: failed mapping hwirq 0x%x\n", hwirq);
msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
return -ENOSPC;
}
irq_set_msi_desc(virq, entry);
irq_set_chip(virq, &mpic_u3msi_chip);
irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
pr_debug("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n",
virq, hwirq, (unsigned long)addr);
printk("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n",
virq, hwirq, (unsigned long)addr);
msg.data = hwirq;
pci_write_msi_msg(virq, &msg);
hwirq++;
}
return 0;
}
int __init mpic_u3msi_init(struct mpic *mpic)
{
int rc;
struct pci_controller *phb;
rc = mpic_msi_init_allocator(mpic);
if (rc) {
pr_debug("u3msi: Error allocating bitmap!\n");
return rc;
}
pr_debug("u3msi: Registering MPIC U3 MSI callbacks.\n");
BUG_ON(msi_mpic);
msi_mpic = mpic;
list_for_each_entry(phb, &hose_list, list_node) {
WARN_ON(phb->controller_ops.setup_msi_irqs);
phb->controller_ops.setup_msi_irqs = u3msi_setup_msi_irqs;
phb->controller_ops.teardown_msi_irqs = u3msi_teardown_msi_irqs;
}
return 0;
}
| linux-master | arch/powerpc/sysdev/mpic_u3msi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* tsi108/109 device setup code
*
* Maintained by Roy Zang < [email protected] >
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <asm/tsi108.h>
#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <mm/mmu_decl.h>
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) do { printk(fmt); } while(0)
#else
#define DBG(fmt...) do { } while(0)
#endif
static phys_addr_t tsi108_csr_base = -1;
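/*
 * Look up and cache the physical base of the Tsi108/109 bridge CSR block
 * from the first "tsi-bridge" node in the device tree.
 */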
phys_addr_t get_csrbase(void)
{
struct device_node *tsi;
if (tsi108_csr_base != -1)
return tsi108_csr_base;
tsi = of_find_node_by_type(NULL, "tsi-bridge");
if (tsi) {
struct resource res;
of_address_to_resource(tsi, 0, &res);
tsi108_csr_base = res.start;
of_node_put(tsi);
}
return tsi108_csr_base;
}
EXPORT_SYMBOL(get_csrbase);
u32 get_vir_csrbase(void)
{
return (u32) (ioremap(get_csrbase(), 0x10000));
}
EXPORT_SYMBOL(get_vir_csrbase);
static int __init tsi108_eth_of_init(void)
{
struct device_node *np;
unsigned int i = 0;
struct platform_device *tsi_eth_dev;
struct resource res;
int ret;
for_each_compatible_node(np, "network", "tsi108-ethernet") {
struct resource r[2];
struct device_node *phy, *mdio;
hw_info tsi_eth_data;
const unsigned int *phy_id;
const phandle *ph;
memset(r, 0, sizeof(r));
memset(&tsi_eth_data, 0, sizeof(tsi_eth_data));
ret = of_address_to_resource(np, 0, &r[0]);
DBG("%s: name:start->end = %s:%pR\n",
__func__, r[0].name, &r[0]);
if (ret)
goto err;
r[1].name = "tx";
r[1].start = irq_of_parse_and_map(np, 0);
r[1].end = irq_of_parse_and_map(np, 0);
r[1].flags = IORESOURCE_IRQ;
DBG("%s: name:start->end = %s:%pR\n",
__func__, r[1].name, &r[1]);
tsi_eth_dev =
platform_device_register_simple("tsi-ethernet", i++, &r[0],
1);
if (IS_ERR(tsi_eth_dev)) {
ret = PTR_ERR(tsi_eth_dev);
goto err;
}
of_get_mac_address(np, tsi_eth_data.mac_addr);
ph = of_get_property(np, "mdio-handle", NULL);
mdio = of_find_node_by_phandle(*ph);
ret = of_address_to_resource(mdio, 0, &res);
of_node_put(mdio);
if (ret)
goto unreg;
ph = of_get_property(np, "phy-handle", NULL);
phy = of_find_node_by_phandle(*ph);
if (phy == NULL) {
ret = -ENODEV;
goto unreg;
}
phy_id = of_get_property(phy, "reg", NULL);
tsi_eth_data.regs = r[0].start;
tsi_eth_data.phyregs = res.start;
tsi_eth_data.phy = *phy_id;
tsi_eth_data.irq_num = irq_of_parse_and_map(np, 0);
/* Some boards with the TSI108 bridge (e.g. Holly)
* have a miswiring of the ethernet PHYs which
* requires a workaround. The special
* "txc-rxc-delay-disable" property enables this
* workaround. FIXME: Need to port the tsi108_eth
* driver itself to phylib and use a non-misleading
* name for the workaround flag - it's not actually to
* do with the model of PHY in use */
if (of_property_read_bool(phy, "txc-rxc-delay-disable"))
tsi_eth_data.phy_type = TSI108_PHY_BCM54XX;
of_node_put(phy);
ret =
platform_device_add_data(tsi_eth_dev, &tsi_eth_data,
sizeof(hw_info));
if (ret)
goto unreg;
}
return 0;
unreg:
platform_device_unregister(tsi_eth_dev);
err:
of_node_put(np);
return ret;
}
arch_initcall(tsi108_eth_of_init);
| linux-master | arch/powerpc/sysdev/tsi108_dev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2007-2011 Freescale Semiconductor, Inc.
*
* Author: Tony Li <[email protected]>
* Jason Jin <[email protected]>
*
* The hwirq alloc and free code reuse from sysdev/mpic_msi.c
*/
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/seq_file.h>
#include <sysdev/fsl_soc.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/mpic.h>
#include <asm/fsl_hcalls.h>
#include "fsl_msi.h"
#include "fsl_pci.h"
#define MSIIR_OFFSET_MASK 0xfffff
#define MSIIR_IBS_SHIFT 0
#define MSIIR_SRS_SHIFT 5
#define MSIIR1_IBS_SHIFT 4
#define MSIIR1_SRS_SHIFT 0
#define MSI_SRS_MASK 0xf
#define MSI_IBS_MASK 0x1f
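/*
 * A hwirq encodes both the MSIR bank (srs) and the bit within that bank
 * (ibs); the shift positions depend on whether the block uses the MSIIR
 * or MSIIR1 layout (see the *_SHIFT defines above).
 */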
#define msi_hwirq(msi, msir_index, intr_index) \
((msir_index) << (msi)->srs_shift | \
((intr_index) << (msi)->ibs_shift))
static LIST_HEAD(msi_head);
struct fsl_msi_feature {
u32 fsl_pic_ip;
u32 msiir_offset; /* Offset of MSIIR, relative to start of MSIR bank */
};
struct fsl_msi_cascade_data {
struct fsl_msi *msi_data;
int index;
int virq;
};
static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg)
{
return in_be32(base + (reg >> 2));
}
/*
 * No explicit ack is needed here: the MSIR register was already read in
 * the cascade interrupt handler, which acknowledged this MSI interrupt.
 */
static void fsl_msi_end_irq(struct irq_data *d)
{
}
static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p)
{
struct fsl_msi *msi_data = irqd->domain->host_data;
irq_hw_number_t hwirq = irqd_to_hwirq(irqd);
int cascade_virq, srs;
srs = (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK;
cascade_virq = msi_data->cascade_array[srs]->virq;
seq_printf(p, " fsl-msi-%d", cascade_virq);
}
static struct irq_chip fsl_msi_chip = {
.irq_mask = pci_msi_mask_irq,
.irq_unmask = pci_msi_unmask_irq,
.irq_ack = fsl_msi_end_irq,
.irq_print_chip = fsl_msi_print_chip,
};
static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct fsl_msi *msi_data = h->host_data;
struct irq_chip *chip = &fsl_msi_chip;
irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING);
irq_set_chip_data(virq, msi_data);
irq_set_chip_and_handler(virq, chip, handle_edge_irq);
return 0;
}
static const struct irq_domain_ops fsl_msi_host_ops = {
.map = fsl_msi_host_map,
};
static int fsl_msi_init_allocator(struct fsl_msi *msi_data)
{
int rc, hwirq;
rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS_MAX,
irq_domain_get_of_node(msi_data->irqhost));
if (rc)
return rc;
/*
* Reserve all the hwirqs
* The available hwirqs will be released in fsl_msi_setup_hwirq()
*/
for (hwirq = 0; hwirq < NR_MSI_IRQS_MAX; hwirq++)
msi_bitmap_reserve_hwirq(&msi_data->bitmap, hwirq);
return 0;
}
static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
struct fsl_msi *msi_data;
irq_hw_number_t hwirq;
msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
hwirq = virq_to_hw(entry->irq);
msi_data = irq_get_chip_data(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
irq_dispose_mapping(entry->irq);
entry->irq = 0;
msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
}
}
static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
struct msi_msg *msg,
struct fsl_msi *fsl_msi_data)
{
struct fsl_msi *msi_data = fsl_msi_data;
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
u64 address; /* Physical address of the MSIIR */
int len;
const __be64 *reg;
/* If the msi-address-64 property exists, then use it */
reg = of_get_property(hose->dn, "msi-address-64", &len);
if (reg && (len == sizeof(u64)))
address = be64_to_cpup(reg);
else
address = fsl_pci_immrbar_base(hose) + msi_data->msiir_offset;
msg->address_lo = lower_32_bits(address);
msg->address_hi = upper_32_bits(address);
	/*
	 * MPIC version 2.0 has erratum PIC1, which prevents both MSI and
	 * MSI-X from working reliably. This workaround allows MSI-X to
	 * function properly; it only helps MSI-X, so plain MSI is blocked
	 * on affected chips in fsl_setup_msi_irqs().
	 */
if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
msg->data = __swab32(hwirq);
else
msg->data = hwirq;
pr_debug("%s: allocated srs: %d, ibs: %d\n", __func__,
(hwirq >> msi_data->srs_shift) & MSI_SRS_MASK,
(hwirq >> msi_data->ibs_shift) & MSI_IBS_MASK);
}
static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct device_node *np;
phandle phandle = 0;
int rc, hwirq = -ENOMEM;
unsigned int virq;
struct msi_desc *entry;
struct msi_msg msg;
struct fsl_msi *msi_data;
if (type == PCI_CAP_ID_MSI) {
		/*
		 * MPIC version 2.0 has erratum PIC1, which makes plain MSI
		 * unusable. Check for it and refuse MSI on boards with the
		 * affected silicon.
		 */
list_for_each_entry(msi_data, &msi_head, list)
if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
return -EINVAL;
}
/*
* If the PCI node has an fsl,msi property, then we need to use it
* to find the specific MSI.
*/
np = of_parse_phandle(hose->dn, "fsl,msi", 0);
if (np) {
if (of_device_is_compatible(np, "fsl,mpic-msi") ||
of_device_is_compatible(np, "fsl,vmpic-msi") ||
of_device_is_compatible(np, "fsl,vmpic-msi-v4.3"))
phandle = np->phandle;
else {
dev_err(&pdev->dev,
"node %pOF has an invalid fsl,msi phandle %u\n",
hose->dn, np->phandle);
of_node_put(np);
return -EINVAL;
}
of_node_put(np);
}
msi_for_each_desc(entry, &pdev->dev, MSI_DESC_NOTASSOCIATED) {
/*
* Loop over all the MSI devices until we find one that has an
* available interrupt.
*/
list_for_each_entry(msi_data, &msi_head, list) {
/*
* If the PCI node has an fsl,msi property, then we
* restrict our search to the corresponding MSI node.
* The simplest way is to skip over MSI nodes with the
* wrong phandle. Under the Freescale hypervisor, this
* has the additional benefit of skipping over MSI
* nodes that are not mapped in the PAMU.
*/
if (phandle && (phandle != msi_data->phandle))
continue;
hwirq = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
if (hwirq >= 0)
break;
}
if (hwirq < 0) {
rc = hwirq;
dev_err(&pdev->dev, "could not allocate MSI interrupt\n");
goto out_free;
}
virq = irq_create_mapping(msi_data->irqhost, hwirq);
if (!virq) {
dev_err(&pdev->dev, "fail mapping hwirq %i\n", hwirq);
msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
rc = -ENOSPC;
goto out_free;
}
/* chip_data is msi_data via host->hostdata in host->map() */
irq_set_msi_desc(virq, entry);
fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data);
pci_write_msi_msg(virq, &msg);
}
return 0;
out_free:
	/* Hwirqs allocated so far are freed by the caller of this function */
return rc;
}
static irqreturn_t fsl_msi_cascade(int irq, void *data)
{
struct fsl_msi *msi_data;
int msir_index = -1;
u32 msir_value = 0;
u32 intr_index;
u32 have_shift = 0;
struct fsl_msi_cascade_data *cascade_data = data;
irqreturn_t ret = IRQ_NONE;
msi_data = cascade_data->msi_data;
msir_index = cascade_data->index;
switch (msi_data->feature & FSL_PIC_IP_MASK) {
case FSL_PIC_IP_MPIC:
msir_value = fsl_msi_read(msi_data->msi_regs,
msir_index * 0x10);
break;
case FSL_PIC_IP_IPIC:
msir_value = fsl_msi_read(msi_data->msi_regs, msir_index * 0x4);
break;
#ifdef CONFIG_EPAPR_PARAVIRT
case FSL_PIC_IP_VMPIC: {
unsigned int ret;
ret = fh_vmpic_get_msir(virq_to_hw(irq), &msir_value);
if (ret) {
pr_err("fsl-msi: fh_vmpic_get_msir() failed for "
"irq %u (ret=%u)\n", irq, ret);
msir_value = 0;
}
break;
}
#endif
}
while (msir_value) {
int err;
intr_index = ffs(msir_value) - 1;
err = generic_handle_domain_irq(msi_data->irqhost,
msi_hwirq(msi_data, msir_index,
intr_index + have_shift));
if (!err)
ret = IRQ_HANDLED;
have_shift += intr_index + 1;
msir_value = msir_value >> (intr_index + 1);
}
return ret;
}
static int fsl_of_msi_remove(struct platform_device *ofdev)
{
struct fsl_msi *msi = platform_get_drvdata(ofdev);
int virq, i;
if (msi->list.prev != NULL)
list_del(&msi->list);
for (i = 0; i < NR_MSI_REG_MAX; i++) {
if (msi->cascade_array[i]) {
virq = msi->cascade_array[i]->virq;
BUG_ON(!virq);
free_irq(virq, msi->cascade_array[i]);
kfree(msi->cascade_array[i]);
irq_dispose_mapping(virq);
}
}
if (msi->bitmap.bitmap)
msi_bitmap_free(&msi->bitmap);
if ((msi->feature & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC)
iounmap(msi->msi_regs);
kfree(msi);
return 0;
}
static struct lock_class_key fsl_msi_irq_class;
static struct lock_class_key fsl_msi_irq_request_class;
static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
int offset, int irq_index)
{
struct fsl_msi_cascade_data *cascade_data = NULL;
int virt_msir, i, ret;
virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index);
if (!virt_msir) {
dev_err(&dev->dev, "%s: Cannot translate IRQ index %d\n",
__func__, irq_index);
return 0;
}
cascade_data = kzalloc(sizeof(struct fsl_msi_cascade_data), GFP_KERNEL);
if (!cascade_data) {
dev_err(&dev->dev, "No memory for MSI cascade data\n");
return -ENOMEM;
}
irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class,
&fsl_msi_irq_request_class);
cascade_data->index = offset;
cascade_data->msi_data = msi;
cascade_data->virq = virt_msir;
msi->cascade_array[irq_index] = cascade_data;
ret = request_irq(virt_msir, fsl_msi_cascade, IRQF_NO_THREAD,
"fsl-msi-cascade", cascade_data);
if (ret) {
dev_err(&dev->dev, "failed to request_irq(%d), ret = %d\n",
virt_msir, ret);
return ret;
}
/* Release the hwirqs corresponding to this MSI register */
for (i = 0; i < IRQS_PER_MSI_REG; i++)
msi_bitmap_free_hwirqs(&msi->bitmap,
msi_hwirq(msi, offset, i), 1);
return 0;
}
static const struct of_device_id fsl_of_msi_ids[];
static int fsl_of_msi_probe(struct platform_device *dev)
{
const struct of_device_id *match;
struct fsl_msi *msi;
struct resource res, msiir;
int err, i, j, irq_index, count;
const u32 *p;
const struct fsl_msi_feature *features;
int len;
u32 offset;
struct pci_controller *phb;
match = of_match_device(fsl_of_msi_ids, &dev->dev);
if (!match)
return -EINVAL;
features = match->data;
printk(KERN_DEBUG "Setting up Freescale MSI support\n");
msi = kzalloc(sizeof(struct fsl_msi), GFP_KERNEL);
if (!msi) {
dev_err(&dev->dev, "No memory for MSI structure\n");
return -ENOMEM;
}
platform_set_drvdata(dev, msi);
msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi);
if (msi->irqhost == NULL) {
dev_err(&dev->dev, "No memory for MSI irqhost\n");
err = -ENOMEM;
goto error_out;
}
/*
* Under the Freescale hypervisor, the msi nodes don't have a 'reg'
* property. Instead, we use hypercalls to access the MSI.
*/
if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) != FSL_PIC_IP_VMPIC) {
err = of_address_to_resource(dev->dev.of_node, 0, &res);
if (err) {
dev_err(&dev->dev, "invalid resource for node %pOF\n",
dev->dev.of_node);
goto error_out;
}
msi->msi_regs = ioremap(res.start, resource_size(&res));
if (!msi->msi_regs) {
err = -ENOMEM;
dev_err(&dev->dev, "could not map node %pOF\n",
dev->dev.of_node);
goto error_out;
}
msi->msiir_offset =
features->msiir_offset + (res.start & 0xfffff);
		/*
		 * First read the MSIIR/MSIIR1 offset from the device tree;
		 * on failure fall back to the hardcoded MSIIR offset.
		 */
if (of_address_to_resource(dev->dev.of_node, 1, &msiir))
msi->msiir_offset = features->msiir_offset +
(res.start & MSIIR_OFFSET_MASK);
else
msi->msiir_offset = msiir.start & MSIIR_OFFSET_MASK;
}
msi->feature = features->fsl_pic_ip;
/* For erratum PIC1 on MPIC version 2.0*/
if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) == FSL_PIC_IP_MPIC
&& (fsl_mpic_primary_get_version() == 0x0200))
msi->feature |= MSI_HW_ERRATA_ENDIAN;
/*
* Remember the phandle, so that we can match with any PCI nodes
* that have an "fsl,msi" property.
*/
msi->phandle = dev->dev.of_node->phandle;
err = fsl_msi_init_allocator(msi);
if (err) {
dev_err(&dev->dev, "Error allocating MSI bitmap\n");
goto error_out;
}
p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);
if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3") ||
of_device_is_compatible(dev->dev.of_node, "fsl,vmpic-msi-v4.3")) {
msi->srs_shift = MSIIR1_SRS_SHIFT;
msi->ibs_shift = MSIIR1_IBS_SHIFT;
if (p)
			dev_warn(&dev->dev, "%s: does not support the msi-available-ranges property\n",
__func__);
for (irq_index = 0; irq_index < NR_MSI_REG_MSIIR1;
irq_index++) {
err = fsl_msi_setup_hwirq(msi, dev,
irq_index, irq_index);
if (err)
goto error_out;
}
} else {
static const u32 all_avail[] =
{ 0, NR_MSI_REG_MSIIR * IRQS_PER_MSI_REG };
msi->srs_shift = MSIIR_SRS_SHIFT;
msi->ibs_shift = MSIIR_IBS_SHIFT;
if (p && len % (2 * sizeof(u32)) != 0) {
dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
__func__);
err = -EINVAL;
goto error_out;
}
if (!p) {
p = all_avail;
len = sizeof(all_avail);
}
for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) {
if (p[i * 2] % IRQS_PER_MSI_REG ||
p[i * 2 + 1] % IRQS_PER_MSI_REG) {
pr_warn("%s: %pOF: msi available range of %u at %u is not IRQ-aligned\n",
__func__, dev->dev.of_node,
p[i * 2 + 1], p[i * 2]);
err = -EINVAL;
goto error_out;
}
offset = p[i * 2] / IRQS_PER_MSI_REG;
count = p[i * 2 + 1] / IRQS_PER_MSI_REG;
for (j = 0; j < count; j++, irq_index++) {
err = fsl_msi_setup_hwirq(msi, dev, offset + j,
irq_index);
if (err)
goto error_out;
}
}
}
list_add_tail(&msi->list, &msi_head);
/*
* Apply the MSI ops to all the controllers.
* It doesn't hurt to reassign the same ops,
* but bail out if we find another MSI driver.
*/
list_for_each_entry(phb, &hose_list, list_node) {
if (!phb->controller_ops.setup_msi_irqs) {
phb->controller_ops.setup_msi_irqs = fsl_setup_msi_irqs;
phb->controller_ops.teardown_msi_irqs = fsl_teardown_msi_irqs;
} else if (phb->controller_ops.setup_msi_irqs != fsl_setup_msi_irqs) {
dev_err(&dev->dev, "Different MSI driver already installed!\n");
err = -ENODEV;
goto error_out;
}
}
return 0;
error_out:
fsl_of_msi_remove(dev);
return err;
}
static const struct fsl_msi_feature mpic_msi_feature = {
.fsl_pic_ip = FSL_PIC_IP_MPIC,
.msiir_offset = 0x140,
};
static const struct fsl_msi_feature ipic_msi_feature = {
.fsl_pic_ip = FSL_PIC_IP_IPIC,
.msiir_offset = 0x38,
};
static const struct fsl_msi_feature vmpic_msi_feature = {
.fsl_pic_ip = FSL_PIC_IP_VMPIC,
.msiir_offset = 0,
};
static const struct of_device_id fsl_of_msi_ids[] = {
{
.compatible = "fsl,mpic-msi",
.data = &mpic_msi_feature,
},
{
.compatible = "fsl,mpic-msi-v4.3",
.data = &mpic_msi_feature,
},
{
.compatible = "fsl,ipic-msi",
.data = &ipic_msi_feature,
},
#ifdef CONFIG_EPAPR_PARAVIRT
{
.compatible = "fsl,vmpic-msi",
.data = &vmpic_msi_feature,
},
{
.compatible = "fsl,vmpic-msi-v4.3",
.data = &vmpic_msi_feature,
},
#endif
{}
};
static struct platform_driver fsl_of_msi_driver = {
.driver = {
.name = "fsl-msi",
.of_match_table = fsl_of_msi_ids,
},
.probe = fsl_of_msi_probe,
.remove = fsl_of_msi_remove,
};
static __init int fsl_of_msi_init(void)
{
return platform_driver_register(&fsl_of_msi_driver);
}
subsys_initcall(fsl_of_msi_init);
| linux-master | arch/powerpc/sysdev/fsl_msi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Freescale General-purpose Timers Module
*
* Copyright (c) Freescale Semiconductor, Inc. 2006.
* Shlomi Gridish <[email protected]>
* Jerry Huang <[email protected]>
* Copyright (c) MontaVista Software, Inc. 2008.
* Anton Vorontsov <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/fsl_gtm.h>
#define GTCFR_STP(x) ((x) & 1 ? 1 << 5 : 1 << 1)
#define GTCFR_RST(x) ((x) & 1 ? 1 << 4 : 1 << 0)
#define GTMDR_ICLK_MASK (3 << 1)
#define GTMDR_ICLK_ICAS (0 << 1)
#define GTMDR_ICLK_ICLK (1 << 1)
#define GTMDR_ICLK_SLGO (2 << 1)
#define GTMDR_FRR (1 << 3)
#define GTMDR_ORI (1 << 4)
#define GTMDR_SPS(x) ((x) << 8)
struct gtm_timers_regs {
u8 gtcfr1; /* Timer 1, Timer 2 global config register */
u8 res0[0x3];
u8 gtcfr2; /* Timer 3, timer 4 global config register */
u8 res1[0xB];
__be16 gtmdr1; /* Timer 1 mode register */
__be16 gtmdr2; /* Timer 2 mode register */
__be16 gtrfr1; /* Timer 1 reference register */
__be16 gtrfr2; /* Timer 2 reference register */
__be16 gtcpr1; /* Timer 1 capture register */
__be16 gtcpr2; /* Timer 2 capture register */
__be16 gtcnr1; /* Timer 1 counter */
__be16 gtcnr2; /* Timer 2 counter */
__be16 gtmdr3; /* Timer 3 mode register */
__be16 gtmdr4; /* Timer 4 mode register */
__be16 gtrfr3; /* Timer 3 reference register */
__be16 gtrfr4; /* Timer 4 reference register */
__be16 gtcpr3; /* Timer 3 capture register */
__be16 gtcpr4; /* Timer 4 capture register */
__be16 gtcnr3; /* Timer 3 counter */
__be16 gtcnr4; /* Timer 4 counter */
__be16 gtevr1; /* Timer 1 event register */
__be16 gtevr2; /* Timer 2 event register */
__be16 gtevr3; /* Timer 3 event register */
__be16 gtevr4; /* Timer 4 event register */
__be16 gtpsr1; /* Timer 1 prescale register */
__be16 gtpsr2; /* Timer 2 prescale register */
__be16 gtpsr3; /* Timer 3 prescale register */
__be16 gtpsr4; /* Timer 4 prescale register */
u8 res2[0x40];
} __attribute__ ((packed));
struct gtm {
unsigned int clock;
struct gtm_timers_regs __iomem *regs;
struct gtm_timer timers[4];
spinlock_t lock;
struct list_head list_node;
};
static LIST_HEAD(gtms);
/**
 * gtm_get_timer16 - request a GTM timer for use with the rest of the GTM API
 * Context: non-IRQ
 *
 * This function reserves a GTM timer for later use. It returns a gtm_timer
 * structure to use with the rest of the GTM API; use timer->irq to manage
 * the timer interrupt.
 */
struct gtm_timer *gtm_get_timer16(void)
{
struct gtm *gtm;
int i;
list_for_each_entry(gtm, >ms, list_node) {
spin_lock_irq(>m->lock);
for (i = 0; i < ARRAY_SIZE(gtm->timers); i++) {
if (!gtm->timers[i].requested) {
gtm->timers[i].requested = true;
spin_unlock_irq(>m->lock);
return >m->timers[i];
}
}
spin_unlock_irq(>m->lock);
}
if (!list_empty(>ms))
return ERR_PTR(-EBUSY);
return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL(gtm_get_timer16);
/**
 * gtm_get_specific_timer16 - request a specific GTM timer
 * @gtm: specific GTM, pass the GTM's device_node->data here
 * @timer: specific timer number, Timer1 is 0.
 * Context: non-IRQ
 *
 * This function reserves a GTM timer for later use. It returns a gtm_timer
 * structure to use with the rest of the GTM API; use timer->irq to manage
 * the timer interrupt.
 */
struct gtm_timer *gtm_get_specific_timer16(struct gtm *gtm,
unsigned int timer)
{
struct gtm_timer *ret = ERR_PTR(-EBUSY);
if (timer > 3)
return ERR_PTR(-EINVAL);
spin_lock_irq(>m->lock);
if (gtm->timers[timer].requested)
goto out;
ret = >m->timers[timer];
ret->requested = true;
out:
spin_unlock_irq(>m->lock);
return ret;
}
EXPORT_SYMBOL(gtm_get_specific_timer16);
/**
 * gtm_put_timer16 - release a 16-bit GTM timer
 * @tmr: pointer to the gtm_timer structure obtained from gtm_get_timer16()
 * Context: any
 *
 * This function releases the GTM timer so that others may request it.
 */
void gtm_put_timer16(struct gtm_timer *tmr)
{
gtm_stop_timer16(tmr);
spin_lock_irq(&tmr->gtm->lock);
tmr->requested = false;
spin_unlock_irq(&tmr->gtm->lock);
}
EXPORT_SYMBOL(gtm_put_timer16);
/*
 * This is the back-end for the exported functions; it is used to reset a
 * single timer in reference mode.
 */
static int gtm_set_ref_timer16(struct gtm_timer *tmr, int frequency,
int reference_value, bool free_run)
{
struct gtm *gtm = tmr->gtm;
int num = tmr - >m->timers[0];
unsigned int prescaler;
u8 iclk = GTMDR_ICLK_ICLK;
u8 psr;
u8 sps;
unsigned long flags;
int max_prescaler = 256 * 256 * 16;
/* CPM2 doesn't have primary prescaler */
if (!tmr->gtpsr)
max_prescaler /= 256;
prescaler = gtm->clock / frequency;
	/*
	 * We have two 8-bit prescalers -- primary and secondary (psr, sps),
	 * plus "slow go" mode (clk / 16), so the total prescale value is
	 * 16 * (psr + 1) * (sps + 1). On CPM2 GTMs, however, we lose the psr.
	 */
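	/*
	 * Worked example (hypothetical clock rate, not read from the device
	 * tree): with gtm->clock = 166666666 Hz and frequency = 1000000 Hz,
	 * prescaler = 166. That fits in the secondary prescaler alone, so
	 * iclk stays GTMDR_ICLK_ICLK, psr = 0 and sps = 165.
	 */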
if (prescaler > max_prescaler)
return -EINVAL;
if (prescaler > max_prescaler / 16) {
iclk = GTMDR_ICLK_SLGO;
prescaler /= 16;
}
if (prescaler <= 256) {
psr = 0;
sps = prescaler - 1;
} else {
psr = 256 - 1;
sps = prescaler / 256 - 1;
}
spin_lock_irqsave(>m->lock, flags);
/*
* Properly reset timers: stop, reset, set up prescalers, reference
* value and clear event register.
*/
clrsetbits_8(tmr->gtcfr, ~(GTCFR_STP(num) | GTCFR_RST(num)),
GTCFR_STP(num) | GTCFR_RST(num));
setbits8(tmr->gtcfr, GTCFR_STP(num));
if (tmr->gtpsr)
out_be16(tmr->gtpsr, psr);
clrsetbits_be16(tmr->gtmdr, 0xFFFF, iclk | GTMDR_SPS(sps) |
GTMDR_ORI | (free_run ? GTMDR_FRR : 0));
out_be16(tmr->gtcnr, 0);
out_be16(tmr->gtrfr, reference_value);
out_be16(tmr->gtevr, 0xFFFF);
/* Let it be. */
clrbits8(tmr->gtcfr, GTCFR_STP(num));
spin_unlock_irqrestore(>m->lock, flags);
return 0;
}
/**
 * gtm_set_timer16 - (re)set a 16-bit timer with arbitrary precision
 * @tmr: pointer to the gtm_timer structure obtained from gtm_get_timer16()
* @usec: timer interval in microseconds
* @reload: if set, the timer will reset upon expiry rather than
* continue running free.
* Context: any
*
* This function (re)sets the GTM timer so that it counts up to the requested
* interval value, and fires the interrupt when the value is reached. This
* function will reduce the precision of the timer as needed in order for the
* requested timeout to fit in a 16-bit register.
*/
int gtm_set_timer16(struct gtm_timer *tmr, unsigned long usec, bool reload)
{
	/* a frequency sufficient for microsecond precision */
int freq = 1000000;
unsigned int bit;
bit = fls_long(usec);
if (bit > 15) {
freq >>= bit - 15;
usec >>= bit - 15;
}
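	/*
	 * Worked example (illustrative): usec = 100000 (100 ms) has
	 * fls_long() = 17, so freq and usec are both shifted right by 2,
	 * giving a 250 kHz reference clock counting to 25000 -- still
	 * exactly 100 ms, but with the resolution reduced to 4 us.
	 */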
if (!freq)
return -EINVAL;
return gtm_set_ref_timer16(tmr, freq, usec, reload);
}
EXPORT_SYMBOL(gtm_set_timer16);
/**
 * gtm_set_exact_timer16 - (re)set a 16-bit timer
 * @tmr: pointer to the gtm_timer structure obtained from gtm_get_timer16()
 * @usec: timer interval in microseconds
 * @reload: if set, the timer will reset upon expiry rather than
 *          continue running free.
 * Context: any
 *
 * This function (re)sets the GTM timer so that it counts up to the requested
 * interval value and fires the interrupt when the value is reached. If the
 * reload flag is set, the timer also resets itself upon reaching the
 * reference value; otherwise it continues to increment.
 *
 * The _exact_ part of the function name indicates that this function will
 * not crop the precision of the "usec" argument, so usec is limited to
 * 16 bits (a single timer's width).
 */
int gtm_set_exact_timer16(struct gtm_timer *tmr, u16 usec, bool reload)
{
	/* a frequency sufficient for microsecond precision */
const int freq = 1000000;
/*
* We can lower the frequency (and probably power consumption) by
* dividing both frequency and usec by 2 until there is no remainder.
* But we won't bother with this unless savings are measured, so just
* run the timer as is.
*/
return gtm_set_ref_timer16(tmr, freq, usec, reload);
}
EXPORT_SYMBOL(gtm_set_exact_timer16);
/**
 * gtm_stop_timer16 - stop a single timer
 * @tmr: pointer to the gtm_timer structure obtained from gtm_get_timer16()
* Context: any
*
* This function simply stops the GTM timer.
*/
void gtm_stop_timer16(struct gtm_timer *tmr)
{
struct gtm *gtm = tmr->gtm;
int num = tmr - >m->timers[0];
unsigned long flags;
spin_lock_irqsave(>m->lock, flags);
setbits8(tmr->gtcfr, GTCFR_STP(num));
out_be16(tmr->gtevr, 0xFFFF);
spin_unlock_irqrestore(>m->lock, flags);
}
EXPORT_SYMBOL(gtm_stop_timer16);
/**
 * gtm_ack_timer16 - acknowledge a timer event (free-run timers only)
 * @tmr: pointer to the gtm_timer structure obtained from gtm_get_timer16()
 * @events: mask of events to acknowledge
 * Context: any
 *
 * This function is used to acknowledge a timer interrupt event; call it
 * from inside the interrupt handler.
 */
void gtm_ack_timer16(struct gtm_timer *tmr, u16 events)
{
out_be16(tmr->gtevr, events);
}
EXPORT_SYMBOL(gtm_ack_timer16);
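/*
 * A minimal usage sketch of the 16-bit timer API above. This is
 * illustrative only: my_isr() and the 10 ms period are made-up examples,
 * not part of this driver.
 *
 *	struct gtm_timer *tmr = gtm_get_timer16();
 *
 *	if (IS_ERR(tmr))
 *		return PTR_ERR(tmr);
 *	if (request_irq(tmr->irq, my_isr, 0, "my-timer", tmr)) {
 *		gtm_put_timer16(tmr);
 *		return -EBUSY;
 *	}
 *	gtm_set_timer16(tmr, 10000, true);	(periodic, every 10 ms)
 *
 * Inside my_isr(), acknowledge the event with gtm_ack_timer16(tmr, 0xFFFF)
 * before returning IRQ_HANDLED; on teardown call gtm_stop_timer16() and
 * gtm_put_timer16().
 */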
static void __init gtm_set_shortcuts(struct device_node *np,
struct gtm_timer *timers,
struct gtm_timers_regs __iomem *regs)
{
	/*
	 * Yeah, I don't like this either, but the timers' registers are a
	 * bit messed up, so we have to provide shortcuts in order to write
	 * timer-independent code. The alternative would be gt*() accessors,
	 * but that would be even uglier and more cryptic.
	 */
timers[0].gtcfr = ®s->gtcfr1;
timers[0].gtmdr = ®s->gtmdr1;
timers[0].gtcnr = ®s->gtcnr1;
timers[0].gtrfr = ®s->gtrfr1;
timers[0].gtevr = ®s->gtevr1;
timers[1].gtcfr = ®s->gtcfr1;
timers[1].gtmdr = ®s->gtmdr2;
timers[1].gtcnr = ®s->gtcnr2;
timers[1].gtrfr = ®s->gtrfr2;
timers[1].gtevr = ®s->gtevr2;
timers[2].gtcfr = ®s->gtcfr2;
timers[2].gtmdr = ®s->gtmdr3;
timers[2].gtcnr = ®s->gtcnr3;
timers[2].gtrfr = ®s->gtrfr3;
timers[2].gtevr = ®s->gtevr3;
timers[3].gtcfr = ®s->gtcfr2;
timers[3].gtmdr = ®s->gtmdr4;
timers[3].gtcnr = ®s->gtcnr4;
timers[3].gtrfr = ®s->gtrfr4;
timers[3].gtevr = ®s->gtevr4;
/* CPM2 doesn't have primary prescaler */
if (!of_device_is_compatible(np, "fsl,cpm2-gtm")) {
timers[0].gtpsr = ®s->gtpsr1;
timers[1].gtpsr = ®s->gtpsr2;
timers[2].gtpsr = ®s->gtpsr3;
timers[3].gtpsr = ®s->gtpsr4;
}
}
static int __init fsl_gtm_init(void)
{
struct device_node *np;
for_each_compatible_node(np, NULL, "fsl,gtm") {
int i;
struct gtm *gtm;
const u32 *clock;
int size;
gtm = kzalloc(sizeof(*gtm), GFP_KERNEL);
if (!gtm) {
pr_err("%pOF: unable to allocate memory\n",
np);
continue;
}
spin_lock_init(>m->lock);
clock = of_get_property(np, "clock-frequency", &size);
if (!clock || size != sizeof(*clock)) {
pr_err("%pOF: no clock-frequency\n", np);
goto err;
}
gtm->clock = *clock;
for (i = 0; i < ARRAY_SIZE(gtm->timers); i++) {
unsigned int irq;
irq = irq_of_parse_and_map(np, i);
if (!irq) {
pr_err("%pOF: not enough interrupts specified\n",
np);
goto err;
}
gtm->timers[i].irq = irq;
gtm->timers[i].gtm = gtm;
}
gtm->regs = of_iomap(np, 0);
if (!gtm->regs) {
pr_err("%pOF: unable to iomap registers\n",
np);
goto err;
}
gtm_set_shortcuts(np, gtm->timers, gtm->regs);
list_add(>m->list_node, >ms);
/* We don't want to lose the node and its ->data */
np->data = gtm;
of_node_get(np);
continue;
err:
kfree(gtm);
}
return 0;
}
arch_initcall(fsl_gtm_init);
| linux-master | arch/powerpc/sysdev/fsl_gtm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Freescale MPC85xx/MPC86xx RapidIO RMU support
*
* Copyright 2009 Sysgo AG
* Thomas Moll <[email protected]>
* - fixed maintenance access routines, check for aligned access
*
* Copyright 2009 Integrated Device Technology, Inc.
* Alex Bounine <[email protected]>
* - Added Port-Write message handling
* - Added Machine Check exception handling
*
* Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc.
* Zhang Wei <[email protected]>
* Lian Minghuan-B31939 <[email protected]>
* Liu Gang <[email protected]>
*
* Copyright 2005 MontaVista Software, Inc.
* Matt Porter <[email protected]>
*/
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include "fsl_rio.h"
#define GET_RMM_HANDLE(mport) \
(((struct rio_priv *)(mport->priv))->rmm_handle)
/* RapidIO definition irq, which read from OF-tree */
#define IRQ_RIO_PW(m) (((struct fsl_rio_pw *)(m))->pwirq)
#define IRQ_RIO_BELL(m) (((struct fsl_rio_dbell *)(m))->bellirq)
#define IRQ_RIO_TX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->txirq)
#define IRQ_RIO_RX(m) (((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->rxirq)
#define RIO_MIN_TX_RING_SIZE 2
#define RIO_MAX_TX_RING_SIZE 2048
#define RIO_MIN_RX_RING_SIZE 2
#define RIO_MAX_RX_RING_SIZE 2048
#define RIO_IPWMR_SEN 0x00100000
#define RIO_IPWMR_QFIE 0x00000100
#define RIO_IPWMR_EIE 0x00000020
#define RIO_IPWMR_CQ 0x00000002
#define RIO_IPWMR_PWE 0x00000001
#define RIO_IPWSR_QF 0x00100000
#define RIO_IPWSR_TE 0x00000080
#define RIO_IPWSR_QFI 0x00000010
#define RIO_IPWSR_PWD 0x00000008
#define RIO_IPWSR_PWB 0x00000004
#define RIO_EPWISR 0x10010
/* EPWISR Error match value */
#define RIO_EPWISR_PINT1 0x80000000
#define RIO_EPWISR_PINT2 0x40000000
#define RIO_EPWISR_MU 0x00000002
#define RIO_EPWISR_PW 0x00000001
#define IPWSR_CLEAR 0x98
#define OMSR_CLEAR 0x1cb3
#define IMSR_CLEAR 0x491
#define IDSR_CLEAR 0x91
#define ODSR_CLEAR 0x1c00
#define LTLEECSR_ENABLE_ALL 0xFFC000FC
#define RIO_LTLEECSR 0x060c
#define RIO_IM0SR 0x64
#define RIO_IM1SR 0x164
#define RIO_OM0SR 0x4
#define RIO_OM1SR 0x104
#define RIO_DBELL_WIN_SIZE 0x1000
#define RIO_MSG_OMR_MUI 0x00000002
#define RIO_MSG_OSR_TE 0x00000080
#define RIO_MSG_OSR_QOI 0x00000020
#define RIO_MSG_OSR_QFI 0x00000010
#define RIO_MSG_OSR_MUB 0x00000004
#define RIO_MSG_OSR_EOMI 0x00000002
#define RIO_MSG_OSR_QEI 0x00000001
#define RIO_MSG_IMR_MI 0x00000002
#define RIO_MSG_ISR_TE 0x00000080
#define RIO_MSG_ISR_QFI 0x00000010
#define RIO_MSG_ISR_DIQI 0x00000001
#define RIO_MSG_DESC_SIZE 32
#define RIO_MSG_BUFFER_SIZE 4096
#define DOORBELL_DMR_DI 0x00000002
#define DOORBELL_DSR_TE 0x00000080
#define DOORBELL_DSR_QFI 0x00000010
#define DOORBELL_DSR_DIQI 0x00000001
#define DOORBELL_MESSAGE_SIZE 0x08
static DEFINE_SPINLOCK(fsl_rio_doorbell_lock);
struct rio_msg_regs {
u32 omr;
u32 osr;
u32 pad1;
u32 odqdpar;
u32 pad2;
u32 osar;
u32 odpr;
u32 odatr;
u32 odcr;
u32 pad3;
u32 odqepar;
u32 pad4[13];
u32 imr;
u32 isr;
u32 pad5;
u32 ifqdpar;
u32 pad6;
u32 ifqepar;
};
struct rio_dbell_regs {
u32 odmr;
u32 odsr;
u32 pad1[4];
u32 oddpr;
u32 oddatr;
u32 pad2[3];
u32 odretcr;
u32 pad3[12];
u32 dmr;
u32 dsr;
u32 pad4;
u32 dqdpar;
u32 pad5;
u32 dqepar;
};
struct rio_pw_regs {
u32 pwmr;
u32 pwsr;
u32 epwqbar;
u32 pwqbar;
};
struct rio_tx_desc {
u32 pad1;
u32 saddr;
u32 dport;
u32 dattr;
u32 pad2;
u32 pad3;
u32 dwcnt;
u32 pad4;
};
struct rio_msg_tx_ring {
void *virt;
dma_addr_t phys;
void *virt_buffer[RIO_MAX_TX_RING_SIZE];
dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
int tx_slot;
int size;
void *dev_id;
};
struct rio_msg_rx_ring {
void *virt;
dma_addr_t phys;
void *virt_buffer[RIO_MAX_RX_RING_SIZE];
int rx_slot;
int size;
void *dev_id;
};
struct fsl_rmu {
struct rio_msg_regs __iomem *msg_regs;
struct rio_msg_tx_ring msg_tx_ring;
struct rio_msg_rx_ring msg_rx_ring;
int txirq;
int rxirq;
};
struct rio_dbell_msg {
u16 pad1;
u16 tid;
u16 sid;
u16 info;
};
/**
* fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
* @irq: Linux interrupt number
* @dev_instance: Pointer to interrupt-specific data
*
 * Handles outbound message interrupts. Executes a registered outbound
 * mailbox event handler and acks the interrupt occurrence.
*/
static irqreturn_t
fsl_rio_tx_handler(int irq, void *dev_instance)
{
int osr;
struct rio_mport *port = (struct rio_mport *)dev_instance;
struct fsl_rmu *rmu = GET_RMM_HANDLE(port);
osr = in_be32(&rmu->msg_regs->osr);
if (osr & RIO_MSG_OSR_TE) {
pr_info("RIO: outbound message transmission error\n");
out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_TE);
goto out;
}
if (osr & RIO_MSG_OSR_QOI) {
pr_info("RIO: outbound message queue overflow\n");
out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_QOI);
goto out;
}
if (osr & RIO_MSG_OSR_EOMI) {
u32 dqp = in_be32(&rmu->msg_regs->odqdpar);
int slot = (dqp - rmu->msg_tx_ring.phys) >> 5;
if (port->outb_msg[0].mcback != NULL) {
port->outb_msg[0].mcback(port, rmu->msg_tx_ring.dev_id,
-1,
slot);
}
/* Ack the end-of-message interrupt */
out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_EOMI);
}
out:
return IRQ_HANDLED;
}
/**
* fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
* @irq: Linux interrupt number
* @dev_instance: Pointer to interrupt-specific data
*
* Handles inbound message interrupts. Executes a registered inbound
* mailbox event handler and acks the interrupt occurrence.
*/
static irqreturn_t
fsl_rio_rx_handler(int irq, void *dev_instance)
{
int isr;
struct rio_mport *port = (struct rio_mport *)dev_instance;
struct fsl_rmu *rmu = GET_RMM_HANDLE(port);
isr = in_be32(&rmu->msg_regs->isr);
if (isr & RIO_MSG_ISR_TE) {
pr_info("RIO: inbound message reception error\n");
out_be32((void *)&rmu->msg_regs->isr, RIO_MSG_ISR_TE);
goto out;
}
/* XXX Need to check/dispatch until queue empty */
if (isr & RIO_MSG_ISR_DIQI) {
		/*
		 * Messages for any mailbox/letter can arrive at this mailbox
		 * destination, so make the callback with an unknown/invalid
		 * mailbox number argument.
		 */
if (port->inb_msg[0].mcback != NULL)
port->inb_msg[0].mcback(port, rmu->msg_rx_ring.dev_id,
-1,
-1);
/* Ack the queueing interrupt */
out_be32(&rmu->msg_regs->isr, RIO_MSG_ISR_DIQI);
}
out:
return IRQ_HANDLED;
}
/**
* fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
* @irq: Linux interrupt number
* @dev_instance: Pointer to interrupt-specific data
*
* Handles doorbell interrupts. Parses a list of registered
* doorbell event handlers and executes a matching event handler.
*/
static irqreturn_t
fsl_rio_dbell_handler(int irq, void *dev_instance)
{
int dsr;
struct fsl_rio_dbell *fsl_dbell = (struct fsl_rio_dbell *)dev_instance;
int i;
dsr = in_be32(&fsl_dbell->dbell_regs->dsr);
if (dsr & DOORBELL_DSR_TE) {
pr_info("RIO: doorbell reception error\n");
out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_TE);
goto out;
}
if (dsr & DOORBELL_DSR_QFI) {
pr_info("RIO: doorbell queue full\n");
out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_QFI);
}
/* XXX Need to check/dispatch until queue empty */
if (dsr & DOORBELL_DSR_DIQI) {
struct rio_dbell_msg *dmsg =
fsl_dbell->dbell_ring.virt +
(in_be32(&fsl_dbell->dbell_regs->dqdpar) & 0xfff);
struct rio_dbell *dbell;
int found = 0;
pr_debug
("RIO: processing doorbell,"
" sid %2.2x tid %2.2x info %4.4x\n",
dmsg->sid, dmsg->tid, dmsg->info);
for (i = 0; i < MAX_PORT_NUM; i++) {
if (fsl_dbell->mport[i]) {
list_for_each_entry(dbell,
&fsl_dbell->mport[i]->dbells, node) {
if ((dbell->res->start
<= dmsg->info)
&& (dbell->res->end
>= dmsg->info)) {
found = 1;
break;
}
}
if (found && dbell->dinb) {
dbell->dinb(fsl_dbell->mport[i],
dbell->dev_id, dmsg->sid,
dmsg->tid,
dmsg->info);
break;
}
}
}
if (!found) {
pr_debug
("RIO: spurious doorbell,"
" sid %2.2x tid %2.2x info %4.4x\n",
dmsg->sid, dmsg->tid,
dmsg->info);
}
setbits32(&fsl_dbell->dbell_regs->dmr, DOORBELL_DMR_DI);
out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_DIQI);
}
out:
return IRQ_HANDLED;
}
static void msg_unit_error_handler(void)
{
	/* XXX: Error recovery is not implemented; we just clear errors */
out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
out_be32((u32 *)(rmu_regs_win + RIO_IM0SR), IMSR_CLEAR);
out_be32((u32 *)(rmu_regs_win + RIO_IM1SR), IMSR_CLEAR);
out_be32((u32 *)(rmu_regs_win + RIO_OM0SR), OMSR_CLEAR);
out_be32((u32 *)(rmu_regs_win + RIO_OM1SR), OMSR_CLEAR);
out_be32(&dbell->dbell_regs->odsr, ODSR_CLEAR);
out_be32(&dbell->dbell_regs->dsr, IDSR_CLEAR);
out_be32(&pw->pw_regs->pwsr, IPWSR_CLEAR);
}
/**
* fsl_rio_port_write_handler - MPC85xx port write interrupt handler
* @irq: Linux interrupt number
* @dev_instance: Pointer to interrupt-specific data
*
* Handles port write interrupts. Parses a list of registered
* port write event handlers and executes a matching event handler.
*/
static irqreturn_t
fsl_rio_port_write_handler(int irq, void *dev_instance)
{
u32 ipwmr, ipwsr;
struct fsl_rio_pw *pw = (struct fsl_rio_pw *)dev_instance;
u32 epwisr, tmp;
epwisr = in_be32(rio_regs_win + RIO_EPWISR);
if (!(epwisr & RIO_EPWISR_PW))
goto pw_done;
ipwmr = in_be32(&pw->pw_regs->pwmr);
ipwsr = in_be32(&pw->pw_regs->pwsr);
#ifdef DEBUG_PW
pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
if (ipwsr & RIO_IPWSR_QF)
pr_debug(" QF");
if (ipwsr & RIO_IPWSR_TE)
pr_debug(" TE");
if (ipwsr & RIO_IPWSR_QFI)
pr_debug(" QFI");
if (ipwsr & RIO_IPWSR_PWD)
pr_debug(" PWD");
if (ipwsr & RIO_IPWSR_PWB)
pr_debug(" PWB");
pr_debug(" )\n");
#endif
/* Schedule deferred processing if PW was received */
if (ipwsr & RIO_IPWSR_QFI) {
/* Save PW message (if there is room in FIFO),
* otherwise discard it.
*/
if (kfifo_avail(&pw->pw_fifo) >= RIO_PW_MSG_SIZE) {
pw->port_write_msg.msg_count++;
kfifo_in(&pw->pw_fifo, pw->port_write_msg.virt,
RIO_PW_MSG_SIZE);
} else {
pw->port_write_msg.discard_count++;
pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
pw->port_write_msg.discard_count);
}
/* Clear interrupt and issue Clear Queue command. This allows
* another port-write to be received.
*/
out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_QFI);
out_be32(&pw->pw_regs->pwmr, ipwmr | RIO_IPWMR_CQ);
schedule_work(&pw->pw_work);
}
if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
pw->port_write_msg.err_count++;
pr_debug("RIO: Port-Write Transaction Err (%d)\n",
pw->port_write_msg.err_count);
/* Clear Transaction Error: port-write controller should be
* disabled when clearing this error
*/
out_be32(&pw->pw_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_TE);
out_be32(&pw->pw_regs->pwmr, ipwmr);
}
if (ipwsr & RIO_IPWSR_PWD) {
pw->port_write_msg.discard_count++;
pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
pw->port_write_msg.discard_count);
out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_PWD);
}
pw_done:
if (epwisr & RIO_EPWISR_PINT1) {
tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
fsl_rio_port_error_handler(0);
}
if (epwisr & RIO_EPWISR_PINT2) {
tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
fsl_rio_port_error_handler(1);
}
if (epwisr & RIO_EPWISR_MU) {
tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
msg_unit_error_handler();
}
return IRQ_HANDLED;
}
static void fsl_pw_dpc(struct work_struct *work)
{
struct fsl_rio_pw *pw = container_of(work, struct fsl_rio_pw, pw_work);
union rio_pw_msg msg_buffer;
int i;
/*
* Process port-write messages
*/
while (kfifo_out_spinlocked(&pw->pw_fifo, (unsigned char *)&msg_buffer,
RIO_PW_MSG_SIZE, &pw->pw_fifo_lock)) {
#ifdef DEBUG_PW
{
u32 i;
pr_debug("%s : Port-Write Message:", __func__);
for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); i++) {
if ((i%4) == 0)
pr_debug("\n0x%02x: 0x%08x", i*4,
msg_buffer.raw[i]);
else
pr_debug(" 0x%08x", msg_buffer.raw[i]);
}
pr_debug("\n");
}
#endif
/* Pass the port-write message to RIO core for processing */
for (i = 0; i < MAX_PORT_NUM; i++) {
if (pw->mport[i])
rio_inb_pwrite_handler(pw->mport[i],
&msg_buffer);
}
}
}
/**
 * fsl_rio_pw_enable - enable or disable the port-write interface
* @mport: Master port implementing the port write unit
* @enable: 1=enable; 0=disable port-write message handling
*/
int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
{
u32 rval;
rval = in_be32(&pw->pw_regs->pwmr);
if (enable)
rval |= RIO_IPWMR_PWE;
else
rval &= ~RIO_IPWMR_PWE;
out_be32(&pw->pw_regs->pwmr, rval);
return 0;
}
/**
* fsl_rio_port_write_init - MPC85xx port write interface init
 * @pw: Port write unit state (struct fsl_rio_pw)
*
* Initializes port write unit hardware and DMA buffer
* ring. Called from fsl_rio_setup(). Returns %0 on success
* or %-ENOMEM on failure.
*/
int fsl_rio_port_write_init(struct fsl_rio_pw *pw)
{
int rc = 0;
	/* The following configuration requires a disabled port write controller */
out_be32(&pw->pw_regs->pwmr,
in_be32(&pw->pw_regs->pwmr) & ~RIO_IPWMR_PWE);
/* Initialize port write */
pw->port_write_msg.virt = dma_alloc_coherent(pw->dev,
RIO_PW_MSG_SIZE,
&pw->port_write_msg.phys, GFP_KERNEL);
if (!pw->port_write_msg.virt) {
pr_err("RIO: unable allocate port write queue\n");
return -ENOMEM;
}
pw->port_write_msg.err_count = 0;
pw->port_write_msg.discard_count = 0;
/* Point dequeue/enqueue pointers at first entry */
out_be32(&pw->pw_regs->epwqbar, 0);
out_be32(&pw->pw_regs->pwqbar, (u32) pw->port_write_msg.phys);
pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
in_be32(&pw->pw_regs->epwqbar),
in_be32(&pw->pw_regs->pwqbar));
/* Clear interrupt status IPWSR */
out_be32(&pw->pw_regs->pwsr,
(RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));
	/* Configure the port write controller: enable snooping and all
	   interrupt reporting, and clear the queue full condition */
out_be32(&pw->pw_regs->pwmr,
RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);
/* Hook up port-write handler */
rc = request_irq(IRQ_RIO_PW(pw), fsl_rio_port_write_handler,
IRQF_SHARED, "port-write", (void *)pw);
if (rc < 0) {
pr_err("MPC85xx RIO: unable to request inbound doorbell irq");
goto err_out;
}
/* Enable Error Interrupt */
out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);
INIT_WORK(&pw->pw_work, fsl_pw_dpc);
spin_lock_init(&pw->pw_fifo_lock);
if (kfifo_alloc(&pw->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
pr_err("FIFO allocation failed\n");
rc = -ENOMEM;
goto err_out_irq;
}
pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
in_be32(&pw->pw_regs->pwmr),
in_be32(&pw->pw_regs->pwsr));
return rc;
err_out_irq:
free_irq(IRQ_RIO_PW(pw), (void *)pw);
err_out:
dma_free_coherent(pw->dev, RIO_PW_MSG_SIZE,
pw->port_write_msg.virt,
pw->port_write_msg.phys);
return rc;
}
/**
* fsl_rio_doorbell_send - Send a MPC85xx doorbell message
* @mport: RapidIO master port info
* @index: ID of RapidIO interface
* @destid: Destination ID of target device
* @data: 16-bit info field of RapidIO doorbell message
*
* Sends a MPC85xx doorbell message. Returns %0 on success or
* %-EINVAL on failure.
*/
int fsl_rio_doorbell_send(struct rio_mport *mport,
int index, u16 destid, u16 data)
{
unsigned long flags;
pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
index, destid, data);
spin_lock_irqsave(&fsl_rio_doorbell_lock, flags);
	/* On serial RapidIO silicon, such as the MPC8548 and MPC8641,
	 * the operations below are required.
	 */
out_be32(&dbell->dbell_regs->odmr, 0x00000000);
out_be32(&dbell->dbell_regs->odretcr, 0x00000004);
out_be32(&dbell->dbell_regs->oddpr, destid << 16);
out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data);
out_be32(&dbell->dbell_regs->odmr, 0x00000001);
spin_unlock_irqrestore(&fsl_rio_doorbell_lock, flags);
return 0;
}
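/*
 * A minimal usage sketch (illustrative; the destination ID 0x4 and info
 * value 0xabcd are made up). A caller holding the rio_mport can ring a
 * remote doorbell with:
 *
 *	fsl_rio_doorbell_send(mport, mport->index, 0x4, 0xabcd);
 *
 * which places the destination ID in the upper half of ODDPR and
 * (index << 20) | info in ODDATR before kicking ODMR. In practice this is
 * normally reached through the generic RapidIO doorbell API rather than
 * being called directly.
 */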
/**
* fsl_add_outb_message - Add message to the MPC85xx outbound message queue
* @mport: Master port with outbound message queue
* @rdev: Target of outbound message
* @mbox: Outbound mailbox
* @buffer: Message to add to outbound queue
* @len: Length of message
*
* Adds the @buffer message to the MPC85xx outbound message queue. Returns
* %0 on success or %-EINVAL on failure.
*/
int
fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
void *buffer, size_t len)
{
struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
u32 omr;
struct rio_tx_desc *desc = (struct rio_tx_desc *)rmu->msg_tx_ring.virt
+ rmu->msg_tx_ring.tx_slot;
int ret = 0;
pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \
"%p len %8.8zx\n", rdev->destid, mbox, buffer, len);
if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
ret = -EINVAL;
goto out;
}
/* Copy and clear rest of buffer */
memcpy(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot], buffer,
len);
if (len < (RIO_MAX_MSG_SIZE - 4))
memset(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot]
+ len, 0, RIO_MAX_MSG_SIZE - len);
/* Set mbox field for message, and set destid */
desc->dport = (rdev->destid << 16) | (mbox & 0x3);
/* Enable EOMI interrupt and priority */
desc->dattr = 0x28000000 | ((mport->index) << 20);
/* Set transfer size aligned to next power of 2 (in double words) */
desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);
/* Set snooping and source buffer address */
desc->saddr = 0x00000004
| rmu->msg_tx_ring.phys_buffer[rmu->msg_tx_ring.tx_slot];
/* Increment enqueue pointer */
omr = in_be32(&rmu->msg_regs->omr);
out_be32(&rmu->msg_regs->omr, omr | RIO_MSG_OMR_MUI);
/* Go to next descriptor */
if (++rmu->msg_tx_ring.tx_slot == rmu->msg_tx_ring.size)
rmu->msg_tx_ring.tx_slot = 0;
out:
return ret;
}
/**
* fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox
* @mport: Master port implementing the outbound message unit
* @dev_id: Device specific pointer to pass on event
* @mbox: Mailbox to open
* @entries: Number of entries in the outbound mailbox ring
*
 * Initializes the buffer ring, requests the outbound message interrupt,
* and enables the outbound message unit. Returns %0 on success and
* %-EINVAL or %-ENOMEM on failure.
*/
int
fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
int i, j, rc = 0;
struct rio_priv *priv = mport->priv;
struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
if ((entries < RIO_MIN_TX_RING_SIZE) ||
(entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
rc = -EINVAL;
goto out;
}
/* Initialize shadow copy ring */
rmu->msg_tx_ring.dev_id = dev_id;
rmu->msg_tx_ring.size = entries;
for (i = 0; i < rmu->msg_tx_ring.size; i++) {
rmu->msg_tx_ring.virt_buffer[i] =
dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
&rmu->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
if (!rmu->msg_tx_ring.virt_buffer[i]) {
rc = -ENOMEM;
for (j = 0; j < rmu->msg_tx_ring.size; j++)
if (rmu->msg_tx_ring.virt_buffer[j])
dma_free_coherent(priv->dev,
RIO_MSG_BUFFER_SIZE,
rmu->msg_tx_ring.
virt_buffer[j],
rmu->msg_tx_ring.
phys_buffer[j]);
goto out;
}
}
/* Initialize outbound message descriptor ring */
rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
&rmu->msg_tx_ring.phys,
GFP_KERNEL);
if (!rmu->msg_tx_ring.virt) {
rc = -ENOMEM;
goto out_dma;
}
rmu->msg_tx_ring.tx_slot = 0;
/* Point dequeue/enqueue pointers at first entry in ring */
out_be32(&rmu->msg_regs->odqdpar, rmu->msg_tx_ring.phys);
out_be32(&rmu->msg_regs->odqepar, rmu->msg_tx_ring.phys);
/* Configure for snooping */
out_be32(&rmu->msg_regs->osar, 0x00000004);
/* Clear interrupt status */
out_be32(&rmu->msg_regs->osr, 0x000000b3);
/* Hook up outbound message handler */
rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
"msg_tx", (void *)mport);
if (rc < 0)
goto out_irq;
/*
* Configure outbound message unit
* Snooping
* Interrupts (all enabled, except QEIE)
* Chaining mode
* Disable
*/
out_be32(&rmu->msg_regs->omr, 0x00100220);
/* Set number of entries */
out_be32(&rmu->msg_regs->omr,
in_be32(&rmu->msg_regs->omr) |
((get_bitmask_order(entries) - 2) << 12));
/* Now enable the unit */
out_be32(&rmu->msg_regs->omr, in_be32(&rmu->msg_regs->omr) | 0x1);
out:
return rc;
out_irq:
dma_free_coherent(priv->dev,
rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);
out_dma:
for (i = 0; i < rmu->msg_tx_ring.size; i++)
dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
rmu->msg_tx_ring.virt_buffer[i],
rmu->msg_tx_ring.phys_buffer[i]);
return rc;
}
/**
* fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox
* @mport: Master port implementing the outbound message unit
* @mbox: Mailbox to close
*
 * Disables the outbound message unit, frees all buffers, and
* frees the outbound message interrupt.
*/
void fsl_close_outb_mbox(struct rio_mport *mport, int mbox)
{
struct rio_priv *priv = mport->priv;
struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
	/* Disable the outbound message unit */
out_be32(&rmu->msg_regs->omr, 0);
/* Free ring */
dma_free_coherent(priv->dev,
rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);
/* Free interrupt */
free_irq(IRQ_RIO_TX(mport), (void *)mport);
}
/**
* fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox
* @mport: Master port implementing the inbound message unit
* @dev_id: Device specific pointer to pass on event
* @mbox: Mailbox to open
* @entries: Number of entries in the inbound mailbox ring
*
 * Initializes the buffer ring, requests the inbound message interrupt,
* and enables the inbound message unit. Returns %0 on success
* and %-EINVAL or %-ENOMEM on failure.
*/
int
fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
int i, rc = 0;
struct rio_priv *priv = mport->priv;
struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
if ((entries < RIO_MIN_RX_RING_SIZE) ||
(entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
rc = -EINVAL;
goto out;
}
/* Initialize client buffer ring */
rmu->msg_rx_ring.dev_id = dev_id;
rmu->msg_rx_ring.size = entries;
rmu->msg_rx_ring.rx_slot = 0;
for (i = 0; i < rmu->msg_rx_ring.size; i++)
rmu->msg_rx_ring.virt_buffer[i] = NULL;
/* Initialize inbound message ring */
rmu->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
&rmu->msg_rx_ring.phys, GFP_KERNEL);
if (!rmu->msg_rx_ring.virt) {
rc = -ENOMEM;
goto out;
}
/* Point dequeue/enqueue pointers at first entry in ring */
out_be32(&rmu->msg_regs->ifqdpar, (u32) rmu->msg_rx_ring.phys);
out_be32(&rmu->msg_regs->ifqepar, (u32) rmu->msg_rx_ring.phys);
/* Clear interrupt status */
out_be32(&rmu->msg_regs->isr, 0x00000091);
/* Hook up inbound message handler */
rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
"msg_rx", (void *)mport);
if (rc < 0) {
dma_free_coherent(priv->dev,
rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);
goto out;
}
/*
* Configure inbound message unit:
* Snooping
* 4KB max message size
* Unmask all interrupt sources
* Disable
*/
out_be32(&rmu->msg_regs->imr, 0x001b0060);
/* Set number of queue entries */
setbits32(&rmu->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);
/* Now enable the unit */
setbits32(&rmu->msg_regs->imr, 0x1);
out:
return rc;
}
/**
* fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox
* @mport: Master port implementing the inbound message unit
* @mbox: Mailbox to close
*
 * Disables the inbound message unit, frees all buffers, and
* frees the inbound message interrupt.
*/
void fsl_close_inb_mbox(struct rio_mport *mport, int mbox)
{
struct rio_priv *priv = mport->priv;
struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
/* Disable inbound message unit */
out_be32(&rmu->msg_regs->imr, 0);
/* Free ring */
dma_free_coherent(priv->dev, rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);
/* Free interrupt */
free_irq(IRQ_RIO_RX(mport), (void *)mport);
}
/**
* fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
* @mport: Master port implementing the inbound message unit
* @mbox: Inbound mailbox number
* @buf: Buffer to add to inbound queue
*
* Adds the @buf buffer to the MPC85xx inbound message queue. Returns
* %0 on success or %-EINVAL on failure.
*/
int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
{
int rc = 0;
struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
rmu->msg_rx_ring.rx_slot);
if (rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot]) {
printk(KERN_ERR
"RIO: error adding inbound buffer %d, buffer exists\n",
rmu->msg_rx_ring.rx_slot);
rc = -EINVAL;
goto out;
}
rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot] = buf;
if (++rmu->msg_rx_ring.rx_slot == rmu->msg_rx_ring.size)
rmu->msg_rx_ring.rx_slot = 0;
out:
return rc;
}
/**
* fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit
* @mport: Master port implementing the inbound message unit
* @mbox: Inbound mailbox number
*
* Gets the next available inbound message from the inbound message queue.
* A pointer to the message is returned on success or NULL on failure.
*/
void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
{
struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
u32 phys_buf;
void *virt_buf;
void *buf = NULL;
int buf_idx;
phys_buf = in_be32(&rmu->msg_regs->ifqdpar);
/* If no more messages, then bail out */
if (phys_buf == in_be32(&rmu->msg_regs->ifqepar))
goto out2;
virt_buf = rmu->msg_rx_ring.virt + (phys_buf
- rmu->msg_rx_ring.phys);
buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
buf = rmu->msg_rx_ring.virt_buffer[buf_idx];
if (!buf) {
printk(KERN_ERR
"RIO: inbound message copy failed, no buffers\n");
goto out1;
}
	/* Copy the maximum message size; the caller is expected to have allocated a buffer that big */
memcpy(buf, virt_buf, RIO_MAX_MSG_SIZE);
/* Clear the available buffer */
rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL;
out1:
setbits32(&rmu->msg_regs->imr, RIO_MSG_IMR_MI);
out2:
return buf;
}
/**
* fsl_rio_doorbell_init - MPC85xx doorbell interface init
 * @dbell: Pointer to the fsl_rio_dbell structure for the inbound doorbell unit
*
* Initializes doorbell unit hardware and inbound DMA buffer
* ring. Called from fsl_rio_setup(). Returns %0 on success
* or %-ENOMEM on failure.
*/
int fsl_rio_doorbell_init(struct fsl_rio_dbell *dbell)
{
int rc = 0;
/* Initialize inbound doorbells */
dbell->dbell_ring.virt = dma_alloc_coherent(dbell->dev, 512 *
DOORBELL_MESSAGE_SIZE, &dbell->dbell_ring.phys, GFP_KERNEL);
if (!dbell->dbell_ring.virt) {
printk(KERN_ERR "RIO: unable allocate inbound doorbell ring\n");
rc = -ENOMEM;
goto out;
}
/* Point dequeue/enqueue pointers at first entry in ring */
out_be32(&dbell->dbell_regs->dqdpar, (u32) dbell->dbell_ring.phys);
out_be32(&dbell->dbell_regs->dqepar, (u32) dbell->dbell_ring.phys);
/* Clear interrupt status */
out_be32(&dbell->dbell_regs->dsr, 0x00000091);
/* Hook up doorbell handler */
rc = request_irq(IRQ_RIO_BELL(dbell), fsl_rio_dbell_handler, 0,
"dbell_rx", (void *)dbell);
if (rc < 0) {
dma_free_coherent(dbell->dev, 512 * DOORBELL_MESSAGE_SIZE,
dbell->dbell_ring.virt, dbell->dbell_ring.phys);
printk(KERN_ERR
"MPC85xx RIO: unable to request inbound doorbell irq");
goto out;
}
/* Configure doorbells for snooping, 512 entries, and enable */
out_be32(&dbell->dbell_regs->dmr, 0x00108161);
out:
return rc;
}
int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node)
{
struct rio_priv *priv;
struct fsl_rmu *rmu;
u64 msg_start;
if (!mport || !mport->priv)
return -EINVAL;
priv = mport->priv;
if (!node) {
dev_warn(priv->dev, "Can't get %pOF property 'fsl,rmu'\n",
priv->dev->of_node);
return -EINVAL;
}
rmu = kzalloc(sizeof(struct fsl_rmu), GFP_KERNEL);
if (!rmu)
return -ENOMEM;
if (of_property_read_reg(node, 0, &msg_start, NULL)) {
pr_err("%pOF: unable to find 'reg' property of message-unit\n",
node);
kfree(rmu);
return -ENOMEM;
}
rmu->msg_regs = (struct rio_msg_regs *)
(rmu_regs_win + (u32)msg_start);
rmu->txirq = irq_of_parse_and_map(node, 0);
rmu->rxirq = irq_of_parse_and_map(node, 1);
printk(KERN_INFO "%pOF: txirq: %d, rxirq %d\n",
node, rmu->txirq, rmu->rxirq);
priv->rmm_handle = rmu;
rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
return 0;
}
| linux-master | arch/powerpc/sysdev/fsl_rmu.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/property.h>
#include <asm/mpc5xxx.h>
/**
* mpc5xxx_fwnode_get_bus_frequency - Find the bus frequency for a firmware node
* @fwnode: firmware node
*
* Returns bus frequency (IPS on MPC512x, IPB on MPC52xx),
* or 0 if the bus frequency cannot be found.
*/
unsigned long mpc5xxx_fwnode_get_bus_frequency(struct fwnode_handle *fwnode)
{
struct fwnode_handle *parent;
u32 bus_freq;
int ret;
ret = fwnode_property_read_u32(fwnode, "bus-frequency", &bus_freq);
if (!ret)
return bus_freq;
fwnode_for_each_parent_node(fwnode, parent) {
ret = fwnode_property_read_u32(parent, "bus-frequency", &bus_freq);
if (!ret) {
fwnode_handle_put(parent);
return bus_freq;
}
}
return 0;
}
EXPORT_SYMBOL(mpc5xxx_fwnode_get_bus_frequency);
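/*
 * A minimal usage sketch (illustrative): a platform driver can look up its
 * IPB/IPS bus clock directly from its own device node, e.g.
 *
 *	unsigned long freq =
 *		mpc5xxx_fwnode_get_bus_frequency(dev_fwnode(&pdev->dev));
 *
 *	if (!freq)
 *		return -ENODEV;
 *
 * The walk up through the parent nodes happens inside the helper itself, so
 * callers do not need to know where "bus-frequency" is specified.
 */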
| linux-master | arch/powerpc/sysdev/mpc5xxx_clocks.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* RCPM(Run Control/Power Management) support
*
* Copyright 2012-2015 Freescale Semiconductor Inc.
*
* Author: Chenhui Zhao <[email protected]>
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/of_address.h>
#include <linux/export.h>
#include <asm/io.h>
#include <linux/fsl/guts.h>
#include <asm/cputhreads.h>
#include <asm/fsl_pm.h>
#include <asm/smp.h>
static struct ccsr_rcpm_v1 __iomem *rcpm_v1_regs;
static struct ccsr_rcpm_v2 __iomem *rcpm_v2_regs;
static unsigned int fsl_supported_pm_modes;
static void rcpm_v1_irq_mask(int cpu)
{
int hw_cpu = get_hard_smp_processor_id(cpu);
unsigned int mask = 1 << hw_cpu;
setbits32(&rcpm_v1_regs->cpmimr, mask);
setbits32(&rcpm_v1_regs->cpmcimr, mask);
setbits32(&rcpm_v1_regs->cpmmcmr, mask);
setbits32(&rcpm_v1_regs->cpmnmimr, mask);
}
static void rcpm_v2_irq_mask(int cpu)
{
int hw_cpu = get_hard_smp_processor_id(cpu);
unsigned int mask = 1 << hw_cpu;
setbits32(&rcpm_v2_regs->tpmimr0, mask);
setbits32(&rcpm_v2_regs->tpmcimr0, mask);
setbits32(&rcpm_v2_regs->tpmmcmr0, mask);
setbits32(&rcpm_v2_regs->tpmnmimr0, mask);
}
static void rcpm_v1_irq_unmask(int cpu)
{
int hw_cpu = get_hard_smp_processor_id(cpu);
unsigned int mask = 1 << hw_cpu;
clrbits32(&rcpm_v1_regs->cpmimr, mask);
clrbits32(&rcpm_v1_regs->cpmcimr, mask);
clrbits32(&rcpm_v1_regs->cpmmcmr, mask);
clrbits32(&rcpm_v1_regs->cpmnmimr, mask);
}
static void rcpm_v2_irq_unmask(int cpu)
{
int hw_cpu = get_hard_smp_processor_id(cpu);
unsigned int mask = 1 << hw_cpu;
clrbits32(&rcpm_v2_regs->tpmimr0, mask);
clrbits32(&rcpm_v2_regs->tpmcimr0, mask);
clrbits32(&rcpm_v2_regs->tpmmcmr0, mask);
clrbits32(&rcpm_v2_regs->tpmnmimr0, mask);
}
static void rcpm_v1_set_ip_power(bool enable, u32 mask)
{
if (enable)
setbits32(&rcpm_v1_regs->ippdexpcr, mask);
else
clrbits32(&rcpm_v1_regs->ippdexpcr, mask);
}
static void rcpm_v2_set_ip_power(bool enable, u32 mask)
{
if (enable)
setbits32(&rcpm_v2_regs->ippdexpcr[0], mask);
else
clrbits32(&rcpm_v2_regs->ippdexpcr[0], mask);
}
static void rcpm_v1_cpu_enter_state(int cpu, int state)
{
int hw_cpu = get_hard_smp_processor_id(cpu);
unsigned int mask = 1 << hw_cpu;
switch (state) {
case E500_PM_PH10:
setbits32(&rcpm_v1_regs->cdozcr, mask);
break;
case E500_PM_PH15:
setbits32(&rcpm_v1_regs->cnapcr, mask);
break;
default:
pr_warn("Unknown cpu PM state (%d)\n", state);
break;
}
}
static void rcpm_v2_cpu_enter_state(int cpu, int state)
{
int hw_cpu = get_hard_smp_processor_id(cpu);
u32 mask = 1 << cpu_core_index_of_thread(cpu);
switch (state) {
case E500_PM_PH10:
		/* one bit corresponds to one thread for PH10 on e6500 */
setbits32(&rcpm_v2_regs->tph10setr0, 1 << hw_cpu);
break;
case E500_PM_PH15:
setbits32(&rcpm_v2_regs->pcph15setr, mask);
break;
case E500_PM_PH20:
setbits32(&rcpm_v2_regs->pcph20setr, mask);
break;
case E500_PM_PH30:
setbits32(&rcpm_v2_regs->pcph30setr, mask);
break;
default:
pr_warn("Unknown cpu PM state (%d)\n", state);
}
}
static void rcpm_v1_cpu_die(int cpu)
{
rcpm_v1_cpu_enter_state(cpu, E500_PM_PH15);
}
#ifdef CONFIG_PPC64
static void qoriq_disable_thread(int cpu)
{
int thread = cpu_thread_in_core(cpu);
book3e_stop_thread(thread);
}
#endif
static void rcpm_v2_cpu_die(int cpu)
{
#ifdef CONFIG_PPC64
int primary;
if (threads_per_core == 2) {
primary = cpu_first_thread_sibling(cpu);
if (cpu_is_offline(primary) && cpu_is_offline(primary + 1)) {
/* if both threads are offline, put the cpu in PH20 */
rcpm_v2_cpu_enter_state(cpu, E500_PM_PH20);
} else {
/* if only one thread is offline, disable the thread */
qoriq_disable_thread(cpu);
}
}
#endif
if (threads_per_core == 1)
rcpm_v2_cpu_enter_state(cpu, E500_PM_PH20);
}
static void rcpm_v1_cpu_exit_state(int cpu, int state)
{
int hw_cpu = get_hard_smp_processor_id(cpu);
unsigned int mask = 1 << hw_cpu;
switch (state) {
case E500_PM_PH10:
clrbits32(&rcpm_v1_regs->cdozcr, mask);
break;
case E500_PM_PH15:
clrbits32(&rcpm_v1_regs->cnapcr, mask);
break;
default:
pr_warn("Unknown cpu PM state (%d)\n", state);
break;
}
}
static void rcpm_v1_cpu_up_prepare(int cpu)
{
rcpm_v1_cpu_exit_state(cpu, E500_PM_PH15);
rcpm_v1_irq_unmask(cpu);
}
static void rcpm_v2_cpu_exit_state(int cpu, int state)
{
int hw_cpu = get_hard_smp_processor_id(cpu);
u32 mask = 1 << cpu_core_index_of_thread(cpu);
switch (state) {
case E500_PM_PH10:
setbits32(&rcpm_v2_regs->tph10clrr0, 1 << hw_cpu);
break;
case E500_PM_PH15:
setbits32(&rcpm_v2_regs->pcph15clrr, mask);
break;
case E500_PM_PH20:
setbits32(&rcpm_v2_regs->pcph20clrr, mask);
break;
case E500_PM_PH30:
setbits32(&rcpm_v2_regs->pcph30clrr, mask);
break;
default:
pr_warn("Unknown cpu PM state (%d)\n", state);
}
}
static void rcpm_v2_cpu_up_prepare(int cpu)
{
rcpm_v2_cpu_exit_state(cpu, E500_PM_PH20);
rcpm_v2_irq_unmask(cpu);
}
static int rcpm_v1_plat_enter_state(int state)
{
u32 *pmcsr_reg = &rcpm_v1_regs->powmgtcsr;
int ret = 0;
int result;
switch (state) {
case PLAT_PM_SLEEP:
setbits32(pmcsr_reg, RCPM_POWMGTCSR_SLP);
/* Upon resume, wait for RCPM_POWMGTCSR_SLP bit to be clear. */
result = spin_event_timeout(
!(in_be32(pmcsr_reg) & RCPM_POWMGTCSR_SLP), 10000, 10);
if (!result) {
pr_err("timeout waiting for SLP bit to be cleared\n");
ret = -ETIMEDOUT;
}
break;
default:
pr_warn("Unknown platform PM state (%d)", state);
ret = -EINVAL;
}
return ret;
}
static int rcpm_v2_plat_enter_state(int state)
{
u32 *pmcsr_reg = &rcpm_v2_regs->powmgtcsr;
int ret = 0;
int result;
switch (state) {
case PLAT_PM_LPM20:
/* clear previous LPM20 status */
setbits32(pmcsr_reg, RCPM_POWMGTCSR_P_LPM20_ST);
/* enter LPM20 status */
setbits32(pmcsr_reg, RCPM_POWMGTCSR_LPM20_RQ);
/* At this point, the device is in LPM20 status. */
/* resume ... */
result = spin_event_timeout(
!(in_be32(pmcsr_reg) & RCPM_POWMGTCSR_LPM20_ST), 10000, 10);
if (!result) {
pr_err("timeout waiting for LPM20 bit to be cleared\n");
ret = -ETIMEDOUT;
}
break;
default:
pr_warn("Unknown platform PM state (%d)\n", state);
ret = -EINVAL;
}
return ret;
}
static int rcpm_v1_plat_enter_sleep(void)
{
return rcpm_v1_plat_enter_state(PLAT_PM_SLEEP);
}
static int rcpm_v2_plat_enter_sleep(void)
{
return rcpm_v2_plat_enter_state(PLAT_PM_LPM20);
}
static void rcpm_common_freeze_time_base(u32 *tben_reg, int freeze)
{
static u32 mask;
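	/* Remember which enable bits were set at freeze time so the same set can be restored on thaw. */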
if (freeze) {
mask = in_be32(tben_reg);
clrbits32(tben_reg, mask);
} else {
setbits32(tben_reg, mask);
}
/* read back to push the previous write */
in_be32(tben_reg);
}
static void rcpm_v1_freeze_time_base(bool freeze)
{
rcpm_common_freeze_time_base(&rcpm_v1_regs->ctbenr, freeze);
}
static void rcpm_v2_freeze_time_base(bool freeze)
{
rcpm_common_freeze_time_base(&rcpm_v2_regs->pctbenr, freeze);
}
static unsigned int rcpm_get_pm_modes(void)
{
return fsl_supported_pm_modes;
}
static const struct fsl_pm_ops qoriq_rcpm_v1_ops = {
.irq_mask = rcpm_v1_irq_mask,
.irq_unmask = rcpm_v1_irq_unmask,
.cpu_enter_state = rcpm_v1_cpu_enter_state,
.cpu_exit_state = rcpm_v1_cpu_exit_state,
.cpu_up_prepare = rcpm_v1_cpu_up_prepare,
.cpu_die = rcpm_v1_cpu_die,
.plat_enter_sleep = rcpm_v1_plat_enter_sleep,
.set_ip_power = rcpm_v1_set_ip_power,
.freeze_time_base = rcpm_v1_freeze_time_base,
.get_pm_modes = rcpm_get_pm_modes,
};
static const struct fsl_pm_ops qoriq_rcpm_v2_ops = {
.irq_mask = rcpm_v2_irq_mask,
.irq_unmask = rcpm_v2_irq_unmask,
.cpu_enter_state = rcpm_v2_cpu_enter_state,
.cpu_exit_state = rcpm_v2_cpu_exit_state,
.cpu_up_prepare = rcpm_v2_cpu_up_prepare,
.cpu_die = rcpm_v2_cpu_die,
.plat_enter_sleep = rcpm_v2_plat_enter_sleep,
.set_ip_power = rcpm_v2_set_ip_power,
.freeze_time_base = rcpm_v2_freeze_time_base,
.get_pm_modes = rcpm_get_pm_modes,
};
static const struct of_device_id rcpm_matches[] = {
{
.compatible = "fsl,qoriq-rcpm-1.0",
.data = &qoriq_rcpm_v1_ops,
},
{
.compatible = "fsl,qoriq-rcpm-2.0",
.data = &qoriq_rcpm_v2_ops,
},
{
.compatible = "fsl,qoriq-rcpm-2.1",
.data = &qoriq_rcpm_v2_ops,
},
{},
};
int __init fsl_rcpm_init(void)
{
struct device_node *np;
const struct of_device_id *match;
void __iomem *base;
np = of_find_matching_node_and_match(NULL, rcpm_matches, &match);
if (!np)
return 0;
base = of_iomap(np, 0);
of_node_put(np);
if (!base) {
pr_err("of_iomap() error.\n");
return -ENOMEM;
}
rcpm_v1_regs = base;
rcpm_v2_regs = base;
/* support sleep by default */
fsl_supported_pm_modes = FSL_PM_SLEEP;
qoriq_pm_ops = match->data;
return 0;
}
| linux-master | arch/powerpc/sysdev/fsl_rcpm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2006-2007, Michael Ellerman, IBM Corporation.
*/
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/bitmap.h>
#include <linux/msi.h>
#include <asm/mpic.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/msi_bitmap.h>
#include <sysdev/mpic.h>
void mpic_msi_reserve_hwirq(struct mpic *mpic, irq_hw_number_t hwirq)
{
/* The mpic calls this even when there is no allocator setup */
if (!mpic->msi_bitmap.bitmap)
return;
msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, hwirq);
}
#ifdef CONFIG_MPIC_U3_HT_IRQS
static int __init mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
{
irq_hw_number_t hwirq;
const struct irq_domain_ops *ops = mpic->irqhost->ops;
struct device_node *np;
int flags, index, i;
struct of_phandle_args oirq;
pr_debug("mpic: found U3, guessing msi allocator setup\n");
/* Reserve source numbers we know are reserved in the HW.
*
	 * This is a bit of a mix of U3 and U4 reserves, but that's fine:
	 * we have plenty of numbers left, so just mark anything we don't
	 * like as reserved.
*/
for (i = 0; i < 8; i++)
msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i);
for (i = 42; i < 46; i++)
msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i);
for (i = 100; i < 105; i++)
msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i);
for (i = 124; i < mpic->num_sources; i++)
msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i);
np = NULL;
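	/* Also reserve every hwirq already wired up in the device tree so MSIs cannot collide with fixed interrupts. */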
while ((np = of_find_all_nodes(np))) {
pr_debug("mpic: mapping hwirqs for %pOF\n", np);
index = 0;
while (of_irq_parse_one(np, index++, &oirq) == 0) {
ops->xlate(mpic->irqhost, NULL, oirq.args,
oirq.args_count, &hwirq, &flags);
msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, hwirq);
}
}
return 0;
}
#else
static int __init mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
{
return -1;
}
#endif
int __init mpic_msi_init_allocator(struct mpic *mpic)
{
int rc;
rc = msi_bitmap_alloc(&mpic->msi_bitmap, mpic->num_sources,
irq_domain_get_of_node(mpic->irqhost));
if (rc)
return rc;
rc = msi_bitmap_reserve_dt_hwirqs(&mpic->msi_bitmap);
if (rc > 0) {
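		/* A positive return means no MSI reservation info was found in the device tree; fall back to the U3/U4 heuristics. */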
if (mpic->flags & MPIC_U3_HT_IRQS)
rc = mpic_msi_reserve_u3_hwirqs(mpic);
if (rc) {
msi_bitmap_free(&mpic->msi_bitmap);
return rc;
}
}
return 0;
}
| linux-master | arch/powerpc/sysdev/mpic_msi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MPIC timer wakeup driver
*
* Copyright 2013 Freescale Semiconductor, Inc.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <asm/mpic_timer.h>
#include <asm/mpic.h>
struct fsl_mpic_timer_wakeup {
struct mpic_timer *timer;
struct work_struct free_work;
};
static struct fsl_mpic_timer_wakeup *fsl_wakeup;
static DEFINE_MUTEX(sysfs_lock);
static void fsl_free_resource(struct work_struct *ws)
{
struct fsl_mpic_timer_wakeup *wakeup =
container_of(ws, struct fsl_mpic_timer_wakeup, free_work);
mutex_lock(&sysfs_lock);
if (wakeup->timer) {
disable_irq_wake(wakeup->timer->irq);
mpic_free_timer(wakeup->timer);
}
wakeup->timer = NULL;
mutex_unlock(&sysfs_lock);
}
static irqreturn_t fsl_mpic_timer_irq(int irq, void *dev_id)
{
struct fsl_mpic_timer_wakeup *wakeup = dev_id;
schedule_work(&wakeup->free_work);
return wakeup->timer ? IRQ_HANDLED : IRQ_NONE;
}
static ssize_t fsl_timer_wakeup_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
time64_t interval = 0;
mutex_lock(&sysfs_lock);
if (fsl_wakeup->timer) {
mpic_get_remain_time(fsl_wakeup->timer, &interval);
interval++;
}
mutex_unlock(&sysfs_lock);
return sprintf(buf, "%lld\n", interval);
}
static ssize_t fsl_timer_wakeup_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
time64_t interval;
int ret;
if (kstrtoll(buf, 0, &interval))
return -EINVAL;
mutex_lock(&sysfs_lock);
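	/* Tear down any previously armed wakeup timer before (re)programming. */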
if (fsl_wakeup->timer) {
disable_irq_wake(fsl_wakeup->timer->irq);
mpic_free_timer(fsl_wakeup->timer);
fsl_wakeup->timer = NULL;
}
if (!interval) {
mutex_unlock(&sysfs_lock);
return count;
}
fsl_wakeup->timer = mpic_request_timer(fsl_mpic_timer_irq,
fsl_wakeup, interval);
if (!fsl_wakeup->timer) {
mutex_unlock(&sysfs_lock);
return -EINVAL;
}
ret = enable_irq_wake(fsl_wakeup->timer->irq);
if (ret) {
mpic_free_timer(fsl_wakeup->timer);
fsl_wakeup->timer = NULL;
mutex_unlock(&sysfs_lock);
return ret;
}
mpic_start_timer(fsl_wakeup->timer);
mutex_unlock(&sysfs_lock);
return count;
}
static struct device_attribute mpic_attributes = __ATTR(timer_wakeup, 0644,
fsl_timer_wakeup_show, fsl_timer_wakeup_store);
static int __init fsl_wakeup_sys_init(void)
{
struct device *dev_root;
int ret = -EINVAL;
fsl_wakeup = kzalloc(sizeof(struct fsl_mpic_timer_wakeup), GFP_KERNEL);
if (!fsl_wakeup)
return -ENOMEM;
INIT_WORK(&fsl_wakeup->free_work, fsl_free_resource);
dev_root = bus_get_dev_root(&mpic_subsys);
if (dev_root) {
ret = device_create_file(dev_root, &mpic_attributes);
put_device(dev_root);
if (ret)
kfree(fsl_wakeup);
}
return ret;
}
static void __exit fsl_wakeup_sys_exit(void)
{
struct device *dev_root;
dev_root = bus_get_dev_root(&mpic_subsys);
if (dev_root) {
device_remove_file(dev_root, &mpic_attributes);
put_device(dev_root);
}
mutex_lock(&sysfs_lock);
if (fsl_wakeup->timer) {
disable_irq_wake(fsl_wakeup->timer->irq);
mpic_free_timer(fsl_wakeup->timer);
}
kfree(fsl_wakeup);
mutex_unlock(&sysfs_lock);
}
module_init(fsl_wakeup_sys_init);
module_exit(fsl_wakeup_sys_exit);
MODULE_DESCRIPTION("Freescale MPIC global timer wakeup driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Wang Dongsheng <[email protected]>");
| linux-master | arch/powerpc/sysdev/fsl_mpic_timer_wakeup.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* arch/powerpc/sysdev/dart_iommu.c
*
* Copyright (C) 2004 Olof Johansson <[email protected]>, IBM Corporation
* Copyright (C) 2005 Benjamin Herrenschmidt <[email protected]>,
* IBM Corporation
*
* Based on pSeries_iommu.c:
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
* Copyright (C) 2004 Olof Johansson <[email protected]>, IBM Corporation
*
* Dynamic DMA mapping support, Apple U3, U4 & IBM CPC925 "DART" iommu.
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/kmemleak.h>
#include <linux/of_address.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/cacheflush.h>
#include <asm/ppc-pci.h>
#include "dart.h"
/* DART table address and size */
static u32 *dart_tablebase;
static unsigned long dart_tablesize;
/* Mapped base address for the dart */
static unsigned int __iomem *dart;
/* Dummy val that entries are set to when unused */
static unsigned int dart_emptyval;
static struct iommu_table iommu_table_dart;
static int iommu_table_dart_inited;
static int dart_dirty;
static int dart_is_u4;
#define DART_U4_BYPASS_BASE 0x8000000000ull
#define DBG(...)
static DEFINE_SPINLOCK(invalidate_lock);
static inline void dart_tlb_invalidate_all(void)
{
unsigned long l = 0;
unsigned int reg, inv_bit;
unsigned long limit;
unsigned long flags;
spin_lock_irqsave(&invalidate_lock, flags);
DBG("dart: flush\n");
/* To invalidate the DART, set the DARTCNTL_FLUSHTLB bit in the
* control register and wait for it to clear.
*
* Gotcha: Sometimes, the DART won't detect that the bit gets
* set. If so, clear it and set it again.
*/
limit = 0;
inv_bit = dart_is_u4 ? DART_CNTL_U4_FLUSHTLB : DART_CNTL_U3_FLUSHTLB;
retry:
l = 0;
reg = DART_IN(DART_CNTL);
reg |= inv_bit;
DART_OUT(DART_CNTL, reg);
while ((DART_IN(DART_CNTL) & inv_bit) && l < (1L << limit))
l++;
if (l == (1L << limit)) {
if (limit < 4) {
limit++;
reg = DART_IN(DART_CNTL);
reg &= ~inv_bit;
DART_OUT(DART_CNTL, reg);
goto retry;
} else
panic("DART: TLB did not flush after waiting a long "
"time. Buggy U3 ?");
}
spin_unlock_irqrestore(&invalidate_lock, flags);
}
static inline void dart_tlb_invalidate_one(unsigned long bus_rpn)
{
unsigned int reg;
unsigned int l, limit;
unsigned long flags;
spin_lock_irqsave(&invalidate_lock, flags);
reg = DART_CNTL_U4_ENABLE | DART_CNTL_U4_IONE |
(bus_rpn & DART_CNTL_U4_IONE_MASK);
DART_OUT(DART_CNTL, reg);
limit = 0;
wait_more:
l = 0;
while ((DART_IN(DART_CNTL) & DART_CNTL_U4_IONE) && l < (1L << limit)) {
rmb();
l++;
}
if (l == (1L << limit)) {
if (limit < 4) {
limit++;
goto wait_more;
} else
panic("DART: TLB did not flush after waiting a long "
"time. Buggy U4 ?");
}
spin_unlock_irqrestore(&invalidate_lock, flags);
}
static void dart_cache_sync(unsigned int *base, unsigned int count)
{
/*
* We add 1 to the number of entries to flush, following a
* comment in Darwin indicating that the memory controller
* can prefetch unmapped memory under some circumstances.
*/
unsigned long start = (unsigned long)base;
unsigned long end = start + (count + 1) * sizeof(unsigned int);
unsigned int tmp;
/* Perform a standard cache flush */
flush_dcache_range(start, end);
/*
* Perform the sequence described in the CPC925 manual to
* ensure all the data gets to a point the cache incoherent
* DART hardware will see.
*/
asm volatile(" sync;"
" isync;"
" dcbf 0,%1;"
" sync;"
" isync;"
" lwz %0,0(%1);"
" isync" : "=r" (tmp) : "r" (end) : "memory");
}
static void dart_flush(struct iommu_table *tbl)
{
mb();
if (dart_dirty) {
dart_tlb_invalidate_all();
dart_dirty = 0;
}
}
static int dart_build(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
unsigned long attrs)
{
unsigned int *dp, *orig_dp;
unsigned int rpn;
long l;
DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr);
orig_dp = dp = ((unsigned int*)tbl->it_base) + index;
/* On U3, all memory is contiguous, so we can move this
* out of the loop.
*/
l = npages;
while (l--) {
rpn = __pa(uaddr) >> DART_PAGE_SHIFT;
*(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK);
uaddr += DART_PAGE_SIZE;
}
dart_cache_sync(orig_dp, npages);
if (dart_is_u4) {
rpn = index;
while (npages--)
dart_tlb_invalidate_one(rpn++);
} else {
dart_dirty = 1;
}
return 0;
}
static void dart_free(struct iommu_table *tbl, long index, long npages)
{
unsigned int *dp, *orig_dp;
long orig_npages = npages;
/* We don't worry about flushing the TLB cache. The only drawback of
* not doing it is that we won't catch buggy device drivers doing
* bad DMAs, but then no 32-bit architecture ever does either.
*/
DBG("dart: free at: %lx, %lx\n", index, npages);
orig_dp = dp = ((unsigned int *)tbl->it_base) + index;
while (npages--)
*(dp++) = dart_emptyval;
dart_cache_sync(orig_dp, orig_npages);
}
static void __init allocate_dart(void)
{
unsigned long tmp;
/* 512 pages (2MB) is max DART tablesize. */
dart_tablesize = 1UL << 21;
/*
	 * 16MB (1 << 24) alignment. We allocate a full 16MB chunk since we
* will blow up an entire large page anyway in the kernel mapping.
*/
dart_tablebase = memblock_alloc_try_nid_raw(SZ_16M, SZ_16M,
MEMBLOCK_LOW_LIMIT, SZ_2G,
NUMA_NO_NODE);
if (!dart_tablebase)
panic("Failed to allocate 16MB below 2GB for DART table\n");
	/* There is no point scanning the DART space for leaks */
kmemleak_no_scan((void *)dart_tablebase);
/* Allocate a spare page to map all invalid DART pages. We need to do
* that to work around what looks like a problem with the HT bridge
* prefetching into invalid pages and corrupting data
*/
tmp = memblock_phys_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
if (!tmp)
panic("DART: table allocation failed\n");
dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) &
DARTMAP_RPNMASK);
printk(KERN_INFO "DART table allocated at: %p\n", dart_tablebase);
}
static int __init dart_init(struct device_node *dart_node)
{
unsigned int i;
unsigned long base, size;
struct resource r;
/* IOMMU disabled by the user ? bail out */
if (iommu_is_off)
return -ENODEV;
/*
* Only use the DART if the machine has more than 1GB of RAM
* or if requested with iommu=on on cmdline.
*
* 1GB of RAM is picked as limit because some default devices
* (i.e. Airport Extreme) have 30 bit address range limits.
*/
if (!iommu_force_on && memblock_end_of_DRAM() <= 0x40000000ull)
return -ENODEV;
/* Get DART registers */
if (of_address_to_resource(dart_node, 0, &r))
panic("DART: can't get register base ! ");
/* Map in DART registers */
dart = ioremap(r.start, resource_size(&r));
if (dart == NULL)
panic("DART: Cannot map registers!");
/* Allocate the DART and dummy page */
allocate_dart();
/* Fill initial table */
for (i = 0; i < dart_tablesize/4; i++)
dart_tablebase[i] = dart_emptyval;
/* Push to memory */
dart_cache_sync(dart_tablebase, dart_tablesize / sizeof(u32));
/* Initialize DART with table base and enable it. */
base = ((unsigned long)dart_tablebase) >> DART_PAGE_SHIFT;
size = dart_tablesize >> DART_PAGE_SHIFT;
if (dart_is_u4) {
size &= DART_SIZE_U4_SIZE_MASK;
DART_OUT(DART_BASE_U4, base);
DART_OUT(DART_SIZE_U4, size);
DART_OUT(DART_CNTL, DART_CNTL_U4_ENABLE);
} else {
size &= DART_CNTL_U3_SIZE_MASK;
DART_OUT(DART_CNTL,
DART_CNTL_U3_ENABLE |
(base << DART_CNTL_U3_BASE_SHIFT) |
(size << DART_CNTL_U3_SIZE_SHIFT));
}
/* Invalidate DART to get rid of possible stale TLBs */
dart_tlb_invalidate_all();
printk(KERN_INFO "DART IOMMU initialized for %s type chipset\n",
dart_is_u4 ? "U4" : "U3");
return 0;
}
static struct iommu_table_ops iommu_dart_ops = {
.set = dart_build,
.clear = dart_free,
.flush = dart_flush,
};
static void iommu_table_dart_setup(void)
{
iommu_table_dart.it_busno = 0;
iommu_table_dart.it_offset = 0;
/* it_size is in number of entries */
iommu_table_dart.it_size = dart_tablesize / sizeof(u32);
iommu_table_dart.it_page_shift = IOMMU_PAGE_SHIFT_4K;
/* Initialize the common IOMMU code */
iommu_table_dart.it_base = (unsigned long)dart_tablebase;
iommu_table_dart.it_index = 0;
iommu_table_dart.it_blocksize = 1;
iommu_table_dart.it_ops = &iommu_dart_ops;
if (!iommu_init_table(&iommu_table_dart, -1, 0, 0))
panic("Failed to initialize iommu table");
/* Reserve the last page of the DART to avoid possible prefetch
* past the DART mapped area
*/
set_bit(iommu_table_dart.it_size - 1, iommu_table_dart.it_map);
}
static void pci_dma_bus_setup_dart(struct pci_bus *bus)
{
if (!iommu_table_dart_inited) {
iommu_table_dart_inited = 1;
iommu_table_dart_setup();
}
}
static bool dart_device_on_pcie(struct device *dev)
{
struct device_node *np = of_node_get(dev->of_node);
while(np) {
if (of_device_is_compatible(np, "U4-pcie") ||
of_device_is_compatible(np, "u4-pcie")) {
of_node_put(np);
return true;
}
np = of_get_next_parent(np);
}
return false;
}
static void pci_dma_dev_setup_dart(struct pci_dev *dev)
{
if (dart_is_u4 && dart_device_on_pcie(&dev->dev))
dev->dev.archdata.dma_offset = DART_U4_BYPASS_BASE;
set_iommu_table_base(&dev->dev, &iommu_table_dart);
}
static bool iommu_bypass_supported_dart(struct pci_dev *dev, u64 mask)
{
return dart_is_u4 &&
dart_device_on_pcie(&dev->dev) &&
mask >= DMA_BIT_MASK(40);
}
void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
{
struct device_node *dn;
/* Find the DART in the device-tree */
dn = of_find_compatible_node(NULL, "dart", "u3-dart");
if (dn == NULL) {
dn = of_find_compatible_node(NULL, "dart", "u4-dart");
if (dn == NULL)
return; /* use default direct_dma_ops */
dart_is_u4 = 1;
}
/* Initialize the DART HW */
if (dart_init(dn) != 0) {
of_node_put(dn);
return;
}
/*
* U4 supports a DART bypass, we use it for 64-bit capable devices to
* improve performance. However, that only works for devices connected
* to the U4 own PCIe interface, not bridged through hypertransport.
* We need the device to support at least 40 bits of addresses.
*/
controller_ops->dma_dev_setup = pci_dma_dev_setup_dart;
controller_ops->dma_bus_setup = pci_dma_bus_setup_dart;
controller_ops->iommu_bypass_supported = iommu_bypass_supported_dart;
/* Setup pci_dma ops */
set_pci_dma_ops(&dma_iommu_ops);
of_node_put(dn);
}
#ifdef CONFIG_PM
static void iommu_dart_restore(void)
{
dart_cache_sync(dart_tablebase, dart_tablesize / sizeof(u32));
dart_tlb_invalidate_all();
}
static int __init iommu_init_late_dart(void)
{
if (!dart_tablebase)
return 0;
ppc_md.iommu_restore = iommu_dart_restore;
return 0;
}
late_initcall(iommu_init_late_dart);
#endif /* CONFIG_PM */
| linux-master | arch/powerpc/sysdev/dart_iommu.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MPC83xx/85xx/86xx PCI/PCIE support routines.
*
* Copyright 2007-2012 Freescale Semiconductor, Inc.
* Copyright 2008-2009 MontaVista Software, Inc.
*
* Initial author: Xianghua Xiao <[email protected]>
* Recode: ZHANG WEI <[email protected]>
 *    Rewrite the routing for Freescale PCI and PCI Express
* Roy Zang <[email protected]>
* MPC83xx PCI-Express support:
* Tony Li <[email protected]>
* Anton Vorontsov <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/fsl/edac.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/memblock.h>
#include <linux/log2.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/machdep.h>
#include <asm/mpc85xx.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/swiotlb.h>
#include <asm/setup.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
static int fsl_pcie_bus_fixup, is_mpc83xx_pci;
static void quirk_fsl_pcie_early(struct pci_dev *dev)
{
u8 hdr_type;
/* if we aren't a PCIe don't bother */
if (!pci_is_pcie(dev))
return;
/* if we aren't in host mode don't bother */
pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type);
if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
return;
dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
fsl_pcie_bus_fixup = 1;
return;
}
static int fsl_indirect_read_config(struct pci_bus *, unsigned int,
int, int, u32 *);
static int fsl_pcie_check_link(struct pci_controller *hose)
{
u32 val = 0;
if (hose->indirect_type & PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK) {
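		/* Pre-3.0 controllers: read the LTSSM state from config space; anything below L0 means the link is not up. */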
if (hose->ops->read == fsl_indirect_read_config)
__indirect_read_config(hose, hose->first_busno, 0,
PCIE_LTSSM, 4, &val);
else
early_read_config_dword(hose, 0, 0, PCIE_LTSSM, &val);
if (val < PCIE_LTSSM_L0)
return 1;
} else {
struct ccsr_pci __iomem *pci = hose->private_data;
/* for PCIe IP rev 3.0 or greater use CSR0 for link state */
val = (in_be32(&pci->pex_csr0) & PEX_CSR0_LTSSM_MASK)
>> PEX_CSR0_LTSSM_SHIFT;
if (val != PEX_CSR0_LTSSM_L0)
return 1;
}
return 0;
}
static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
if (fsl_pcie_check_link(hose))
hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
else
hose->indirect_type &= ~PPC_INDIRECT_TYPE_NO_PCIE_LINK;
return indirect_read_config(bus, devfn, offset, len, val);
}
#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
static struct pci_ops fsl_indirect_pcie_ops =
{
.read = fsl_indirect_read_config,
.write = indirect_write_config,
};
static u64 pci64_dma_offset;
#ifdef CONFIG_SWIOTLB
static void pci_dma_dev_setup_swiotlb(struct pci_dev *pdev)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
pdev->dev.bus_dma_limit =
hose->dma_window_base_cur + hose->dma_window_size - 1;
}
static void setup_swiotlb_ops(struct pci_controller *hose)
{
if (ppc_swiotlb_enable)
hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
}
#else
static inline void setup_swiotlb_ops(struct pci_controller *hose) {}
#endif
static void fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
{
/*
* Fix up PCI devices that are able to DMA to the large inbound
* mapping that allows addressing any RAM address from across PCI.
*/
if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
dev->bus_dma_limit = 0;
dev->archdata.dma_offset = pci64_dma_offset;
}
}
static int setup_one_atmu(struct ccsr_pci __iomem *pci,
unsigned int index, const struct resource *res,
resource_size_t offset)
{
resource_size_t pci_addr = res->start - offset;
resource_size_t phys_addr = res->start;
resource_size_t size = resource_size(res);
u32 flags = 0x80044000; /* enable & mem R/W */
unsigned int i;
pr_debug("PCI MEM resource start 0x%016llx, size 0x%016llx.\n",
(u64)res->start, (u64)size);
if (res->flags & IORESOURCE_PREFETCH)
flags |= 0x10000000; /* enable relaxed ordering */
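	/*
	 * Each outbound ATMU window must be a naturally aligned power-of-two,
	 * so split the resource into however many windows the size and
	 * alignment of the addresses require.
	 */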
for (i = 0; size > 0; i++) {
unsigned int bits = min_t(u32, ilog2(size),
__ffs(pci_addr | phys_addr));
if (index + i >= 5)
return -1;
out_be32(&pci->pow[index + i].potar, pci_addr >> 12);
out_be32(&pci->pow[index + i].potear, (u64)pci_addr >> 44);
out_be32(&pci->pow[index + i].powbar, phys_addr >> 12);
out_be32(&pci->pow[index + i].powar, flags | (bits - 1));
pci_addr += (resource_size_t)1U << bits;
phys_addr += (resource_size_t)1U << bits;
size -= (resource_size_t)1U << bits;
}
return i;
}
static bool is_kdump(void)
{
struct device_node *node;
bool ret;
node = of_find_node_by_type(NULL, "memory");
if (!node) {
WARN_ON_ONCE(1);
return false;
}
ret = of_property_read_bool(node, "linux,usable-memory");
of_node_put(node);
return ret;
}
/* atmu setup for fsl pci/pcie controller */
static void setup_pci_atmu(struct pci_controller *hose)
{
struct ccsr_pci __iomem *pci = hose->private_data;
int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4;
u64 mem, sz, paddr_hi = 0;
u64 offset = 0, paddr_lo = ULLONG_MAX;
u32 pcicsrbar = 0, pcicsrbar_sz;
u32 piwar = PIWAR_EN | PIWAR_PF | PIWAR_TGI_LOCAL |
PIWAR_READ_SNOOP | PIWAR_WRITE_SNOOP;
const u64 *reg;
int len;
bool setup_inbound;
/*
* If this is kdump, we don't want to trigger a bunch of PCI
* errors by closing the window on in-flight DMA.
*
* We still run most of the function's logic so that things like
* hose->dma_window_size still get set.
*/
setup_inbound = !is_kdump();
if (of_device_is_compatible(hose->dn, "fsl,bsc9132-pcie")) {
/*
* BSC9132 Rev1.0 has an issue where all the PEX inbound
* windows have implemented the default target value as 0xf
		 * for CCSR space. In all Freescale legacy devices the target
* of 0xf is reserved for local memory space. 9132 Rev1.0
* now has local memory space mapped to target 0x0 instead of
* 0xf. Hence adding a workaround to remove the target 0xf
* defined for memory space from Inbound window attributes.
*/
piwar &= ~PIWAR_TGI_LOCAL;
}
if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) {
win_idx = 2;
start_idx = 0;
end_idx = 3;
}
}
/* Disable all windows (except powar0 since it's ignored) */
for(i = 1; i < 5; i++)
out_be32(&pci->pow[i].powar, 0);
if (setup_inbound) {
for (i = start_idx; i < end_idx; i++)
out_be32(&pci->piw[i].piwar, 0);
}
/* Setup outbound MEM window */
for(i = 0, j = 1; i < 3; i++) {
if (!(hose->mem_resources[i].flags & IORESOURCE_MEM))
continue;
paddr_lo = min(paddr_lo, (u64)hose->mem_resources[i].start);
paddr_hi = max(paddr_hi, (u64)hose->mem_resources[i].end);
/* We assume all memory resources have the same offset */
offset = hose->mem_offset[i];
n = setup_one_atmu(pci, j, &hose->mem_resources[i], offset);
if (n < 0 || j >= 5) {
pr_err("Ran out of outbound PCI ATMUs for resource %d!\n", i);
hose->mem_resources[i].flags |= IORESOURCE_DISABLED;
} else
j += n;
}
/* Setup outbound IO window */
if (hose->io_resource.flags & IORESOURCE_IO) {
if (j >= 5) {
pr_err("Ran out of outbound PCI ATMUs for IO resource\n");
} else {
pr_debug("PCI IO resource start 0x%016llx, size 0x%016llx, "
"phy base 0x%016llx.\n",
(u64)hose->io_resource.start,
(u64)resource_size(&hose->io_resource),
(u64)hose->io_base_phys);
out_be32(&pci->pow[j].potar, (hose->io_resource.start >> 12));
out_be32(&pci->pow[j].potear, 0);
out_be32(&pci->pow[j].powbar, (hose->io_base_phys >> 12));
/* Enable, IO R/W */
out_be32(&pci->pow[j].powar, 0x80088000
| (ilog2(hose->io_resource.end
- hose->io_resource.start + 1) - 1));
}
}
/* convert to pci address space */
paddr_hi -= offset;
paddr_lo -= offset;
if (paddr_hi == paddr_lo) {
pr_err("%pOF: No outbound window space\n", hose->dn);
return;
}
if (paddr_lo == 0) {
pr_err("%pOF: No space for inbound window\n", hose->dn);
return;
}
/* setup PCSRBAR/PEXCSRBAR */
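	/*
	 * Classic BAR sizing: write all-ones, read the value back to learn the
	 * BAR size, then place PCICSRBAR where it cannot overlap the outbound
	 * windows.
	 */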
early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, 0xffffffff);
early_read_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, &pcicsrbar_sz);
pcicsrbar_sz = ~pcicsrbar_sz + 1;
if (paddr_hi < (0x100000000ull - pcicsrbar_sz) ||
(paddr_lo > 0x100000000ull))
pcicsrbar = 0x100000000ull - pcicsrbar_sz;
else
pcicsrbar = (paddr_lo - pcicsrbar_sz) & -pcicsrbar_sz;
early_write_config_dword(hose, 0, 0, PCI_BASE_ADDRESS_0, pcicsrbar);
paddr_lo = min(paddr_lo, (u64)pcicsrbar);
pr_info("%pOF: PCICSRBAR @ 0x%x\n", hose->dn, pcicsrbar);
/* Setup inbound mem window */
mem = memblock_end_of_DRAM();
pr_info("%s: end of DRAM %llx\n", __func__, mem);
/*
* The msi-address-64 property, if it exists, indicates the physical
* address of the MSIIR register. Normally, this register is located
* inside CCSR, so the ATMU that covers all of CCSR is used. But if
* this property exists, then we normally need to create a new ATMU
* for it. For now, however, we cheat. The only entity that creates
* this property is the Freescale hypervisor, and the address is
* specified in the partition configuration. Typically, the address
* is located in the page immediately after the end of DDR. If so, we
* can avoid allocating a new ATMU by extending the DDR ATMU by one
* page.
*/
reg = of_get_property(hose->dn, "msi-address-64", &len);
if (reg && (len == sizeof(u64))) {
u64 address = be64_to_cpup(reg);
if ((address >= mem) && (address < (mem + PAGE_SIZE))) {
pr_info("%pOF: extending DDR ATMU to cover MSIIR", hose->dn);
mem += PAGE_SIZE;
} else {
/* TODO: Create a new ATMU for MSIIR */
pr_warn("%pOF: msi-address-64 address of %llx is "
"unsupported\n", hose->dn, address);
}
}
sz = min(mem, paddr_lo);
mem_log = ilog2(sz);
/* PCIe can overmap inbound & outbound since RX & TX are separated */
if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
/* Size window to exact size if power-of-two or one size up */
if ((1ull << mem_log) != mem) {
mem_log++;
if ((1ull << mem_log) > mem)
pr_info("%pOF: Setting PCI inbound window "
"greater than memory size\n", hose->dn);
}
piwar |= ((mem_log - 1) & PIWAR_SZ_MASK);
if (setup_inbound) {
/* Setup inbound memory window */
out_be32(&pci->piw[win_idx].pitar, 0x00000000);
out_be32(&pci->piw[win_idx].piwbar, 0x00000000);
out_be32(&pci->piw[win_idx].piwar, piwar);
}
win_idx--;
hose->dma_window_base_cur = 0x00000000;
hose->dma_window_size = (resource_size_t)sz;
/*
* if we have >4G of memory setup second PCI inbound window to
* let devices that are 64-bit address capable to work w/o
* SWIOTLB and access the full range of memory
*/
if (sz != mem) {
mem_log = ilog2(mem);
			/* Size window up if we don't fit in an exact power-of-2 */
if ((1ull << mem_log) != mem)
mem_log++;
piwar = (piwar & ~PIWAR_SZ_MASK) | (mem_log - 1);
pci64_dma_offset = 1ULL << mem_log;
if (setup_inbound) {
/* Setup inbound memory window */
out_be32(&pci->piw[win_idx].pitar, 0x00000000);
out_be32(&pci->piw[win_idx].piwbear,
pci64_dma_offset >> 44);
out_be32(&pci->piw[win_idx].piwbar,
pci64_dma_offset >> 12);
out_be32(&pci->piw[win_idx].piwar, piwar);
}
/*
* install our own dma_set_mask handler to fixup dma_ops
* and dma_offset
*/
ppc_md.dma_set_mask = fsl_pci_dma_set_mask;
pr_info("%pOF: Setup 64-bit PCI DMA window\n", hose->dn);
}
} else {
u64 paddr = 0;
if (setup_inbound) {
/* Setup inbound memory window */
out_be32(&pci->piw[win_idx].pitar, paddr >> 12);
out_be32(&pci->piw[win_idx].piwbar, paddr >> 12);
out_be32(&pci->piw[win_idx].piwar,
(piwar | (mem_log - 1)));
}
win_idx--;
paddr += 1ull << mem_log;
sz -= 1ull << mem_log;
if (sz) {
mem_log = ilog2(sz);
piwar |= (mem_log - 1);
if (setup_inbound) {
out_be32(&pci->piw[win_idx].pitar,
paddr >> 12);
out_be32(&pci->piw[win_idx].piwbar,
paddr >> 12);
out_be32(&pci->piw[win_idx].piwar, piwar);
}
win_idx--;
paddr += 1ull << mem_log;
}
hose->dma_window_base_cur = 0x00000000;
hose->dma_window_size = (resource_size_t)paddr;
}
if (hose->dma_window_size < mem) {
#ifdef CONFIG_SWIOTLB
ppc_swiotlb_enable = 1;
#else
pr_err("%pOF: ERROR: Memory size exceeds PCI ATMU ability to "
"map - enable CONFIG_SWIOTLB to avoid dma errors.\n",
hose->dn);
#endif
/* adjusting outbound windows could reclaim space in mem map */
if (paddr_hi < 0xffffffffull)
pr_warn("%pOF: WARNING: Outbound window cfg leaves "
"gaps in memory map. Adjusting the memory map "
"could reduce unnecessary bounce buffering.\n",
hose->dn);
pr_info("%pOF: DMA window size is 0x%llx\n", hose->dn,
(u64)hose->dma_window_size);
}
}
static void setup_pci_cmd(struct pci_controller *hose)
{
u16 cmd;
int cap_x;
early_read_config_word(hose, 0, 0, PCI_COMMAND, &cmd);
cmd |= PCI_COMMAND_SERR | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
| PCI_COMMAND_IO;
early_write_config_word(hose, 0, 0, PCI_COMMAND, cmd);
cap_x = early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX);
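	/* PCI-X bridges get split/read limits programmed; plain PCI just gets a latency timer. */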
if (cap_x) {
int pci_x_cmd = cap_x + PCI_X_CMD;
cmd = PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ
| PCI_X_CMD_ERO | PCI_X_CMD_DPERR_E;
early_write_config_word(hose, 0, 0, pci_x_cmd, cmd);
} else {
early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0x80);
}
}
void fsl_pcibios_fixup_bus(struct pci_bus *bus)
{
struct pci_controller *hose = pci_bus_to_host(bus);
int i, is_pcie = 0, no_link;
/* The root complex bridge comes up with bogus resources,
* we copy the PHB ones in.
*
* With the current generic PCI code, the PHB bus no longer
* has bus->resource[0..4] set, so things are a bit more
* tricky.
*/
if (fsl_pcie_bus_fixup)
is_pcie = early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
no_link = !!(hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK);
if (bus->parent == hose->bus && (is_pcie || no_link)) {
for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; ++i) {
struct resource *res = bus->resource[i];
struct resource *par;
if (!res)
continue;
if (i == 0)
par = &hose->io_resource;
else if (i < 4)
par = &hose->mem_resources[i-1];
else par = NULL;
res->start = par ? par->start : 0;
res->end = par ? par->end : 0;
res->flags = par ? par->flags : 0;
}
}
}
static int fsl_add_bridge(struct platform_device *pdev, int is_primary)
{
int len;
struct pci_controller *hose;
struct resource rsrc;
const int *bus_range;
u8 hdr_type, progif;
u32 class_code;
struct device_node *dev;
struct ccsr_pci __iomem *pci;
u16 temp;
u32 svr = mfspr(SPRN_SVR);
dev = pdev->dev.of_node;
if (!of_device_is_available(dev)) {
pr_warn("%pOF: disabled\n", dev);
return -ENODEV;
}
pr_debug("Adding PCI host bridge %pOF\n", dev);
/* Fetch host bridge registers address */
if (of_address_to_resource(dev, 0, &rsrc)) {
printk(KERN_WARNING "Can't get pci register base!");
return -ENOMEM;
}
/* Get bus range if any */
bus_range = of_get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int))
printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
" bus 0\n", dev);
pci_add_flags(PCI_REASSIGN_ALL_BUS);
hose = pcibios_alloc_controller(dev);
if (!hose)
return -ENOMEM;
/* set platform device as the parent */
hose->parent = &pdev->dev;
hose->first_busno = bus_range ? bus_range[0] : 0x0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n",
(u64)rsrc.start, (u64)resource_size(&rsrc));
pci = hose->private_data = ioremap(rsrc.start, resource_size(&rsrc));
if (!hose->private_data)
goto no_bridge;
setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
PPC_INDIRECT_TYPE_BIG_ENDIAN);
if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
/* use fsl_indirect_read_config for PCIe */
hose->ops = &fsl_indirect_pcie_ops;
/* For PCIE read HEADER_TYPE to identify controller mode */
early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
goto no_bridge;
} else {
/* For PCI read PROG to identify controller mode */
early_read_config_byte(hose, 0, 0, PCI_CLASS_PROG, &progif);
if ((progif & 1) &&
!of_property_read_bool(dev, "fsl,pci-agent-force-enum"))
goto no_bridge;
}
setup_pci_cmd(hose);
/* check PCI express link status */
if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
hose->indirect_type |= PPC_INDIRECT_TYPE_EXT_REG |
PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS;
if (fsl_pcie_check_link(hose))
hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
/* Fix Class Code to PCI_CLASS_BRIDGE_PCI_NORMAL for pre-3.0 controller */
if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0) {
early_read_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, &class_code);
class_code &= 0xff;
class_code |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
early_write_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, class_code);
}
} else {
/*
* Set PBFR(PCI Bus Function Register)[10] = 1 to
* disable the combining of crossing cacheline
* boundary requests into one burst transaction.
* PCI-X operation is not affected.
* Fix erratum PCI 5 on MPC8548
*/
#define PCI_BUS_FUNCTION 0x44
#define PCI_BUS_FUNCTION_MDS 0x400 /* Master disable streaming */
if (((SVR_SOC_VER(svr) == SVR_8543) ||
(SVR_SOC_VER(svr) == SVR_8545) ||
(SVR_SOC_VER(svr) == SVR_8547) ||
(SVR_SOC_VER(svr) == SVR_8548)) &&
!early_find_capability(hose, 0, 0, PCI_CAP_ID_PCIX)) {
early_read_config_word(hose, 0, 0,
PCI_BUS_FUNCTION, &temp);
temp |= PCI_BUS_FUNCTION_MDS;
early_write_config_word(hose, 0, 0,
PCI_BUS_FUNCTION, temp);
}
}
printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
"Firmware bus number: %d->%d\n",
(unsigned long long)rsrc.start, hose->first_busno,
hose->last_busno);
pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
hose, hose->cfg_addr, hose->cfg_data);
/* Interpret the "ranges" property */
/* This also maps the I/O region and sets isa_io/mem_base */
pci_process_bridge_OF_ranges(hose, dev, is_primary);
/* Setup PEX window registers */
setup_pci_atmu(hose);
/* Set up controller operations */
setup_swiotlb_ops(hose);
return 0;
no_bridge:
iounmap(hose->private_data);
/* unmap cfg_data & cfg_addr separately if not on same page */
if (((unsigned long)hose->cfg_data & PAGE_MASK) !=
((unsigned long)hose->cfg_addr & PAGE_MASK))
iounmap(hose->cfg_data);
iounmap(hose->cfg_addr);
pcibios_free_controller(hose);
return -ENODEV;
}
#endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID,
quirk_fsl_pcie_early);
#if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x)
struct mpc83xx_pcie_priv {
void __iomem *cfg_type0;
void __iomem *cfg_type1;
u32 dev_base;
};
struct pex_inbound_window {
u32 ar;
u32 tar;
u32 barl;
u32 barh;
};
/*
 * Following the U-Boot convention, PCIe outbound window 0 is used for
 * outbound configuration transactions.
*/
#define PEX_OUTWIN0_BAR 0xCA4
#define PEX_OUTWIN0_TAL 0xCA8
#define PEX_OUTWIN0_TAH 0xCAC
#define PEX_RC_INWIN_BASE 0xE60
#define PEX_RCIWARn_EN 0x1
static int mpc83xx_pcie_exclude_device(struct pci_bus *bus, unsigned int devfn)
{
struct pci_controller *hose = pci_bus_to_host(bus);
if (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
* Workaround for the HW bug: for Type 0 configure transactions the
* PCI-E controller does not check the device number bits and just
* assumes that the device number bits are 0.
*/
if (bus->number == hose->first_busno ||
bus->primary == hose->first_busno) {
if (devfn & 0xf8)
return PCIBIOS_DEVICE_NOT_FOUND;
}
if (ppc_md.pci_exclude_device) {
if (ppc_md.pci_exclude_device(hose, bus->number, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
}
return PCIBIOS_SUCCESSFUL;
}
static void __iomem *mpc83xx_pcie_remap_cfg(struct pci_bus *bus,
unsigned int devfn, int offset)
{
struct pci_controller *hose = pci_bus_to_host(bus);
struct mpc83xx_pcie_priv *pcie = hose->dn->data;
u32 dev_base = bus->number << 24 | devfn << 16;
int ret;
ret = mpc83xx_pcie_exclude_device(bus, devfn);
if (ret)
return NULL;
offset &= 0xfff;
/* Type 0 */
if (bus->number == hose->first_busno)
return pcie->cfg_type0 + offset;
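	/* Type 1: retarget the outbound config window only when the target bus/devfn changes. */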
if (pcie->dev_base == dev_base)
goto mapped;
out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, dev_base);
pcie->dev_base = dev_base;
mapped:
return pcie->cfg_type1 + offset;
}
static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 val)
{
struct pci_controller *hose = pci_bus_to_host(bus);
/* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */
if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno)
val &= 0xffffff00;
return pci_generic_config_write(bus, devfn, offset, len, val);
}
static struct pci_ops mpc83xx_pcie_ops = {
.map_bus = mpc83xx_pcie_remap_cfg,
.read = pci_generic_config_read,
.write = mpc83xx_pcie_write_config,
};
static int __init mpc83xx_pcie_setup(struct pci_controller *hose,
struct resource *reg)
{
struct mpc83xx_pcie_priv *pcie;
u32 cfg_bar;
int ret = -ENOMEM;
pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return ret;
pcie->cfg_type0 = ioremap(reg->start, resource_size(reg));
if (!pcie->cfg_type0)
goto err0;
cfg_bar = in_le32(pcie->cfg_type0 + PEX_OUTWIN0_BAR);
if (!cfg_bar) {
/* PCI-E isn't configured. */
ret = -ENODEV;
goto err1;
}
pcie->cfg_type1 = ioremap(cfg_bar, 0x1000);
if (!pcie->cfg_type1)
goto err1;
WARN_ON(hose->dn->data);
hose->dn->data = pcie;
hose->ops = &mpc83xx_pcie_ops;
hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAH, 0);
out_le32(pcie->cfg_type0 + PEX_OUTWIN0_TAL, 0);
if (fsl_pcie_check_link(hose))
hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
return 0;
err1:
iounmap(pcie->cfg_type0);
err0:
kfree(pcie);
return ret;
}
int __init mpc83xx_add_bridge(struct device_node *dev)
{
int ret;
int len;
struct pci_controller *hose;
struct resource rsrc_reg;
struct resource rsrc_cfg;
const int *bus_range;
int primary;
is_mpc83xx_pci = 1;
if (!of_device_is_available(dev)) {
pr_warn("%pOF: disabled by the firmware.\n",
dev);
return -ENODEV;
}
pr_debug("Adding PCI host bridge %pOF\n", dev);
/* Fetch host bridge registers address */
if (of_address_to_resource(dev, 0, &rsrc_reg)) {
printk(KERN_WARNING "Can't get pci register base!\n");
return -ENOMEM;
}
memset(&rsrc_cfg, 0, sizeof(rsrc_cfg));
if (of_address_to_resource(dev, 1, &rsrc_cfg)) {
printk(KERN_WARNING
"No pci config register base in dev tree, "
"using default\n");
/*
* MPC83xx supports up to two host controllers
* one at 0x8500 has config space registers at 0x8300
* one at 0x8600 has config space registers at 0x8380
*/
if ((rsrc_reg.start & 0xfffff) == 0x8500)
rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8300;
else if ((rsrc_reg.start & 0xfffff) == 0x8600)
rsrc_cfg.start = (rsrc_reg.start & 0xfff00000) + 0x8380;
}
/*
* Controller at offset 0x8500 is primary
*/
if ((rsrc_reg.start & 0xfffff) == 0x8500)
primary = 1;
else
primary = 0;
/* Get bus range if any */
bus_range = of_get_property(dev, "bus-range", &len);
if (bus_range == NULL || len < 2 * sizeof(int)) {
printk(KERN_WARNING "Can't get bus-range for %pOF, assume"
" bus 0\n", dev);
}
pci_add_flags(PCI_REASSIGN_ALL_BUS);
hose = pcibios_alloc_controller(dev);
if (!hose)
return -ENOMEM;
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
if (of_device_is_compatible(dev, "fsl,mpc8314-pcie")) {
ret = mpc83xx_pcie_setup(hose, &rsrc_reg);
if (ret)
goto err0;
} else {
setup_indirect_pci(hose, rsrc_cfg.start,
rsrc_cfg.start + 4, 0);
}
printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
"Firmware bus number: %d->%d\n",
(unsigned long long)rsrc_reg.start, hose->first_busno,
hose->last_busno);
pr_debug(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
hose, hose->cfg_addr, hose->cfg_data);
/* Interpret the "ranges" property */
/* This also maps the I/O region and sets isa_io/mem_base */
pci_process_bridge_OF_ranges(hose, dev, primary);
return 0;
err0:
pcibios_free_controller(hose);
return ret;
}
#endif /* CONFIG_PPC_83xx */
u64 fsl_pci_immrbar_base(struct pci_controller *hose)
{
#ifdef CONFIG_PPC_83xx
if (is_mpc83xx_pci) {
struct mpc83xx_pcie_priv *pcie = hose->dn->data;
struct pex_inbound_window *in;
int i;
/* Walk the Root Complex Inbound windows to match IMMR base */
in = pcie->cfg_type0 + PEX_RC_INWIN_BASE;
for (i = 0; i < 4; i++) {
/* not enabled, skip */
if (!(in_le32(&in[i].ar) & PEX_RCIWARn_EN))
continue;
if (get_immrbase() == in_le32(&in[i].tar))
return (u64)in_le32(&in[i].barh) << 32 |
in_le32(&in[i].barl);
}
printk(KERN_WARNING "could not find PCI BAR matching IMMR\n");
}
#endif
#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
if (!is_mpc83xx_pci) {
u32 base;
pci_bus_read_config_dword(hose->bus,
PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base);
/*
* For PEXCSRBAR, bit 3-0 indicate prefetchable and
* address type. So when getting base address, these
* bits should be masked
*/
base &= PCI_BASE_ADDRESS_MEM_MASK;
return base;
}
#endif
return 0;
}
#ifdef CONFIG_PPC_E500
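/*
 * Emulate the load that took a machine check on a PCI access: the target
 * register is filled with all-ones (the value a master-aborted PCI read
 * returns) and any update form also adjusts the base register, so execution
 * can continue past the faulting instruction.
 */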
static int mcheck_handle_load(struct pt_regs *regs, u32 inst)
{
unsigned int rd, ra, rb, d;
rd = get_rt(inst);
ra = get_ra(inst);
rb = get_rb(inst);
d = get_d(inst);
switch (get_op(inst)) {
case 31:
switch (get_xop(inst)) {
case OP_31_XOP_LWZX:
case OP_31_XOP_LWBRX:
regs->gpr[rd] = 0xffffffff;
break;
case OP_31_XOP_LWZUX:
regs->gpr[rd] = 0xffffffff;
regs->gpr[ra] += regs->gpr[rb];
break;
case OP_31_XOP_LBZX:
regs->gpr[rd] = 0xff;
break;
case OP_31_XOP_LBZUX:
regs->gpr[rd] = 0xff;
regs->gpr[ra] += regs->gpr[rb];
break;
case OP_31_XOP_LHZX:
case OP_31_XOP_LHBRX:
regs->gpr[rd] = 0xffff;
break;
case OP_31_XOP_LHZUX:
regs->gpr[rd] = 0xffff;
regs->gpr[ra] += regs->gpr[rb];
break;
case OP_31_XOP_LHAX:
regs->gpr[rd] = ~0UL;
break;
case OP_31_XOP_LHAUX:
regs->gpr[rd] = ~0UL;
regs->gpr[ra] += regs->gpr[rb];
break;
default:
return 0;
}
break;
case OP_LWZ:
regs->gpr[rd] = 0xffffffff;
break;
case OP_LWZU:
regs->gpr[rd] = 0xffffffff;
regs->gpr[ra] += (s16)d;
break;
case OP_LBZ:
regs->gpr[rd] = 0xff;
break;
case OP_LBZU:
regs->gpr[rd] = 0xff;
regs->gpr[ra] += (s16)d;
break;
case OP_LHZ:
regs->gpr[rd] = 0xffff;
break;
case OP_LHZU:
regs->gpr[rd] = 0xffff;
regs->gpr[ra] += (s16)d;
break;
case OP_LHA:
regs->gpr[rd] = ~0UL;
break;
case OP_LHAU:
regs->gpr[rd] = ~0UL;
regs->gpr[ra] += (s16)d;
break;
default:
return 0;
}
return 1;
}
static int is_in_pci_mem_space(phys_addr_t addr)
{
struct pci_controller *hose;
struct resource *res;
int i;
list_for_each_entry(hose, &hose_list, list_node) {
if (!(hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG))
continue;
for (i = 0; i < 3; i++) {
res = &hose->mem_resources[i];
if ((res->flags & IORESOURCE_MEM) &&
addr >= res->start && addr <= res->end)
return 1;
}
}
return 0;
}
int fsl_pci_mcheck_exception(struct pt_regs *regs)
{
u32 inst;
int ret;
phys_addr_t addr = 0;
/* Let KVM/QEMU deal with the exception */
if (regs->msr & MSR_GS)
return 0;
#ifdef CONFIG_PHYS_64BIT
addr = mfspr(SPRN_MCARU);
addr <<= 32;
#endif
addr += mfspr(SPRN_MCAR);
if (is_in_pci_mem_space(addr)) {
if (user_mode(regs))
ret = copy_from_user_nofault(&inst,
(void __user *)regs->nip, sizeof(inst));
else
ret = get_kernel_nofault(inst, (void *)regs->nip);
if (!ret && mcheck_handle_load(regs, inst)) {
regs_add_return_ip(regs, 4);
return 1;
}
}
return 0;
}
#endif
#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
static const struct of_device_id pci_ids[] = {
{ .compatible = "fsl,mpc8540-pci", },
{ .compatible = "fsl,mpc8548-pcie", },
{ .compatible = "fsl,mpc8610-pci", },
{ .compatible = "fsl,mpc8641-pcie", },
{ .compatible = "fsl,qoriq-pcie", },
{ .compatible = "fsl,qoriq-pcie-v2.1", },
{ .compatible = "fsl,qoriq-pcie-v2.2", },
{ .compatible = "fsl,qoriq-pcie-v2.3", },
{ .compatible = "fsl,qoriq-pcie-v2.4", },
{ .compatible = "fsl,qoriq-pcie-v3.0", },
/*
* The following entries are for compatibility with older device
* trees.
*/
{ .compatible = "fsl,p1022-pcie", },
{ .compatible = "fsl,p4080-pcie", },
{},
};
struct device_node *fsl_pci_primary;
void __init fsl_pci_assign_primary(void)
{
struct device_node *np;
/* Callers can specify the primary bus using other means. */
if (fsl_pci_primary)
return;
/* If a PCI host bridge contains an ISA node, it's primary. */
np = of_find_node_by_type(NULL, "isa");
while ((fsl_pci_primary = of_get_parent(np))) {
of_node_put(np);
np = fsl_pci_primary;
if (of_match_node(pci_ids, np) && of_device_is_available(np))
return;
}
/*
* If there's no PCI host bridge with ISA then check for
* PCI host bridge with alias "pci0" (first PCI host bridge).
*/
np = of_find_node_by_path("pci0");
if (np && of_match_node(pci_ids, np) && of_device_is_available(np)) {
fsl_pci_primary = np;
of_node_put(np);
return;
}
if (np)
of_node_put(np);
/*
* If there's no PCI host bridge with ISA, arbitrarily
* designate one as primary. This can go away once
* various bugs with primary-less systems are fixed.
*/
for_each_matching_node(np, pci_ids) {
if (of_device_is_available(np)) {
fsl_pci_primary = np;
return;
}
}
}
#ifdef CONFIG_PM_SLEEP
static irqreturn_t fsl_pci_pme_handle(int irq, void *dev_id)
{
struct pci_controller *hose = dev_id;
struct ccsr_pci __iomem *pci = hose->private_data;
u32 dr;
dr = in_be32(&pci->pex_pme_mes_dr);
if (!dr)
return IRQ_NONE;
out_be32(&pci->pex_pme_mes_dr, dr);
return IRQ_HANDLED;
}
static int fsl_pci_pme_probe(struct pci_controller *hose)
{
struct ccsr_pci __iomem *pci;
struct pci_dev *dev;
int pme_irq;
int res;
u16 pms;
/* Get hose's pci_dev */
dev = list_first_entry(&hose->bus->devices, typeof(*dev), bus_list);
/* PME Disable */
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
pms &= ~PCI_PM_CTRL_PME_ENABLE;
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
pme_irq = irq_of_parse_and_map(hose->dn, 0);
if (!pme_irq) {
dev_err(&dev->dev, "Failed to map PME interrupt.\n");
return -ENXIO;
}
res = devm_request_irq(hose->parent, pme_irq,
fsl_pci_pme_handle,
IRQF_SHARED,
"[PCI] PME", hose);
if (res < 0) {
dev_err(&dev->dev, "Unable to request irq %d for PME\n", pme_irq);
irq_dispose_mapping(pme_irq);
return -ENODEV;
}
pci = hose->private_data;
/* Enable PTOD, ENL23D & EXL23D */
clrbits32(&pci->pex_pme_mes_disr,
PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
out_be32(&pci->pex_pme_mes_ier, 0);
setbits32(&pci->pex_pme_mes_ier,
PME_DISR_EN_PTOD | PME_DISR_EN_ENL23D | PME_DISR_EN_EXL23D);
/* PME Enable */
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pms);
pms |= PCI_PM_CTRL_PME_ENABLE;
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pms);
return 0;
}
static void send_pme_turnoff_message(struct pci_controller *hose)
{
struct ccsr_pci __iomem *pci = hose->private_data;
u32 dr;
int i;
/* Send PME_Turn_Off Message Request */
setbits32(&pci->pex_pmcr, PEX_PMCR_PTOMR);
	/* Wait for turn off to complete */
for (i = 0; i < 150; i++) {
dr = in_be32(&pci->pex_pme_mes_dr);
if (dr) {
out_be32(&pci->pex_pme_mes_dr, dr);
break;
}
udelay(1000);
}
}
static void fsl_pci_syscore_do_suspend(struct pci_controller *hose)
{
send_pme_turnoff_message(hose);
}
static int fsl_pci_syscore_suspend(void)
{
struct pci_controller *hose, *tmp;
list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
fsl_pci_syscore_do_suspend(hose);
return 0;
}
static void fsl_pci_syscore_do_resume(struct pci_controller *hose)
{
struct ccsr_pci __iomem *pci = hose->private_data;
u32 dr;
int i;
/* Send Exit L2 State Message */
setbits32(&pci->pex_pmcr, PEX_PMCR_EXL2S);
	/* Wait for the exit to complete */
for (i = 0; i < 150; i++) {
dr = in_be32(&pci->pex_pme_mes_dr);
if (dr) {
out_be32(&pci->pex_pme_mes_dr, dr);
break;
}
udelay(1000);
}
setup_pci_atmu(hose);
}
static void fsl_pci_syscore_resume(void)
{
struct pci_controller *hose, *tmp;
list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
fsl_pci_syscore_do_resume(hose);
}
static struct syscore_ops pci_syscore_pm_ops = {
.suspend = fsl_pci_syscore_suspend,
.resume = fsl_pci_syscore_resume,
};
#endif
void fsl_pcibios_fixup_phb(struct pci_controller *phb)
{
#ifdef CONFIG_PM_SLEEP
fsl_pci_pme_probe(phb);
#endif
}
static int add_err_dev(struct platform_device *pdev)
{
struct platform_device *errdev;
struct mpc85xx_edac_pci_plat_data pd = {
.of_node = pdev->dev.of_node
};
errdev = platform_device_register_resndata(&pdev->dev,
"mpc85xx-pci-edac",
PLATFORM_DEVID_AUTO,
pdev->resource,
pdev->num_resources,
&pd, sizeof(pd));
return PTR_ERR_OR_ZERO(errdev);
}
static int fsl_pci_probe(struct platform_device *pdev)
{
struct device_node *node;
int ret;
node = pdev->dev.of_node;
ret = fsl_add_bridge(pdev, fsl_pci_primary == node);
if (ret)
return ret;
ret = add_err_dev(pdev);
if (ret)
dev_err(&pdev->dev, "couldn't register error device: %d\n",
ret);
return 0;
}
static struct platform_driver fsl_pci_driver = {
.driver = {
.name = "fsl-pci",
.of_match_table = pci_ids,
},
.probe = fsl_pci_probe,
.driver_managed_dma = true,
};
static int __init fsl_pci_init(void)
{
#ifdef CONFIG_PM_SLEEP
register_syscore_ops(&pci_syscore_pm_ops);
#endif
return platform_driver_register(&fsl_pci_driver);
}
arch_initcall(fsl_pci_init);
#endif
| linux-master | arch/powerpc/sysdev/fsl_pci.c |
/*
* General Purpose functions for the global management of the
* 8260 Communication Processor Module.
* Copyright (c) 1999-2001 Dan Malek <[email protected]>
* Copyright (c) 2000 MontaVista Software, Inc ([email protected])
* 2.3.99 Updates
*
* 2006 (c) MontaVista Software, Inc.
* Vitaly Bordug <[email protected]>
* Merged to arch/powerpc from arch/ppc/syslib/cpm2_common.c
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
/*
*
* In addition to the individual control of the communication
* channels, there are a few functions that globally affect the
* communication processor.
*
* Buffer descriptors must be allocated from the dual ported memory
* space. The allocator for that is here. When the communication
* process is reset, we reclaim the memory available. There is
* currently no deallocator for this memory.
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/cpm2.h>
#include <asm/rheap.h>
#include <sysdev/fsl_soc.h>
cpm_cpm2_t __iomem *cpmp; /* Pointer to comm processor space */
/* We allocate this here because it is used almost exclusively for
* the communication processor devices.
*/
cpm2_map_t __iomem *cpm2_immr;
EXPORT_SYMBOL(cpm2_immr);
#define CPM_MAP_SIZE (0x40000) /* 256k - the PQ3 reserves this amount
of space for CPM as it is larger
than on PQ2 */
void __init cpm2_reset(void)
{
#ifdef CONFIG_PPC_85xx
cpm2_immr = ioremap(get_immrbase() + 0x80000, CPM_MAP_SIZE);
#else
cpm2_immr = ioremap(get_immrbase(), CPM_MAP_SIZE);
#endif
/* Tell everyone where the comm processor resides.
*/
cpmp = &cpm2_immr->im_cpm;
#ifndef CONFIG_PPC_EARLY_DEBUG_CPM
/* Reset the CPM.
*/
cpm_command(CPM_CR_RST, 0);
#endif
}
static DEFINE_SPINLOCK(cmd_lock);
#define MAX_CR_CMD_LOOPS 10000
int cpm_command(u32 command, u8 opcode)
{
int i, ret;
unsigned long flags;
spin_lock_irqsave(&cmd_lock, flags);
ret = 0;
out_be32(&cpmp->cp_cpcr, command | opcode | CPM_CR_FLG);
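	/* The CPM clears CPM_CR_FLG once it has accepted the command; poll for that with a bounded loop. */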
for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
if ((in_be32(&cpmp->cp_cpcr) & CPM_CR_FLG) == 0)
goto out;
printk(KERN_ERR "%s(): Not able to issue CPM command\n", __func__);
ret = -EIO;
out:
spin_unlock_irqrestore(&cmd_lock, flags);
return ret;
}
EXPORT_SYMBOL(cpm_command);
/* Set a baud rate generator. This needs lots of work. There are
* eight BRGs, which can be connected to the CPM channels or output
* as clocks. The BRGs are in two different blocks of internal
* memory mapped space.
* The baud rate clock is the system clock divided by something.
* It was set up long ago during the initial boot phase and is
* given to us.
* Baud rate clocks are zero-based in the driver code (as that maps
* to port numbers). Documentation uses 1-based numbering.
*/
void __cpm2_setbrg(uint brg, uint rate, uint clk, int div16, int src)
{
u32 __iomem *bp;
u32 val;
/* This is good enough to get SMCs running.....
*/
if (brg < 4) {
bp = &cpm2_immr->im_brgc1;
} else {
bp = &cpm2_immr->im_brgc5;
brg -= 4;
}
bp += brg;
/* Round the clock divider to the nearest integer. */
val = (((clk * 2 / rate) - 1) & ~1) | CPM_BRG_EN | src;
if (div16)
val |= CPM_BRG_DIV16;
out_be32(bp, val);
}
EXPORT_SYMBOL(__cpm2_setbrg);
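/*
 * Worked example for the rounding above (illustrative numbers, assuming a
 * 1 MHz BRG input clock): for rate = 1500, clk / rate = 666.67 and
 * clk * 2 / rate = 1333, so ((1333 - 1) & ~1) = 1332 = 666 << 1 is written
 * (plus CPM_BRG_EN and the source bits). Once the hardware's implicit +1 on
 * the count field is applied (a hardware detail assumed here, not stated in
 * this file) this gives a divide ratio of 667, the nearest integer to 666.67,
 * rather than the truncated value 666.
 */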
int __init cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode)
{
int ret = 0;
int shift;
int i, bits = 0;
u32 __iomem *reg;
u32 mask = 7;
u8 clk_map[][3] = {
{CPM_CLK_FCC1, CPM_BRG5, 0},
{CPM_CLK_FCC1, CPM_BRG6, 1},
{CPM_CLK_FCC1, CPM_BRG7, 2},
{CPM_CLK_FCC1, CPM_BRG8, 3},
{CPM_CLK_FCC1, CPM_CLK9, 4},
{CPM_CLK_FCC1, CPM_CLK10, 5},
{CPM_CLK_FCC1, CPM_CLK11, 6},
{CPM_CLK_FCC1, CPM_CLK12, 7},
{CPM_CLK_FCC2, CPM_BRG5, 0},
{CPM_CLK_FCC2, CPM_BRG6, 1},
{CPM_CLK_FCC2, CPM_BRG7, 2},
{CPM_CLK_FCC2, CPM_BRG8, 3},
{CPM_CLK_FCC2, CPM_CLK13, 4},
{CPM_CLK_FCC2, CPM_CLK14, 5},
{CPM_CLK_FCC2, CPM_CLK15, 6},
{CPM_CLK_FCC2, CPM_CLK16, 7},
{CPM_CLK_FCC3, CPM_BRG5, 0},
{CPM_CLK_FCC3, CPM_BRG6, 1},
{CPM_CLK_FCC3, CPM_BRG7, 2},
{CPM_CLK_FCC3, CPM_BRG8, 3},
{CPM_CLK_FCC3, CPM_CLK13, 4},
{CPM_CLK_FCC3, CPM_CLK14, 5},
{CPM_CLK_FCC3, CPM_CLK15, 6},
{CPM_CLK_FCC3, CPM_CLK16, 7},
{CPM_CLK_SCC1, CPM_BRG1, 0},
{CPM_CLK_SCC1, CPM_BRG2, 1},
{CPM_CLK_SCC1, CPM_BRG3, 2},
{CPM_CLK_SCC1, CPM_BRG4, 3},
{CPM_CLK_SCC1, CPM_CLK11, 4},
{CPM_CLK_SCC1, CPM_CLK12, 5},
{CPM_CLK_SCC1, CPM_CLK3, 6},
{CPM_CLK_SCC1, CPM_CLK4, 7},
{CPM_CLK_SCC2, CPM_BRG1, 0},
{CPM_CLK_SCC2, CPM_BRG2, 1},
{CPM_CLK_SCC2, CPM_BRG3, 2},
{CPM_CLK_SCC2, CPM_BRG4, 3},
{CPM_CLK_SCC2, CPM_CLK11, 4},
{CPM_CLK_SCC2, CPM_CLK12, 5},
{CPM_CLK_SCC2, CPM_CLK3, 6},
{CPM_CLK_SCC2, CPM_CLK4, 7},
{CPM_CLK_SCC3, CPM_BRG1, 0},
{CPM_CLK_SCC3, CPM_BRG2, 1},
{CPM_CLK_SCC3, CPM_BRG3, 2},
{CPM_CLK_SCC3, CPM_BRG4, 3},
{CPM_CLK_SCC3, CPM_CLK5, 4},
{CPM_CLK_SCC3, CPM_CLK6, 5},
{CPM_CLK_SCC3, CPM_CLK7, 6},
{CPM_CLK_SCC3, CPM_CLK8, 7},
{CPM_CLK_SCC4, CPM_BRG1, 0},
{CPM_CLK_SCC4, CPM_BRG2, 1},
{CPM_CLK_SCC4, CPM_BRG3, 2},
{CPM_CLK_SCC4, CPM_BRG4, 3},
{CPM_CLK_SCC4, CPM_CLK5, 4},
{CPM_CLK_SCC4, CPM_CLK6, 5},
{CPM_CLK_SCC4, CPM_CLK7, 6},
{CPM_CLK_SCC4, CPM_CLK8, 7},
};
switch (target) {
case CPM_CLK_SCC1:
reg = &cpm2_immr->im_cpmux.cmx_scr;
shift = 24;
break;
case CPM_CLK_SCC2:
reg = &cpm2_immr->im_cpmux.cmx_scr;
shift = 16;
break;
case CPM_CLK_SCC3:
reg = &cpm2_immr->im_cpmux.cmx_scr;
shift = 8;
break;
case CPM_CLK_SCC4:
reg = &cpm2_immr->im_cpmux.cmx_scr;
shift = 0;
break;
case CPM_CLK_FCC1:
reg = &cpm2_immr->im_cpmux.cmx_fcr;
shift = 24;
break;
case CPM_CLK_FCC2:
reg = &cpm2_immr->im_cpmux.cmx_fcr;
shift = 16;
break;
case CPM_CLK_FCC3:
reg = &cpm2_immr->im_cpmux.cmx_fcr;
shift = 8;
break;
default:
printk(KERN_ERR "cpm2_clock_setup: invalid clock target\n");
return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(clk_map); i++) {
if (clk_map[i][0] == target && clk_map[i][1] == clock) {
bits = clk_map[i][2];
break;
}
}
if (i == ARRAY_SIZE(clk_map))
ret = -EINVAL;
bits <<= shift;
mask <<= shift;
if (mode == CPM_CLK_RTX) {
bits |= bits << 3;
mask |= mask << 3;
} else if (mode == CPM_CLK_RX) {
bits <<= 3;
mask <<= 3;
}
out_be32(reg, (in_be32(reg) & ~mask) | bits);
return ret;
}
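/*
 * Example usage (an illustrative sketch, not taken from this file): routing
 * BRG1 to SCC1 for both receive and transmit could look like
 *
 *	cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_RTX);
 *
 * which finds selector value 0 for {CPM_CLK_SCC1, CPM_BRG1} in clk_map and
 * writes it into both the RX and TX clock-select fields for SCC1 in CMXSCR
 * (shift 24 above).
 */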
int __init cpm2_smc_clk_setup(enum cpm_clk_target target, int clock)
{
int ret = 0;
int shift;
int i, bits = 0;
u8 __iomem *reg;
u8 mask = 3;
u8 clk_map[][3] = {
{CPM_CLK_SMC1, CPM_BRG1, 0},
{CPM_CLK_SMC1, CPM_BRG7, 1},
{CPM_CLK_SMC1, CPM_CLK7, 2},
{CPM_CLK_SMC1, CPM_CLK9, 3},
{CPM_CLK_SMC2, CPM_BRG2, 0},
{CPM_CLK_SMC2, CPM_BRG8, 1},
{CPM_CLK_SMC2, CPM_CLK4, 2},
{CPM_CLK_SMC2, CPM_CLK15, 3},
};
switch (target) {
case CPM_CLK_SMC1:
reg = &cpm2_immr->im_cpmux.cmx_smr;
mask = 3;
shift = 4;
break;
case CPM_CLK_SMC2:
reg = &cpm2_immr->im_cpmux.cmx_smr;
mask = 3;
shift = 0;
break;
default:
printk(KERN_ERR "cpm2_smc_clock_setup: invalid clock target\n");
return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(clk_map); i++) {
if (clk_map[i][0] == target && clk_map[i][1] == clock) {
bits = clk_map[i][2];
break;
}
}
if (i == ARRAY_SIZE(clk_map))
ret = -EINVAL;
bits <<= shift;
mask <<= shift;
out_8(reg, (in_8(reg) & ~mask) | bits);
return ret;
}
struct cpm2_ioports {
u32 dir, par, sor, odr, dat;
u32 res[3];
};
void __init cpm2_set_pin(int port, int pin, int flags)
{
struct cpm2_ioports __iomem *iop =
(struct cpm2_ioports __iomem *)&cpm2_immr->im_ioport;
pin = 1 << (31 - pin);
if (flags & CPM_PIN_OUTPUT)
setbits32(&iop[port].dir, pin);
else
clrbits32(&iop[port].dir, pin);
if (!(flags & CPM_PIN_GPIO))
setbits32(&iop[port].par, pin);
else
clrbits32(&iop[port].par, pin);
if (flags & CPM_PIN_SECONDARY)
setbits32(&iop[port].sor, pin);
else
clrbits32(&iop[port].sor, pin);
if (flags & CPM_PIN_OPENDRAIN)
setbits32(&iop[port].odr, pin);
else
clrbits32(&iop[port].odr, pin);
}
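/*
 * Example (illustrative, not part of the original file): configuring pin 10
 * of port index 2 as a dedicated peripheral output on its primary function
 * might be done with
 *
 *	cpm2_set_pin(2, 10, CPM_PIN_OUTPUT);
 *
 * The pin number is turned into a big-endian bit mask (1 << (31 - pin)),
 * and leaving out CPM_PIN_GPIO keeps the pin assigned to the peripheral
 * (par) function rather than GPIO.
 */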
| linux-master | arch/powerpc/sysdev/cpm2.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* MPIC timer driver
*
* Copyright 2013 Freescale Semiconductor, Inc.
* Author: Dongsheng Wang <[email protected]>
* Li Yang <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/syscore_ops.h>
#include <sysdev/fsl_soc.h>
#include <asm/io.h>
#include <asm/mpic_timer.h>
#define FSL_GLOBAL_TIMER 0x1
/* Clock Ratio
* Divide by 64 0x00000300
* Divide by 32 0x00000200
* Divide by 16 0x00000100
* Divide by 8 0x00000000 (Hardware default div)
*/
#define MPIC_TIMER_TCR_CLKDIV 0x00000300
#define MPIC_TIMER_TCR_ROVR_OFFSET 24
#define TIMER_STOP 0x80000000
#define GTCCR_TOG 0x80000000
#define TIMERS_PER_GROUP 4
#define MAX_TICKS (~0U >> 1)
#define MAX_TICKS_CASCADE (~0U)
#define TIMER_OFFSET(num) (1 << (TIMERS_PER_GROUP - 1 - num))
struct timer_regs {
u32 gtccr;
u32 res0[3];
u32 gtbcr;
u32 res1[3];
u32 gtvpr;
u32 res2[3];
u32 gtdr;
u32 res3[3];
};
struct cascade_priv {
u32 tcr_value; /* TCR register: CASC & ROVR value */
unsigned int cascade_map; /* cascade map */
unsigned int timer_num; /* cascade control timer */
};
struct timer_group_priv {
struct timer_regs __iomem *regs;
struct mpic_timer timer[TIMERS_PER_GROUP];
struct list_head node;
unsigned int timerfreq;
unsigned int idle;
unsigned int flags;
spinlock_t lock;
void __iomem *group_tcr;
};
static struct cascade_priv cascade_timer[] = {
/* cascade timer 0 and 1 */
{0x1, 0xc, 0x1},
/* cascade timer 1 and 2 */
{0x2, 0x6, 0x2},
/* cascade timer 2 and 3 */
{0x4, 0x3, 0x3}
};
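/*
 * The cascade_map field uses the same bit numbering as TIMER_OFFSET():
 * bit (TIMERS_PER_GROUP - 1 - num) stands for timer "num", so 0xc covers
 * timers 0 and 1 while 0x3 covers timers 2 and 3.
 */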
static LIST_HEAD(timer_group_list);
static void convert_ticks_to_time(struct timer_group_priv *priv,
const u64 ticks, time64_t *time)
{
*time = (u64)div_u64(ticks, priv->timerfreq);
}
/* the time set by the user is converted to "ticks" */
static int convert_time_to_ticks(struct timer_group_priv *priv,
time64_t time, u64 *ticks)
{
u64 max_value; /* prevent u64 overflow */
max_value = div_u64(ULLONG_MAX, priv->timerfreq);
if (time > max_value)
return -EINVAL;
*ticks = (u64)time * (u64)priv->timerfreq;
return 0;
}
/* detect whether there is a cascade timer available */
static struct mpic_timer *detect_idle_cascade_timer(
struct timer_group_priv *priv)
{
struct cascade_priv *casc_priv;
unsigned int map;
unsigned int array_size = ARRAY_SIZE(cascade_timer);
unsigned int num;
unsigned int i;
unsigned long flags;
casc_priv = cascade_timer;
for (i = 0; i < array_size; i++) {
spin_lock_irqsave(&priv->lock, flags);
map = casc_priv->cascade_map & priv->idle;
if (map == casc_priv->cascade_map) {
num = casc_priv->timer_num;
priv->timer[num].cascade_handle = casc_priv;
/* set timer busy */
priv->idle &= ~casc_priv->cascade_map;
spin_unlock_irqrestore(&priv->lock, flags);
return &priv->timer[num];
}
spin_unlock_irqrestore(&priv->lock, flags);
casc_priv++;
}
return NULL;
}
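/*
 * In a cascade the high-order timer "num" is loaded with
 * ticks / MAX_TICKS_CASCADE and timer "num - 1" with the remainder;
 * mpic_get_remain_time() recombines the two counters the same way.
 */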
static int set_cascade_timer(struct timer_group_priv *priv, u64 ticks,
unsigned int num)
{
struct cascade_priv *casc_priv;
u32 tcr;
u32 tmp_ticks;
u32 rem_ticks;
/* set group tcr reg for cascade */
casc_priv = priv->timer[num].cascade_handle;
if (!casc_priv)
return -EINVAL;
tcr = casc_priv->tcr_value |
(casc_priv->tcr_value << MPIC_TIMER_TCR_ROVR_OFFSET);
setbits32(priv->group_tcr, tcr);
tmp_ticks = div_u64_rem(ticks, MAX_TICKS_CASCADE, &rem_ticks);
out_be32(&priv->regs[num].gtccr, 0);
out_be32(&priv->regs[num].gtbcr, tmp_ticks | TIMER_STOP);
out_be32(&priv->regs[num - 1].gtccr, 0);
out_be32(&priv->regs[num - 1].gtbcr, rem_ticks);
return 0;
}
static struct mpic_timer *get_cascade_timer(struct timer_group_priv *priv,
u64 ticks)
{
struct mpic_timer *allocated_timer;
/* Two cascade timers: Support the maximum time */
const u64 max_ticks = (u64)MAX_TICKS * (u64)MAX_TICKS_CASCADE;
int ret;
if (ticks > max_ticks)
return NULL;
/* detect idle timer */
allocated_timer = detect_idle_cascade_timer(priv);
if (!allocated_timer)
return NULL;
/* set ticks to timer */
ret = set_cascade_timer(priv, ticks, allocated_timer->num);
if (ret < 0)
return NULL;
return allocated_timer;
}
static struct mpic_timer *get_timer(time64_t time)
{
struct timer_group_priv *priv;
struct mpic_timer *timer;
u64 ticks;
unsigned int num;
unsigned int i;
unsigned long flags;
int ret;
list_for_each_entry(priv, &timer_group_list, node) {
ret = convert_time_to_ticks(priv, time, &ticks);
if (ret < 0)
return NULL;
if (ticks > MAX_TICKS) {
if (!(priv->flags & FSL_GLOBAL_TIMER))
return NULL;
timer = get_cascade_timer(priv, ticks);
if (!timer)
continue;
return timer;
}
for (i = 0; i < TIMERS_PER_GROUP; i++) {
/* one timer: Reverse allocation */
num = TIMERS_PER_GROUP - 1 - i;
spin_lock_irqsave(&priv->lock, flags);
if (priv->idle & (1 << i)) {
/* set timer busy */
priv->idle &= ~(1 << i);
/* set ticks & stop timer */
out_be32(&priv->regs[num].gtbcr,
ticks | TIMER_STOP);
out_be32(&priv->regs[num].gtccr, 0);
priv->timer[num].cascade_handle = NULL;
spin_unlock_irqrestore(&priv->lock, flags);
return &priv->timer[num];
}
spin_unlock_irqrestore(&priv->lock, flags);
}
}
return NULL;
}
/**
* mpic_start_timer - start hardware timer
* @handle: the timer to be started.
*
* The handler registered via mpic_request_timer() (->fn, called with ->dev)
* is invoked from the hardware interrupt once the requested time64_t
* interval has elapsed.
*/
void mpic_start_timer(struct mpic_timer *handle)
{
struct timer_group_priv *priv = container_of(handle,
struct timer_group_priv, timer[handle->num]);
clrbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP);
}
EXPORT_SYMBOL(mpic_start_timer);
/**
* mpic_stop_timer - stop hardware timer
* @handle: the timer to be stopped
*
* A running timer keeps generating periodic interrupts; this stops it and
* clears its current count.
*/
void mpic_stop_timer(struct mpic_timer *handle)
{
struct timer_group_priv *priv = container_of(handle,
struct timer_group_priv, timer[handle->num]);
struct cascade_priv *casc_priv;
setbits32(&priv->regs[handle->num].gtbcr, TIMER_STOP);
casc_priv = priv->timer[handle->num].cascade_handle;
if (casc_priv) {
out_be32(&priv->regs[handle->num].gtccr, 0);
out_be32(&priv->regs[handle->num - 1].gtccr, 0);
} else {
out_be32(&priv->regs[handle->num].gtccr, 0);
}
}
EXPORT_SYMBOL(mpic_stop_timer);
/**
* mpic_get_remain_time - get the remaining timer time
* @handle: the timer to be queried.
* @time: returned remaining time, in seconds
*
* Query how much time is left before the timer expires.
*/
void mpic_get_remain_time(struct mpic_timer *handle, time64_t *time)
{
struct timer_group_priv *priv = container_of(handle,
struct timer_group_priv, timer[handle->num]);
struct cascade_priv *casc_priv;
u64 ticks;
u32 tmp_ticks;
casc_priv = priv->timer[handle->num].cascade_handle;
if (casc_priv) {
tmp_ticks = in_be32(&priv->regs[handle->num].gtccr);
tmp_ticks &= ~GTCCR_TOG;
ticks = ((u64)tmp_ticks & UINT_MAX) * (u64)MAX_TICKS_CASCADE;
tmp_ticks = in_be32(&priv->regs[handle->num - 1].gtccr);
ticks += tmp_ticks;
} else {
ticks = in_be32(&priv->regs[handle->num].gtccr);
ticks &= ~GTCCR_TOG;
}
convert_ticks_to_time(priv, ticks, time);
}
EXPORT_SYMBOL(mpic_get_remain_time);
/**
* mpic_free_timer - free hardware timer
* @handle: the timer to be removed.
*
* Free the timer.
*
* Note: can not be used in interrupt context.
*/
void mpic_free_timer(struct mpic_timer *handle)
{
struct timer_group_priv *priv = container_of(handle,
struct timer_group_priv, timer[handle->num]);
struct cascade_priv *casc_priv;
unsigned long flags;
mpic_stop_timer(handle);
casc_priv = priv->timer[handle->num].cascade_handle;
free_irq(priv->timer[handle->num].irq, priv->timer[handle->num].dev);
spin_lock_irqsave(&priv->lock, flags);
if (casc_priv) {
u32 tcr;
tcr = casc_priv->tcr_value | (casc_priv->tcr_value <<
MPIC_TIMER_TCR_ROVR_OFFSET);
clrbits32(priv->group_tcr, tcr);
priv->idle |= casc_priv->cascade_map;
priv->timer[handle->num].cascade_handle = NULL;
} else {
priv->idle |= TIMER_OFFSET(handle->num);
}
spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL(mpic_free_timer);
/**
* mpic_request_timer - get a hardware timer
* @fn: interrupt handler function
* @dev: data passed to the interrupt handler
* @time: requested timer interval, in seconds
*
* Allocate a hardware timer, program it with @time and request its
* interrupt. Returns a handle on success, or NULL on failure.
*/
struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev,
time64_t time)
{
struct mpic_timer *allocated_timer;
int ret;
if (list_empty(&timer_group_list))
return NULL;
if (time < 0)
return NULL;
allocated_timer = get_timer(time);
if (!allocated_timer)
return NULL;
ret = request_irq(allocated_timer->irq, fn,
IRQF_TRIGGER_LOW, "global-timer", dev);
if (ret) {
mpic_free_timer(allocated_timer);
return NULL;
}
allocated_timer->dev = dev;
return allocated_timer;
}
EXPORT_SYMBOL(mpic_request_timer);
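/*
 * Usage sketch for the exported API (illustrative only; "my_handler" and
 * "my_dev" are hypothetical names, not part of this driver):
 *
 *	struct mpic_timer *t;
 *
 *	t = mpic_request_timer(my_handler, my_dev, 30);
 *	if (t)
 *		mpic_start_timer(t);
 *	...
 *	mpic_free_timer(t);
 */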
static int __init timer_group_get_freq(struct device_node *np,
struct timer_group_priv *priv)
{
u32 div;
if (priv->flags & FSL_GLOBAL_TIMER) {
struct device_node *dn;
dn = of_find_compatible_node(NULL, NULL, "fsl,mpic");
if (dn) {
of_property_read_u32(dn, "clock-frequency",
&priv->timerfreq);
of_node_put(dn);
}
}
if (priv->timerfreq <= 0)
return -EINVAL;
if (priv->flags & FSL_GLOBAL_TIMER) {
div = (1 << (MPIC_TIMER_TCR_CLKDIV >> 8)) * 8;
priv->timerfreq /= div;
}
return 0;
}
static int __init timer_group_get_irq(struct device_node *np,
struct timer_group_priv *priv)
{
const u32 all_timer[] = { 0, TIMERS_PER_GROUP };
const u32 *p;
u32 offset;
u32 count;
unsigned int i;
unsigned int j;
unsigned int irq_index = 0;
unsigned int irq;
int len;
p = of_get_property(np, "fsl,available-ranges", &len);
if (p && len % (2 * sizeof(u32)) != 0) {
pr_err("%pOF: malformed available-ranges property.\n", np);
return -EINVAL;
}
if (!p) {
p = all_timer;
len = sizeof(all_timer);
}
len /= 2 * sizeof(u32);
for (i = 0; i < len; i++) {
offset = p[i * 2];
count = p[i * 2 + 1];
for (j = 0; j < count; j++) {
irq = irq_of_parse_and_map(np, irq_index);
if (!irq) {
pr_err("%pOF: irq parse and map failed.\n", np);
return -EINVAL;
}
/* Set timer idle */
priv->idle |= TIMER_OFFSET((offset + j));
priv->timer[offset + j].irq = irq;
priv->timer[offset + j].num = offset + j;
irq_index++;
}
}
return 0;
}
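/*
 * Illustrative device-tree fragment (an assumption, not taken from this
 * file): "fsl,available-ranges" is read as <offset count> pairs, so
 *
 *	fsl,available-ranges = <1 3>;
 *
 * would mark timers 1, 2 and 3 of the group as usable, with one interrupt
 * specifier expected per timer in that range.
 */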
static void __init timer_group_init(struct device_node *np)
{
struct timer_group_priv *priv;
unsigned int i = 0;
int ret;
priv = kzalloc(sizeof(struct timer_group_priv), GFP_KERNEL);
if (!priv) {
pr_err("%pOF: cannot allocate memory for group.\n", np);
return;
}
if (of_device_is_compatible(np, "fsl,mpic-global-timer"))
priv->flags |= FSL_GLOBAL_TIMER;
priv->regs = of_iomap(np, i++);
if (!priv->regs) {
pr_err("%pOF: cannot ioremap timer register address.\n", np);
goto out;
}
if (priv->flags & FSL_GLOBAL_TIMER) {
priv->group_tcr = of_iomap(np, i++);
if (!priv->group_tcr) {
pr_err("%pOF: cannot ioremap tcr address.\n", np);
goto out;
}
}
ret = timer_group_get_freq(np, priv);
if (ret < 0) {
pr_err("%pOF: cannot get timer frequency.\n", np);
goto out;
}
ret = timer_group_get_irq(np, priv);
if (ret < 0) {
pr_err("%pOF: cannot get timer irqs.\n", np);
goto out;
}
spin_lock_init(&priv->lock);
/* Init FSL timer hardware */
if (priv->flags & FSL_GLOBAL_TIMER)
setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV);
list_add_tail(&priv->node, &timer_group_list);
return;
out:
if (priv->regs)
iounmap(priv->regs);
if (priv->group_tcr)
iounmap(priv->group_tcr);
kfree(priv);
}
static void mpic_timer_resume(void)
{
struct timer_group_priv *priv;
list_for_each_entry(priv, &timer_group_list, node) {
/* Init FSL timer hardware */
if (priv->flags & FSL_GLOBAL_TIMER)
setbits32(priv->group_tcr, MPIC_TIMER_TCR_CLKDIV);
}
}
static const struct of_device_id mpic_timer_ids[] = {
{ .compatible = "fsl,mpic-global-timer", },
{},
};
static struct syscore_ops mpic_timer_syscore_ops = {
.resume = mpic_timer_resume,
};
static int __init mpic_timer_init(void)
{
struct device_node *np = NULL;
for_each_matching_node(np, mpic_timer_ids)
timer_group_init(np);
register_syscore_ops(&mpic_timer_syscore_ops);
if (list_empty(&timer_group_list))
return -ENODEV;
return 0;
}
subsys_initcall(mpic_timer_init);
| linux-master | arch/powerpc/sysdev/mpic_timer.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2011-2012, Meador Inge, Mentor Graphics Corporation.
*
* Some ideas based on un-pushed work done by Vivek Mahajan, Jason Jin, and
* Mingkai Hu from Freescale Semiconductor, Inc.
*/
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/mpic_msgr.h>
#define MPIC_MSGR_REGISTERS_PER_BLOCK 4
#define MPIC_MSGR_STRIDE 0x10
#define MPIC_MSGR_MER_OFFSET (0x100 / sizeof(u32))
#define MSGR_INUSE 0
#define MSGR_FREE 1
static struct mpic_msgr **mpic_msgrs;
static unsigned int mpic_msgr_count;
static DEFINE_RAW_SPINLOCK(msgrs_lock);
static inline void _mpic_msgr_mer_write(struct mpic_msgr *msgr, u32 value)
{
out_be32(msgr->mer, value);
}
static inline u32 _mpic_msgr_mer_read(struct mpic_msgr *msgr)
{
return in_be32(msgr->mer);
}
static inline void _mpic_msgr_disable(struct mpic_msgr *msgr)
{
u32 mer = _mpic_msgr_mer_read(msgr);
_mpic_msgr_mer_write(msgr, mer & ~(1 << msgr->num));
}
struct mpic_msgr *mpic_msgr_get(unsigned int reg_num)
{
unsigned long flags;
struct mpic_msgr *msgr;
/* Assume busy until proven otherwise. */
msgr = ERR_PTR(-EBUSY);
if (reg_num >= mpic_msgr_count)
return ERR_PTR(-ENODEV);
raw_spin_lock_irqsave(&msgrs_lock, flags);
msgr = mpic_msgrs[reg_num];
if (msgr->in_use == MSGR_FREE)
msgr->in_use = MSGR_INUSE;
raw_spin_unlock_irqrestore(&msgrs_lock, flags);
return msgr;
}
EXPORT_SYMBOL_GPL(mpic_msgr_get);
void mpic_msgr_put(struct mpic_msgr *msgr)
{
unsigned long flags;
raw_spin_lock_irqsave(&msgr->lock, flags);
msgr->in_use = MSGR_FREE;
_mpic_msgr_disable(msgr);
raw_spin_unlock_irqrestore(&msgr->lock, flags);
}
EXPORT_SYMBOL_GPL(mpic_msgr_put);
void mpic_msgr_enable(struct mpic_msgr *msgr)
{
unsigned long flags;
u32 mer;
raw_spin_lock_irqsave(&msgr->lock, flags);
mer = _mpic_msgr_mer_read(msgr);
_mpic_msgr_mer_write(msgr, mer | (1 << msgr->num));
raw_spin_unlock_irqrestore(&msgr->lock, flags);
}
EXPORT_SYMBOL_GPL(mpic_msgr_enable);
void mpic_msgr_disable(struct mpic_msgr *msgr)
{
unsigned long flags;
raw_spin_lock_irqsave(&msgr->lock, flags);
_mpic_msgr_disable(msgr);
raw_spin_unlock_irqrestore(&msgr->lock, flags);
}
EXPORT_SYMBOL_GPL(mpic_msgr_disable);
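/*
 * Usage sketch for the message-register API (illustrative only; error
 * handling trimmed and "my_handler" is a hypothetical name):
 *
 *	struct mpic_msgr *msgr = mpic_msgr_get(0);
 *
 *	if (!IS_ERR(msgr)) {
 *		request_irq(msgr->irq, my_handler, 0, "mpic-msgr", NULL);
 *		mpic_msgr_enable(msgr);
 *		...
 *		mpic_msgr_put(msgr);
 *	}
 */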
/* The following three functions are used to compute the order and number of
* the message register blocks. They are clearly very inefficient. However,
* they are called *only* a few times during device initialization.
*/
static unsigned int mpic_msgr_number_of_blocks(void)
{
unsigned int count;
struct device_node *aliases;
count = 0;
aliases = of_find_node_by_name(NULL, "aliases");
if (aliases) {
char buf[32];
for (;;) {
snprintf(buf, sizeof(buf), "mpic-msgr-block%d", count);
if (!of_property_present(aliases, buf))
break;
count += 1;
}
of_node_put(aliases);
}
return count;
}
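/*
 * The block count comes from device-tree aliases named "mpic-msgr-block0",
 * "mpic-msgr-block1", and so on; an assumed, illustrative fragment:
 *
 *	aliases {
 *		mpic-msgr-block0 = &msgr_block0;
 *	};
 */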
static unsigned int mpic_msgr_number_of_registers(void)
{
return mpic_msgr_number_of_blocks() * MPIC_MSGR_REGISTERS_PER_BLOCK;
}
static int mpic_msgr_block_number(struct device_node *node)
{
struct device_node *aliases;
unsigned int index, number_of_blocks;
char buf[64];
number_of_blocks = mpic_msgr_number_of_blocks();
aliases = of_find_node_by_name(NULL, "aliases");
if (!aliases)
return -1;
for (index = 0; index < number_of_blocks; ++index) {
struct property *prop;
struct device_node *tn;
snprintf(buf, sizeof(buf), "mpic-msgr-block%d", index);
prop = of_find_property(aliases, buf, NULL);
tn = of_find_node_by_path(prop->value);
if (node == tn) {
of_node_put(tn);
break;
}
of_node_put(tn);
}
of_node_put(aliases);
return index == number_of_blocks ? -1 : index;
}
/* The probe function for a single message register block.
*/
static int mpic_msgr_probe(struct platform_device *dev)
{
void __iomem *msgr_block_addr;
int block_number;
struct resource rsrc;
unsigned int i;
unsigned int irq_index;
struct device_node *np = dev->dev.of_node;
unsigned int receive_mask;
const unsigned int *prop;
if (!np) {
dev_err(&dev->dev, "Device OF-Node is NULL");
return -EFAULT;
}
/* Allocate the message register array upon the first device
* registered.
*/
if (!mpic_msgrs) {
mpic_msgr_count = mpic_msgr_number_of_registers();
dev_info(&dev->dev, "Found %d message registers\n",
mpic_msgr_count);
mpic_msgrs = kcalloc(mpic_msgr_count, sizeof(*mpic_msgrs),
GFP_KERNEL);
if (!mpic_msgrs) {
dev_err(&dev->dev,
"No memory for message register blocks\n");
return -ENOMEM;
}
}
dev_info(&dev->dev, "Of-device full name %pOF\n", np);
/* IO map the message register block. */
of_address_to_resource(np, 0, &rsrc);
msgr_block_addr = devm_ioremap(&dev->dev, rsrc.start, resource_size(&rsrc));
if (!msgr_block_addr) {
dev_err(&dev->dev, "Failed to iomap MPIC message registers");
return -EFAULT;
}
/* Ensure the block has a defined order. */
block_number = mpic_msgr_block_number(np);
if (block_number < 0) {
dev_err(&dev->dev,
"Failed to find message register block alias\n");
return -ENODEV;
}
dev_info(&dev->dev, "Setting up message register block %d\n",
block_number);
/* Grab the receive mask which specifies what registers can receive
* interrupts.
*/
prop = of_get_property(np, "mpic-msgr-receive-mask", NULL);
receive_mask = (prop) ? *prop : 0xF;
/* Build up the appropriate message register data structures. */
for (i = 0, irq_index = 0; i < MPIC_MSGR_REGISTERS_PER_BLOCK; ++i) {
struct mpic_msgr *msgr;
unsigned int reg_number;
msgr = kzalloc(sizeof(struct mpic_msgr), GFP_KERNEL);
if (!msgr) {
dev_err(&dev->dev, "No memory for message register\n");
return -ENOMEM;
}
reg_number = block_number * MPIC_MSGR_REGISTERS_PER_BLOCK + i;
msgr->base = msgr_block_addr + i * MPIC_MSGR_STRIDE;
msgr->mer = msgr->base + MPIC_MSGR_MER_OFFSET;
msgr->in_use = MSGR_FREE;
msgr->num = i;
raw_spin_lock_init(&msgr->lock);
if (receive_mask & (1 << i)) {
msgr->irq = irq_of_parse_and_map(np, irq_index);
if (!msgr->irq) {
dev_err(&dev->dev,
"Missing interrupt specifier");
kfree(msgr);
return -EFAULT;
}
irq_index += 1;
} else {
msgr->irq = 0;
}
mpic_msgrs[reg_number] = msgr;
mpic_msgr_disable(msgr);
dev_info(&dev->dev, "Register %d initialized: irq %d\n",
reg_number, msgr->irq);
}
return 0;
}
static const struct of_device_id mpic_msgr_ids[] = {
{
.compatible = "fsl,mpic-v3.1-msgr",
.data = NULL,
},
{}
};
static struct platform_driver mpic_msgr_driver = {
.driver = {
.name = "mpic-msgr",
.of_match_table = mpic_msgr_ids,
},
.probe = mpic_msgr_probe,
};
static __init int mpic_msgr_init(void)
{
return platform_driver_register(&mpic_msgr_driver);
}
subsys_initcall(mpic_msgr_init);
| linux-master | arch/powerpc/sysdev/mpic_msgr.c |
/*
* arch/powerpc/kernel/mpic.c
*
* Driver for interrupt controllers following the OpenPIC standard, the
* common implementation being IBM's MPIC. This driver also can deal
* with various broken implementations of this HW.
*
* Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
* Copyright 2010-2012 Freescale Semiconductor, Inc.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#undef DEBUG
#undef DEBUG_IPI
#undef DEBUG_IRQ
#undef DEBUG_LOW
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/ratelimit.h>
#include <linux/pgtable.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/ptrace.h>
#include <asm/signal.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/mpic.h>
#include <asm/smp.h>
#include "mpic.h"
#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif
struct bus_type mpic_subsys = {
.name = "mpic",
.dev_name = "mpic",
};
EXPORT_SYMBOL_GPL(mpic_subsys);
static struct mpic *mpics;
static struct mpic *mpic_primary;
static DEFINE_RAW_SPINLOCK(mpic_lock);
#ifdef CONFIG_PPC32 /* XXX for now */
#ifdef CONFIG_IRQ_ALL_CPUS
#define distribute_irqs (1)
#else
#define distribute_irqs (0)
#endif
#endif
#ifdef CONFIG_MPIC_WEIRD
static u32 mpic_infos[][MPIC_IDX_END] = {
[0] = { /* Original OpenPIC compatible MPIC */
MPIC_GREG_BASE,
MPIC_GREG_FEATURE_0,
MPIC_GREG_GLOBAL_CONF_0,
MPIC_GREG_VENDOR_ID,
MPIC_GREG_IPI_VECTOR_PRI_0,
MPIC_GREG_IPI_STRIDE,
MPIC_GREG_SPURIOUS,
MPIC_GREG_TIMER_FREQ,
MPIC_TIMER_BASE,
MPIC_TIMER_STRIDE,
MPIC_TIMER_CURRENT_CNT,
MPIC_TIMER_BASE_CNT,
MPIC_TIMER_VECTOR_PRI,
MPIC_TIMER_DESTINATION,
MPIC_CPU_BASE,
MPIC_CPU_STRIDE,
MPIC_CPU_IPI_DISPATCH_0,
MPIC_CPU_IPI_DISPATCH_STRIDE,
MPIC_CPU_CURRENT_TASK_PRI,
MPIC_CPU_WHOAMI,
MPIC_CPU_INTACK,
MPIC_CPU_EOI,
MPIC_CPU_MCACK,
MPIC_IRQ_BASE,
MPIC_IRQ_STRIDE,
MPIC_IRQ_VECTOR_PRI,
MPIC_VECPRI_VECTOR_MASK,
MPIC_VECPRI_POLARITY_POSITIVE,
MPIC_VECPRI_POLARITY_NEGATIVE,
MPIC_VECPRI_SENSE_LEVEL,
MPIC_VECPRI_SENSE_EDGE,
MPIC_VECPRI_POLARITY_MASK,
MPIC_VECPRI_SENSE_MASK,
MPIC_IRQ_DESTINATION
},
[1] = { /* Tsi108/109 PIC */
TSI108_GREG_BASE,
TSI108_GREG_FEATURE_0,
TSI108_GREG_GLOBAL_CONF_0,
TSI108_GREG_VENDOR_ID,
TSI108_GREG_IPI_VECTOR_PRI_0,
TSI108_GREG_IPI_STRIDE,
TSI108_GREG_SPURIOUS,
TSI108_GREG_TIMER_FREQ,
TSI108_TIMER_BASE,
TSI108_TIMER_STRIDE,
TSI108_TIMER_CURRENT_CNT,
TSI108_TIMER_BASE_CNT,
TSI108_TIMER_VECTOR_PRI,
TSI108_TIMER_DESTINATION,
TSI108_CPU_BASE,
TSI108_CPU_STRIDE,
TSI108_CPU_IPI_DISPATCH_0,
TSI108_CPU_IPI_DISPATCH_STRIDE,
TSI108_CPU_CURRENT_TASK_PRI,
TSI108_CPU_WHOAMI,
TSI108_CPU_INTACK,
TSI108_CPU_EOI,
TSI108_CPU_MCACK,
TSI108_IRQ_BASE,
TSI108_IRQ_STRIDE,
TSI108_IRQ_VECTOR_PRI,
TSI108_VECPRI_VECTOR_MASK,
TSI108_VECPRI_POLARITY_POSITIVE,
TSI108_VECPRI_POLARITY_NEGATIVE,
TSI108_VECPRI_SENSE_LEVEL,
TSI108_VECPRI_SENSE_EDGE,
TSI108_VECPRI_POLARITY_MASK,
TSI108_VECPRI_SENSE_MASK,
TSI108_IRQ_DESTINATION
},
};
#define MPIC_INFO(name) mpic->hw_set[MPIC_IDX_##name]
#else /* CONFIG_MPIC_WEIRD */
#define MPIC_INFO(name) MPIC_##name
#endif /* CONFIG_MPIC_WEIRD */
static inline unsigned int mpic_processor_id(struct mpic *mpic)
{
unsigned int cpu = 0;
if (!(mpic->flags & MPIC_SECONDARY))
cpu = hard_smp_processor_id();
return cpu;
}
/*
* Register accessor functions
*/
static inline u32 _mpic_read(enum mpic_reg_type type,
struct mpic_reg_bank *rb,
unsigned int reg)
{
switch(type) {
#ifdef CONFIG_PPC_DCR
case mpic_access_dcr:
return dcr_read(rb->dhost, reg);
#endif
case mpic_access_mmio_be:
return in_be32(rb->base + (reg >> 2));
case mpic_access_mmio_le:
default:
return in_le32(rb->base + (reg >> 2));
}
}
static inline void _mpic_write(enum mpic_reg_type type,
struct mpic_reg_bank *rb,
unsigned int reg, u32 value)
{
switch(type) {
#ifdef CONFIG_PPC_DCR
case mpic_access_dcr:
dcr_write(rb->dhost, reg, value);
break;
#endif
case mpic_access_mmio_be:
out_be32(rb->base + (reg >> 2), value);
break;
case mpic_access_mmio_le:
default:
out_le32(rb->base + (reg >> 2), value);
break;
}
}
static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
{
enum mpic_reg_type type = mpic->reg_type;
unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) +
(ipi * MPIC_INFO(GREG_IPI_STRIDE));
if ((mpic->flags & MPIC_BROKEN_IPI) && type == mpic_access_mmio_le)
type = mpic_access_mmio_be;
return _mpic_read(type, &mpic->gregs, offset);
}
static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value)
{
unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) +
(ipi * MPIC_INFO(GREG_IPI_STRIDE));
_mpic_write(mpic->reg_type, &mpic->gregs, offset, value);
}
static inline unsigned int mpic_tm_offset(struct mpic *mpic, unsigned int tm)
{
return (tm >> 2) * MPIC_TIMER_GROUP_STRIDE +
(tm & 3) * MPIC_INFO(TIMER_STRIDE);
}
static inline u32 _mpic_tm_read(struct mpic *mpic, unsigned int tm)
{
unsigned int offset = mpic_tm_offset(mpic, tm) +
MPIC_INFO(TIMER_VECTOR_PRI);
return _mpic_read(mpic->reg_type, &mpic->tmregs, offset);
}
static inline void _mpic_tm_write(struct mpic *mpic, unsigned int tm, u32 value)
{
unsigned int offset = mpic_tm_offset(mpic, tm) +
MPIC_INFO(TIMER_VECTOR_PRI);
_mpic_write(mpic->reg_type, &mpic->tmregs, offset, value);
}
static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
{
unsigned int cpu = mpic_processor_id(mpic);
return _mpic_read(mpic->reg_type, &mpic->cpuregs[cpu], reg);
}
static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
{
unsigned int cpu = mpic_processor_id(mpic);
_mpic_write(mpic->reg_type, &mpic->cpuregs[cpu], reg, value);
}
static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg)
{
unsigned int isu = src_no >> mpic->isu_shift;
unsigned int idx = src_no & mpic->isu_mask;
unsigned int val;
val = _mpic_read(mpic->reg_type, &mpic->isus[isu],
reg + (idx * MPIC_INFO(IRQ_STRIDE)));
#ifdef CONFIG_MPIC_BROKEN_REGREAD
if (reg == 0)
val = (val & (MPIC_VECPRI_MASK | MPIC_VECPRI_ACTIVITY)) |
mpic->isu_reg0_shadow[src_no];
#endif
return val;
}
static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
unsigned int reg, u32 value)
{
unsigned int isu = src_no >> mpic->isu_shift;
unsigned int idx = src_no & mpic->isu_mask;
_mpic_write(mpic->reg_type, &mpic->isus[isu],
reg + (idx * MPIC_INFO(IRQ_STRIDE)), value);
#ifdef CONFIG_MPIC_BROKEN_REGREAD
if (reg == 0)
mpic->isu_reg0_shadow[src_no] =
value & ~(MPIC_VECPRI_MASK | MPIC_VECPRI_ACTIVITY);
#endif
}
#define mpic_read(b,r) _mpic_read(mpic->reg_type,&(b),(r))
#define mpic_write(b,r,v) _mpic_write(mpic->reg_type,&(b),(r),(v))
#define mpic_ipi_read(i) _mpic_ipi_read(mpic,(i))
#define mpic_ipi_write(i,v) _mpic_ipi_write(mpic,(i),(v))
#define mpic_tm_read(i) _mpic_tm_read(mpic,(i))
#define mpic_tm_write(i,v) _mpic_tm_write(mpic,(i),(v))
#define mpic_cpu_read(i) _mpic_cpu_read(mpic,(i))
#define mpic_cpu_write(i,v) _mpic_cpu_write(mpic,(i),(v))
#define mpic_irq_read(s,r) _mpic_irq_read(mpic,(s),(r))
#define mpic_irq_write(s,r,v) _mpic_irq_write(mpic,(s),(r),(v))
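/*
 * The macros above are shorthands that rely on a local "mpic" variable in
 * the calling function; e.g. mpic_irq_read(s, r) expands to
 * _mpic_irq_read(mpic, (s), (r)).
 */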
/*
* Low level utility functions
*/
static void _mpic_map_mmio(struct mpic *mpic, phys_addr_t phys_addr,
struct mpic_reg_bank *rb, unsigned int offset,
unsigned int size)
{
rb->base = ioremap(phys_addr + offset, size);
BUG_ON(rb->base == NULL);
}
#ifdef CONFIG_PPC_DCR
static void _mpic_map_dcr(struct mpic *mpic, struct mpic_reg_bank *rb,
unsigned int offset, unsigned int size)
{
phys_addr_t phys_addr = dcr_resource_start(mpic->node, 0);
rb->dhost = dcr_map(mpic->node, phys_addr + offset, size);
BUG_ON(!DCR_MAP_OK(rb->dhost));
}
static inline void mpic_map(struct mpic *mpic,
phys_addr_t phys_addr, struct mpic_reg_bank *rb,
unsigned int offset, unsigned int size)
{
if (mpic->flags & MPIC_USES_DCR)
_mpic_map_dcr(mpic, rb, offset, size);
else
_mpic_map_mmio(mpic, phys_addr, rb, offset, size);
}
#else /* CONFIG_PPC_DCR */
#define mpic_map(m,p,b,o,s) _mpic_map_mmio(m,p,b,o,s)
#endif /* !CONFIG_PPC_DCR */
/* Check if we have one of those nice broken MPICs with a flipped endian on
* reads from IPI registers
*/
static void __init mpic_test_broken_ipi(struct mpic *mpic)
{
u32 r;
mpic_write(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0), MPIC_VECPRI_MASK);
r = mpic_read(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0));
if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
mpic->flags |= MPIC_BROKEN_IPI;
}
}
#ifdef CONFIG_MPIC_U3_HT_IRQS
/* Test if an interrupt is sourced from HyperTransport (used on broken U3s)
* to force the edge setting on the MPIC and do the ack workaround.
*/
static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source)
{
if (source >= 128 || !mpic->fixups)
return 0;
return mpic->fixups[source].base != NULL;
}
static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source)
{
struct mpic_irq_fixup *fixup = &mpic->fixups[source];
if (fixup->applebase) {
unsigned int soff = (fixup->index >> 3) & ~3;
unsigned int mask = 1U << (fixup->index & 0x1f);
writel(mask, fixup->applebase + soff);
} else {
raw_spin_lock(&mpic->fixup_lock);
writeb(0x11 + 2 * fixup->index, fixup->base + 2);
writel(fixup->data, fixup->base + 4);
raw_spin_unlock(&mpic->fixup_lock);
}
}
static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
bool level)
{
struct mpic_irq_fixup *fixup = &mpic->fixups[source];
unsigned long flags;
u32 tmp;
if (fixup->base == NULL)
return;
DBG("startup_ht_interrupt(0x%x) index: %d\n",
source, fixup->index);
raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
/* Enable and configure */
writeb(0x10 + 2 * fixup->index, fixup->base + 2);
tmp = readl(fixup->base + 4);
tmp &= ~(0x23U);
if (level)
tmp |= 0x22;
writel(tmp, fixup->base + 4);
raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags);
#ifdef CONFIG_PM
/* use the lowest bit inverted to the actual HW,
* set if this fixup was enabled, clear otherwise */
mpic->save_data[source].fixup_data = tmp | 1;
#endif
}
static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source)
{
struct mpic_irq_fixup *fixup = &mpic->fixups[source];
unsigned long flags;
u32 tmp;
if (fixup->base == NULL)
return;
DBG("shutdown_ht_interrupt(0x%x)\n", source);
/* Disable */
raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
writeb(0x10 + 2 * fixup->index, fixup->base + 2);
tmp = readl(fixup->base + 4);
tmp |= 1;
writel(tmp, fixup->base + 4);
raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags);
#ifdef CONFIG_PM
/* use the lowest bit inverted to the actual HW,
* set if this fixup was enabled, clear otherwise */
mpic->save_data[source].fixup_data = tmp & ~1;
#endif
}
#ifdef CONFIG_PCI_MSI
static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase,
unsigned int devfn)
{
u8 __iomem *base;
u8 pos, flags;
u64 addr = 0;
for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0;
pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) {
u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);
if (id == PCI_CAP_ID_HT) {
id = readb(devbase + pos + 3);
if ((id & HT_5BIT_CAP_MASK) == HT_CAPTYPE_MSI_MAPPING)
break;
}
}
if (pos == 0)
return;
base = devbase + pos;
flags = readb(base + HT_MSI_FLAGS);
if (!(flags & HT_MSI_FLAGS_FIXED)) {
addr = readl(base + HT_MSI_ADDR_LO) & HT_MSI_ADDR_LO_MASK;
addr = addr | ((u64)readl(base + HT_MSI_ADDR_HI) << 32);
}
printk(KERN_DEBUG "mpic: - HT:%02x.%x %s MSI mapping found @ 0x%llx\n",
PCI_SLOT(devfn), PCI_FUNC(devfn),
flags & HT_MSI_FLAGS_ENABLE ? "enabled" : "disabled", addr);
if (!(flags & HT_MSI_FLAGS_ENABLE))
writeb(flags | HT_MSI_FLAGS_ENABLE, base + HT_MSI_FLAGS);
}
#else
static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase,
unsigned int devfn)
{
return;
}
#endif
static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase,
unsigned int devfn, u32 vdid)
{
int i, irq, n;
u8 __iomem *base;
u32 tmp;
u8 pos;
for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0;
pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) {
u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);
if (id == PCI_CAP_ID_HT) {
id = readb(devbase + pos + 3);
if ((id & HT_5BIT_CAP_MASK) == HT_CAPTYPE_IRQ)
break;
}
}
if (pos == 0)
return;
base = devbase + pos;
writeb(0x01, base + 2);
n = (readl(base + 4) >> 16) & 0xff;
printk(KERN_INFO "mpic: - HT:%02x.%x [0x%02x] vendor %04x device %04x"
" has %d irqs\n",
devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1);
for (i = 0; i <= n; i++) {
writeb(0x10 + 2 * i, base + 2);
tmp = readl(base + 4);
irq = (tmp >> 16) & 0xff;
DBG("HT PIC index 0x%x, irq 0x%x, tmp: %08x\n", i, irq, tmp);
/* mask it , will be unmasked later */
tmp |= 0x1;
writel(tmp, base + 4);
mpic->fixups[irq].index = i;
mpic->fixups[irq].base = base;
/* Apple HT PIC has a non-standard way of doing EOIs */
if ((vdid & 0xffff) == 0x106b)
mpic->fixups[irq].applebase = devbase + 0x60;
else
mpic->fixups[irq].applebase = NULL;
writeb(0x11 + 2 * i, base + 2);
mpic->fixups[irq].data = readl(base + 4) | 0x80000000;
}
}
static void __init mpic_scan_ht_pics(struct mpic *mpic)
{
unsigned int devfn;
u8 __iomem *cfgspace;
printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n");
/* Allocate fixups array */
mpic->fixups = kcalloc(128, sizeof(*mpic->fixups), GFP_KERNEL);
BUG_ON(mpic->fixups == NULL);
/* Init spinlock */
raw_spin_lock_init(&mpic->fixup_lock);
/* Map U3 config space. We assume all IO-APICs are on the primary bus
* so we only need to map 64kB.
*/
cfgspace = ioremap(0xf2000000, 0x10000);
BUG_ON(cfgspace == NULL);
/* Now we scan all slots. We do a very quick scan, we read the header
* type, vendor ID and device ID only, that's plenty enough
*/
for (devfn = 0; devfn < 0x100; devfn++) {
u8 __iomem *devbase = cfgspace + (devfn << 8);
u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
u32 l = readl(devbase + PCI_VENDOR_ID);
u16 s;
DBG("devfn %x, l: %x\n", devfn, l);
/* If no device, skip */
if (l == 0xffffffff || l == 0x00000000 ||
l == 0x0000ffff || l == 0xffff0000)
goto next;
/* Check if it supports capability lists */
s = readw(devbase + PCI_STATUS);
if (!(s & PCI_STATUS_CAP_LIST))
goto next;
mpic_scan_ht_pic(mpic, devbase, devfn, l);
mpic_scan_ht_msi(mpic, devbase, devfn);
next:
/* next device, if function 0 */
if (PCI_FUNC(devfn) == 0 && (hdr_type & 0x80) == 0)
devfn += 7;
}
}
#else /* CONFIG_MPIC_U3_HT_IRQS */
static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source)
{
return 0;
}
static void __init mpic_scan_ht_pics(struct mpic *mpic)
{
}
#endif /* CONFIG_MPIC_U3_HT_IRQS */
/* Find an mpic associated with a given linux interrupt */
static struct mpic *mpic_find(unsigned int irq)
{
if (irq < NR_IRQS_LEGACY)
return NULL;
return irq_get_chip_data(irq);
}
/* Determine if the linux irq is an IPI */
static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int src)
{
return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]);
}
/* Determine if the linux irq is a timer */
static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int src)
{
return (src >= mpic->timer_vecs[0] && src <= mpic->timer_vecs[7]);
}
/* Convert a cpu mask from logical to physical cpu numbers. */
static inline u32 mpic_physmask(u32 cpumask)
{
int i;
u32 mask = 0;
for (i = 0; i < min(32, NR_CPUS) && cpu_possible(i); ++i, cpumask >>= 1)
mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
return mask;
}
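/*
 * Example (illustrative): with logical CPU 0 mapped to hard id 0 and
 * logical CPU 1 mapped to hard id 2, mpic_physmask(0x3) returns 0x5.
 */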
#ifdef CONFIG_SMP
/* Get the mpic structure from the IPI number */
static inline struct mpic * mpic_from_ipi(struct irq_data *d)
{
return irq_data_get_irq_chip_data(d);
}
#endif
/* Get the mpic structure from the irq number */
static inline struct mpic * mpic_from_irq(unsigned int irq)
{
return irq_get_chip_data(irq);
}
/* Get the mpic structure from the irq data */
static inline struct mpic * mpic_from_irq_data(struct irq_data *d)
{
return irq_data_get_irq_chip_data(d);
}
/* Send an EOI */
static inline void mpic_eoi(struct mpic *mpic)
{
mpic_cpu_write(MPIC_INFO(CPU_EOI), 0);
}
/*
* Linux descriptor level callbacks
*/
void mpic_unmask_irq(struct irq_data *d)
{
unsigned int loops = 100000;
struct mpic *mpic = mpic_from_irq_data(d);
unsigned int src = irqd_to_hwirq(d);
DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, d->irq, src);
mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) &
~MPIC_VECPRI_MASK);
/* make sure mask gets to controller before we return to user */
do {
if (!loops--) {
printk(KERN_ERR "%s: timeout on hwirq %u\n",
__func__, src);
break;
}
} while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK);
}
void mpic_mask_irq(struct irq_data *d)
{
unsigned int loops = 100000;
struct mpic *mpic = mpic_from_irq_data(d);
unsigned int src = irqd_to_hwirq(d);
DBG("%s: disable_irq: %d (src %d)\n", mpic->name, d->irq, src);
mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) |
MPIC_VECPRI_MASK);
/* make sure mask gets to controller before we return to user */
do {
if (!loops--) {
printk(KERN_ERR "%s: timeout on hwirq %u\n",
__func__, src);
break;
}
} while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK));
}
void mpic_end_irq(struct irq_data *d)
{
struct mpic *mpic = mpic_from_irq_data(d);
#ifdef DEBUG_IRQ
DBG("%s: end_irq: %d\n", mpic->name, d->irq);
#endif
/* We always EOI on end_irq() even for edge interrupts since that
* should only lower the priority, the MPIC should have properly
* latched another edge interrupt coming in anyway
*/
mpic_eoi(mpic);
}
#ifdef CONFIG_MPIC_U3_HT_IRQS
static void mpic_unmask_ht_irq(struct irq_data *d)
{
struct mpic *mpic = mpic_from_irq_data(d);
unsigned int src = irqd_to_hwirq(d);
mpic_unmask_irq(d);
if (irqd_is_level_type(d))
mpic_ht_end_irq(mpic, src);
}
static unsigned int mpic_startup_ht_irq(struct irq_data *d)
{
struct mpic *mpic = mpic_from_irq_data(d);
unsigned int src = irqd_to_hwirq(d);
mpic_unmask_irq(d);
mpic_startup_ht_interrupt(mpic, src, irqd_is_level_type(d));
return 0;
}
static void mpic_shutdown_ht_irq(struct irq_data *d)
{
struct mpic *mpic = mpic_from_irq_data(d);
unsigned int src = irqd_to_hwirq(d);
mpic_shutdown_ht_interrupt(mpic, src);
mpic_mask_irq(d);
}
static void mpic_end_ht_irq(struct irq_data *d)
{
struct mpic *mpic = mpic_from_irq_data(d);
unsigned int src = irqd_to_hwirq(d);
#ifdef DEBUG_IRQ
DBG("%s: end_irq: %d\n", mpic->name, d->irq);
#endif
/* We always EOI on end_irq() even for edge interrupts since that
* should only lower the priority, the MPIC should have properly
* latched another edge interrupt coming in anyway
*/
if (irqd_is_level_type(d))
mpic_ht_end_irq(mpic, src);
mpic_eoi(mpic);
}
#endif /* CONFIG_MPIC_U3_HT_IRQS */
#ifdef CONFIG_SMP
static void mpic_unmask_ipi(struct irq_data *d)
{
struct mpic *mpic = mpic_from_ipi(d);
unsigned int src = virq_to_hw(d->irq) - mpic->ipi_vecs[0];
DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, d->irq, src);
mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
}
static void mpic_mask_ipi(struct irq_data *d)
{
/* NEVER disable an IPI... that's just plain wrong! */
}
static void mpic_end_ipi(struct irq_data *d)
{
struct mpic *mpic = mpic_from_ipi(d);
/*
* IPIs are marked IRQ_PER_CPU. This has the side effect of
* preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
* applying to them. We EOI them late to avoid re-entering.
*/
mpic_eoi(mpic);
}
#endif /* CONFIG_SMP */
static void mpic_unmask_tm(struct irq_data *d)
{
struct mpic *mpic = mpic_from_irq_data(d);
unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0];
DBG("%s: enable_tm: %d (tm %d)\n", mpic->name, d->irq, src);
mpic_tm_write(src, mpic_tm_read(src) & ~MPIC_VECPRI_MASK);
mpic_tm_read(src);
}
static void mpic_mask_tm(struct irq_data *d)
{
struct mpic *mpic = mpic_from_irq_data(d);
unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0];
mpic_tm_write(src, mpic_tm_read(src) | MPIC_VECPRI_MASK);
mpic_tm_read(src);
}
int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
bool force)
{
struct mpic *mpic = mpic_from_irq_data(d);
unsigned int src = irqd_to_hwirq(d);
if (mpic->flags & MPIC_SINGLE_DEST_CPU) {
int cpuid = irq_choose_cpu(cpumask);
mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
} else {
u32 mask = cpumask_bits(cpumask)[0];
mask &= cpumask_bits(cpu_online_mask)[0];
mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
mpic_physmask(mask));
}
return IRQ_SET_MASK_OK;
}
static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
{
/* Now convert sense value */
switch(type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_RISING:
return MPIC_INFO(VECPRI_SENSE_EDGE) |
MPIC_INFO(VECPRI_POLARITY_POSITIVE);
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_EDGE_BOTH:
return MPIC_INFO(VECPRI_SENSE_EDGE) |
MPIC_INFO(VECPRI_POLARITY_NEGATIVE);
case IRQ_TYPE_LEVEL_HIGH:
return MPIC_INFO(VECPRI_SENSE_LEVEL) |
MPIC_INFO(VECPRI_POLARITY_POSITIVE);
case IRQ_TYPE_LEVEL_LOW:
default:
return MPIC_INFO(VECPRI_SENSE_LEVEL) |
MPIC_INFO(VECPRI_POLARITY_NEGATIVE);
}
}
int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
struct mpic *mpic = mpic_from_irq_data(d);
unsigned int src = irqd_to_hwirq(d);
unsigned int vecpri, vold, vnew;
DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n",
mpic, d->irq, src, flow_type);
if (src >= mpic->num_sources)
return -EINVAL;
vold = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
/* We don't support "none" type */
if (flow_type == IRQ_TYPE_NONE)
flow_type = IRQ_TYPE_DEFAULT;
/* Default: read HW settings */
if (flow_type == IRQ_TYPE_DEFAULT) {
int vold_ps;
vold_ps = vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
MPIC_INFO(VECPRI_SENSE_MASK));
if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) |
MPIC_INFO(VECPRI_POLARITY_POSITIVE)))
flow_type = IRQ_TYPE_EDGE_RISING;
else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) |
MPIC_INFO(VECPRI_POLARITY_NEGATIVE)))
flow_type = IRQ_TYPE_EDGE_FALLING;
else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) |
MPIC_INFO(VECPRI_POLARITY_POSITIVE)))
flow_type = IRQ_TYPE_LEVEL_HIGH;
else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) |
MPIC_INFO(VECPRI_POLARITY_NEGATIVE)))
flow_type = IRQ_TYPE_LEVEL_LOW;
else
WARN_ONCE(1, "mpic: unknown IRQ type %d\n", vold);
}
/* Apply to irq desc */
irqd_set_trigger_type(d, flow_type);
/* Apply to HW */
if (mpic_is_ht_interrupt(mpic, src))
vecpri = MPIC_VECPRI_POLARITY_POSITIVE |
MPIC_VECPRI_SENSE_EDGE;
else
vecpri = mpic_type_to_vecpri(mpic, flow_type);
vnew = vold & ~(MPIC_INFO(VECPRI_POLARITY_MASK) |
MPIC_INFO(VECPRI_SENSE_MASK));
vnew |= vecpri;
if (vold != vnew)
mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew);
return IRQ_SET_MASK_OK_NOCOPY;
}
void mpic_set_vector(unsigned int virq, unsigned int vector)
{
struct mpic *mpic = mpic_from_irq(virq);
unsigned int src = virq_to_hw(virq);
unsigned int vecpri;
DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n",
mpic, virq, src, vector);
if (src >= mpic->num_sources)
return;
vecpri = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
vecpri = vecpri & ~MPIC_INFO(VECPRI_VECTOR_MASK);
vecpri |= vector;
mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vecpri);
}
static void mpic_set_destination(unsigned int virq, unsigned int cpuid)
{
struct mpic *mpic = mpic_from_irq(virq);
unsigned int src = virq_to_hw(virq);
DBG("mpic: set_destination(mpic:@%p,virq:%d,src:%d,cpuid:0x%x)\n",
mpic, virq, src, cpuid);
if (src >= mpic->num_sources)
return;
mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
}
static struct irq_chip mpic_irq_chip = {
.irq_mask = mpic_mask_irq,
.irq_unmask = mpic_unmask_irq,
.irq_eoi = mpic_end_irq,
.irq_set_type = mpic_set_irq_type,
};
#ifdef CONFIG_SMP
static const struct irq_chip mpic_ipi_chip = {
.irq_mask = mpic_mask_ipi,
.irq_unmask = mpic_unmask_ipi,
.irq_eoi = mpic_end_ipi,
};
#endif /* CONFIG_SMP */
static struct irq_chip mpic_tm_chip = {
.irq_mask = mpic_mask_tm,
.irq_unmask = mpic_unmask_tm,
.irq_eoi = mpic_end_irq,
};
#ifdef CONFIG_MPIC_U3_HT_IRQS
static const struct irq_chip mpic_irq_ht_chip = {
.irq_startup = mpic_startup_ht_irq,
.irq_shutdown = mpic_shutdown_ht_irq,
.irq_mask = mpic_mask_irq,
.irq_unmask = mpic_unmask_ht_irq,
.irq_eoi = mpic_end_ht_irq,
.irq_set_type = mpic_set_irq_type,
};
#endif /* CONFIG_MPIC_U3_HT_IRQS */
static int mpic_host_match(struct irq_domain *h, struct device_node *node,
enum irq_domain_bus_token bus_token)
{
/* Exact match, unless mpic node is NULL */
struct device_node *of_node = irq_domain_get_of_node(h);
return of_node == NULL || of_node == node;
}
static int mpic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct mpic *mpic = h->host_data;
struct irq_chip *chip;
DBG("mpic: map virq %d, hwirq 0x%lx\n", virq, hw);
if (hw == mpic->spurious_vec)
return -EINVAL;
if (mpic->protected && test_bit(hw, mpic->protected)) {
pr_warn("mpic: Mapping of source 0x%x failed, source protected by firmware !\n",
(unsigned int)hw);
return -EPERM;
}
#ifdef CONFIG_SMP
else if (hw >= mpic->ipi_vecs[0]) {
WARN_ON(mpic->flags & MPIC_SECONDARY);
DBG("mpic: mapping as IPI\n");
irq_set_chip_data(virq, mpic);
irq_set_chip_and_handler(virq, &mpic->hc_ipi,
handle_percpu_irq);
return 0;
}
#endif /* CONFIG_SMP */
if (hw >= mpic->timer_vecs[0] && hw <= mpic->timer_vecs[7]) {
WARN_ON(mpic->flags & MPIC_SECONDARY);
DBG("mpic: mapping as timer\n");
irq_set_chip_data(virq, mpic);
irq_set_chip_and_handler(virq, &mpic->hc_tm,
handle_fasteoi_irq);
return 0;
}
if (mpic_map_error_int(mpic, virq, hw))
return 0;
if (hw >= mpic->num_sources) {
pr_warn("mpic: Mapping of source 0x%x failed, source out of range !\n",
(unsigned int)hw);
return -EINVAL;
}
mpic_msi_reserve_hwirq(mpic, hw);
/* Default chip */
chip = &mpic->hc_irq;
#ifdef CONFIG_MPIC_U3_HT_IRQS
/* Check for HT interrupts, override vecpri */
if (mpic_is_ht_interrupt(mpic, hw))
chip = &mpic->hc_ht_irq;
#endif /* CONFIG_MPIC_U3_HT_IRQS */
DBG("mpic: mapping to irq chip @%p\n", chip);
irq_set_chip_data(virq, mpic);
irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);
/* Set default irq type */
irq_set_irq_type(virq, IRQ_TYPE_DEFAULT);
/* If the MPIC was reset, then all vectors have already been
* initialized. Otherwise, a per source lazy initialization
* is done here.
*/
if (!mpic_is_ipi(mpic, hw) && (mpic->flags & MPIC_NO_RESET)) {
int cpu;
preempt_disable();
cpu = mpic_processor_id(mpic);
preempt_enable();
mpic_set_vector(virq, hw);
mpic_set_destination(virq, cpu);
mpic_irq_set_priority(virq, 8);
}
return 0;
}
static int mpic_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
struct mpic *mpic = h->host_data;
static unsigned char map_mpic_senses[4] = {
IRQ_TYPE_EDGE_RISING,
IRQ_TYPE_LEVEL_LOW,
IRQ_TYPE_LEVEL_HIGH,
IRQ_TYPE_EDGE_FALLING,
};
*out_hwirq = intspec[0];
if (intsize >= 4 && (mpic->flags & MPIC_FSL)) {
/*
* Freescale MPIC with extended intspec:
* First two cells are as usual. Third specifies
* an "interrupt type". Fourth is type-specific data.
*
* See Documentation/devicetree/bindings/powerpc/fsl/mpic.txt
*/
switch (intspec[2]) {
case 0:
break;
case 1:
if (!(mpic->flags & MPIC_FSL_HAS_EIMR))
break;
if (intspec[3] >= ARRAY_SIZE(mpic->err_int_vecs))
return -EINVAL;
*out_hwirq = mpic->err_int_vecs[intspec[3]];
break;
case 2:
if (intspec[0] >= ARRAY_SIZE(mpic->ipi_vecs))
return -EINVAL;
*out_hwirq = mpic->ipi_vecs[intspec[0]];
break;
case 3:
if (intspec[0] >= ARRAY_SIZE(mpic->timer_vecs))
return -EINVAL;
*out_hwirq = mpic->timer_vecs[intspec[0]];
break;
default:
pr_debug("%s: unknown irq type %u\n",
__func__, intspec[2]);
return -EINVAL;
}
*out_flags = map_mpic_senses[intspec[1] & 3];
} else if (intsize > 1) {
u32 mask = 0x3;
/* Apple invented a new race of encoding on machines with
* an HT APIC. They encode, among others, the index within
* the HT APIC. We don't care about it here since thankfully,
* it appears that they have the APIC already properly
* configured, and thus our current fixup code that reads the
* APIC config works fine. However, we still need to mask out
* bits in the specifier to make sure we only get bit 0 which
* is the level/edge bit (the only sense bit exposed by Apple),
* as their bit 1 means something else.
*/
if (machine_is(powermac))
mask = 0x1;
*out_flags = map_mpic_senses[intspec[1] & mask];
} else
*out_flags = IRQ_TYPE_NONE;
DBG("mpic: xlate (%d cells: 0x%08x 0x%08x) to line 0x%lx sense 0x%x\n",
intsize, intspec[0], intspec[1], *out_hwirq, *out_flags);
return 0;
}
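/*
 * Illustrative four-cell Freescale specifier (an assumed example; see the
 * binding document referenced above): "interrupts = <16 2 0 0>" selects
 * hardware source 16 with sense code 2 (IRQ_TYPE_LEVEL_HIGH in
 * map_mpic_senses) and interrupt type 0, i.e. a normal source. With type 3
 * and a first cell of 1 the hwirq would instead be mpic->timer_vecs[1].
 */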
/* IRQ handler for a secondary MPIC cascaded from another IRQ controller */
static void mpic_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct mpic *mpic = irq_desc_get_handler_data(desc);
unsigned int virq;
BUG_ON(!(mpic->flags & MPIC_SECONDARY));
virq = mpic_get_one_irq(mpic);
if (virq)
generic_handle_irq(virq);
chip->irq_eoi(&desc->irq_data);
}
static const struct irq_domain_ops mpic_host_ops = {
.match = mpic_host_match,
.map = mpic_host_map,
.xlate = mpic_host_xlate,
};
static u32 fsl_mpic_get_version(struct mpic *mpic)
{
u32 brr1;
if (!(mpic->flags & MPIC_FSL))
return 0;
brr1 = _mpic_read(mpic->reg_type, &mpic->thiscpuregs,
MPIC_FSL_BRR1);
return brr1 & MPIC_FSL_BRR1_VER;
}
/*
* Exported functions
*/
u32 fsl_mpic_primary_get_version(void)
{
struct mpic *mpic = mpic_primary;
if (mpic)
return fsl_mpic_get_version(mpic);
return 0;
}
struct mpic * __init mpic_alloc(struct device_node *node,
phys_addr_t phys_addr,
unsigned int flags,
unsigned int isu_size,
unsigned int irq_count,
const char *name)
{
int i, psize, intvec_top;
struct mpic *mpic;
u32 greg_feature;
const char *vers;
const u32 *psrc;
u32 last_irq;
u32 fsl_version = 0;
/* Default MPIC search parameters */
static const struct of_device_id __initconst mpic_device_id[] = {
{ .type = "open-pic", },
{ .compatible = "open-pic", },
{},
};
/*
* If we were not passed a device-tree node, then perform the default
* search for a standardized OpenPIC.
*/
if (node) {
node = of_node_get(node);
} else {
node = of_find_matching_node(NULL, mpic_device_id);
if (!node)
return NULL;
}
/* Pick the physical address from the device tree if unspecified */
if (!phys_addr) {
/* Check if it is DCR-based */
if (of_property_read_bool(node, "dcr-reg")) {
flags |= MPIC_USES_DCR;
} else {
struct resource r;
if (of_address_to_resource(node, 0, &r))
goto err_of_node_put;
phys_addr = r.start;
}
}
/* Read extra device-tree properties into the flags variable */
if (of_property_read_bool(node, "big-endian"))
flags |= MPIC_BIG_ENDIAN;
if (of_property_read_bool(node, "pic-no-reset"))
flags |= MPIC_NO_RESET;
if (of_property_read_bool(node, "single-cpu-affinity"))
flags |= MPIC_SINGLE_DEST_CPU;
if (of_device_is_compatible(node, "fsl,mpic")) {
flags |= MPIC_FSL | MPIC_LARGE_VECTORS;
mpic_irq_chip.flags |= IRQCHIP_SKIP_SET_WAKE;
mpic_tm_chip.flags |= IRQCHIP_SKIP_SET_WAKE;
}
mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL);
if (mpic == NULL)
goto err_of_node_put;
mpic->name = name;
mpic->node = node;
mpic->paddr = phys_addr;
mpic->flags = flags;
mpic->hc_irq = mpic_irq_chip;
mpic->hc_irq.name = name;
if (!(mpic->flags & MPIC_SECONDARY))
mpic->hc_irq.irq_set_affinity = mpic_set_affinity;
#ifdef CONFIG_MPIC_U3_HT_IRQS
mpic->hc_ht_irq = mpic_irq_ht_chip;
mpic->hc_ht_irq.name = name;
if (!(mpic->flags & MPIC_SECONDARY))
mpic->hc_ht_irq.irq_set_affinity = mpic_set_affinity;
#endif /* CONFIG_MPIC_U3_HT_IRQS */
#ifdef CONFIG_SMP
mpic->hc_ipi = mpic_ipi_chip;
mpic->hc_ipi.name = name;
#endif /* CONFIG_SMP */
mpic->hc_tm = mpic_tm_chip;
mpic->hc_tm.name = name;
mpic->num_sources = 0; /* so far */
if (mpic->flags & MPIC_LARGE_VECTORS)
intvec_top = 2047;
else
intvec_top = 255;
mpic->timer_vecs[0] = intvec_top - 12;
mpic->timer_vecs[1] = intvec_top - 11;
mpic->timer_vecs[2] = intvec_top - 10;
mpic->timer_vecs[3] = intvec_top - 9;
mpic->timer_vecs[4] = intvec_top - 8;
mpic->timer_vecs[5] = intvec_top - 7;
mpic->timer_vecs[6] = intvec_top - 6;
mpic->timer_vecs[7] = intvec_top - 5;
mpic->ipi_vecs[0] = intvec_top - 4;
mpic->ipi_vecs[1] = intvec_top - 3;
mpic->ipi_vecs[2] = intvec_top - 2;
mpic->ipi_vecs[3] = intvec_top - 1;
mpic->spurious_vec = intvec_top;
/* Look for protected sources */
psrc = of_get_property(mpic->node, "protected-sources", &psize);
if (psrc) {
/* Allocate a bitmap with one bit per interrupt */
mpic->protected = bitmap_zalloc(intvec_top + 1, GFP_KERNEL);
BUG_ON(mpic->protected == NULL);
for (i = 0; i < psize/sizeof(u32); i++) {
if (psrc[i] > intvec_top)
continue;
__set_bit(psrc[i], mpic->protected);
}
}
#ifdef CONFIG_MPIC_WEIRD
mpic->hw_set = mpic_infos[MPIC_GET_REGSET(mpic->flags)];
#endif
/* default register type */
if (mpic->flags & MPIC_BIG_ENDIAN)
mpic->reg_type = mpic_access_mmio_be;
else
mpic->reg_type = mpic_access_mmio_le;
/*
* An MPIC with a "dcr-reg" property must be accessed that way, but
* only if the kernel includes DCR support.
*/
#ifdef CONFIG_PPC_DCR
if (mpic->flags & MPIC_USES_DCR)
mpic->reg_type = mpic_access_dcr;
#else
BUG_ON(mpic->flags & MPIC_USES_DCR);
#endif
/* Map the global registers */
mpic_map(mpic, mpic->paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
mpic_map(mpic, mpic->paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);
if (mpic->flags & MPIC_FSL) {
int ret;
/*
* Yes, Freescale really did put global registers in the
* magic per-cpu area -- and they don't even show up in the
* non-magic per-cpu copies that this driver normally uses.
*/
mpic_map(mpic, mpic->paddr, &mpic->thiscpuregs,
MPIC_CPU_THISBASE, 0x1000);
fsl_version = fsl_mpic_get_version(mpic);
/* Error interrupt mask register (EIMR) is required for
* handling individual device error interrupts. EIMR
* was added in MPIC version 4.1.
*
* Here we reserve vector number space for the error
* interrupt vectors. This space is stolen from the
* global vector number space, as is done for IPIs
* and timer interrupts.
*
* Available vector space = intvec_top - 13, where 13
* is the number of vectors already consumed by the
* IPIs, timer interrupts and the spurious vector.
*/
if (fsl_version >= 0x401) {
ret = mpic_setup_error_int(mpic, intvec_top - 13);
if (ret)
return NULL;
}
}
/*
* EPR is only available starting with v4.0. To support
* platforms that don't know the MPIC version at compile-time,
* such as qemu-e500, turn off coreint if this MPIC doesn't
* support it. Note that we never enable it if it wasn't
* requested in the first place.
*
* This is done outside the MPIC_FSL check, so that we
* also disable coreint if the MPIC node doesn't have
* an "fsl,mpic" compatible at all. This will be the case
* with device trees generated by older versions of QEMU.
* fsl_version will be zero if MPIC_FSL is not set.
*/
if (fsl_version < 0x400 && (flags & MPIC_ENABLE_COREINT))
ppc_md.get_irq = mpic_get_irq;
/* Reset */
/* When using a device-node, reset requests are only honored if the MPIC
* is allowed to reset.
*/
if (!(mpic->flags & MPIC_NO_RESET)) {
printk(KERN_DEBUG "mpic: Resetting\n");
mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
| MPIC_GREG_GCONF_RESET);
while( mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
& MPIC_GREG_GCONF_RESET)
mb();
}
/* CoreInt */
if (mpic->flags & MPIC_ENABLE_COREINT)
mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
| MPIC_GREG_GCONF_COREINT);
if (mpic->flags & MPIC_ENABLE_MCK)
mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
| MPIC_GREG_GCONF_MCK);
/*
* The MPIC driver will crash if there are more cores than we
* can initialize, so we may as well catch that problem here.
*/
BUG_ON(num_possible_cpus() > MPIC_MAX_CPUS);
/* Map the per-CPU registers */
for_each_possible_cpu(i) {
unsigned int cpu = get_hard_smp_processor_id(i);
mpic_map(mpic, mpic->paddr, &mpic->cpuregs[cpu],
MPIC_INFO(CPU_BASE) + cpu * MPIC_INFO(CPU_STRIDE),
0x1000);
}
/*
* Read feature register. For non-ISU MPICs, num sources as well. On
* ISU MPICs, sources are counted as ISUs are added
*/
greg_feature = mpic_read(mpic->gregs, MPIC_INFO(GREG_FEATURE_0));
/*
* By default, the last source number comes from the MPIC, but the
* device-tree and board support code can override it on buggy hw.
* If we get passed an isu_size (multi-isu MPIC) then we use that
* as a default instead of the value read from the HW.
*/
last_irq = (greg_feature & MPIC_GREG_FEATURE_LAST_SRC_MASK)
>> MPIC_GREG_FEATURE_LAST_SRC_SHIFT;
if (isu_size)
last_irq = isu_size * MPIC_MAX_ISU - 1;
of_property_read_u32(mpic->node, "last-interrupt-source", &last_irq);
if (irq_count)
last_irq = irq_count - 1;
/* Initialize main ISU if none provided */
if (!isu_size) {
isu_size = last_irq + 1;
mpic->num_sources = isu_size;
mpic_map(mpic, mpic->paddr, &mpic->isus[0],
MPIC_INFO(IRQ_BASE),
MPIC_INFO(IRQ_STRIDE) * isu_size);
}
mpic->isu_size = isu_size;
mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
mpic->isu_mask = (1 << mpic->isu_shift) - 1;
mpic->irqhost = irq_domain_add_linear(mpic->node,
intvec_top,
&mpic_host_ops, mpic);
/*
* FIXME: The code leaks the MPIC object and mappings here; this
* is very unlikely to fail but it ought to be fixed anyway.
*/
if (mpic->irqhost == NULL)
return NULL;
/* Display version */
switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) {
case 1:
vers = "1.0";
break;
case 2:
vers = "1.2";
break;
case 3:
vers = "1.3";
break;
default:
vers = "<unknown>";
break;
}
printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %llx,"
" max %d CPUs\n",
name, vers, (unsigned long long)mpic->paddr, num_possible_cpus());
printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n",
mpic->isu_size, mpic->isu_shift, mpic->isu_mask);
mpic->next = mpics;
mpics = mpic;
if (!(mpic->flags & MPIC_SECONDARY)) {
mpic_primary = mpic;
irq_set_default_host(mpic->irqhost);
}
return mpic;
err_of_node_put:
of_node_put(node);
return NULL;
}
void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
phys_addr_t paddr)
{
unsigned int isu_first = isu_num * mpic->isu_size;
BUG_ON(isu_num >= MPIC_MAX_ISU);
mpic_map(mpic,
paddr, &mpic->isus[isu_num], 0,
MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
if ((isu_first + mpic->isu_size) > mpic->num_sources)
mpic->num_sources = isu_first + mpic->isu_size;
}
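/*
 * Illustrative sketch (not part of the original driver): a board file
 * driving a multi-ISU MPIC would pass a non-zero isu_size to
 * mpic_alloc() and then attach each ISU explicitly before mpic_init().
 * The flags, ISU count, physical addresses and name below are invented
 * for the example.
 */
static struct mpic * __init __maybe_unused example_multi_isu_setup(void)
{
	struct mpic *mpic;
	int i;

	mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, 16 /* isu_size */,
			  0, "EXAMPLE MPIC");
	if (!mpic)
		return NULL;

	/* Four hypothetical ISUs at made-up addresses */
	for (i = 0; i < 4; i++)
		mpic_assign_isu(mpic, i, 0xf8010000 + i * 0x1000);

	mpic_init(mpic);
	return mpic;
}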
void __init mpic_init(struct mpic *mpic)
{
int i, cpu;
int num_timers = 4;
BUG_ON(mpic->num_sources == 0);
printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);
/* Set current processor priority to max */
mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);
if (mpic->flags & MPIC_FSL) {
u32 version = fsl_mpic_get_version(mpic);
/*
* Timer group B is present at the latest in MPIC 3.1 (e.g.
* mpc8536). It is not present in MPIC 2.0 (e.g. mpc8544).
* I don't know about the status of intermediate versions (or
* whether they even exist).
*/
if (version >= 0x0301)
num_timers = 8;
}
/* Initialize timers to our reserved vectors and mask them for now */
for (i = 0; i < num_timers; i++) {
unsigned int offset = mpic_tm_offset(mpic, i);
mpic_write(mpic->tmregs,
offset + MPIC_INFO(TIMER_DESTINATION),
1 << hard_smp_processor_id());
mpic_write(mpic->tmregs,
offset + MPIC_INFO(TIMER_VECTOR_PRI),
MPIC_VECPRI_MASK |
(9 << MPIC_VECPRI_PRIORITY_SHIFT) |
(mpic->timer_vecs[0] + i));
}
/* Initialize IPIs to our reserved vectors and mark them disabled for now */
mpic_test_broken_ipi(mpic);
for (i = 0; i < 4; i++) {
mpic_ipi_write(i,
MPIC_VECPRI_MASK |
(10 << MPIC_VECPRI_PRIORITY_SHIFT) |
(mpic->ipi_vecs[0] + i));
}
/* Do the HT PIC fixups on U3 broken mpic */
DBG("MPIC flags: %x\n", mpic->flags);
if ((mpic->flags & MPIC_U3_HT_IRQS) && !(mpic->flags & MPIC_SECONDARY)) {
mpic_scan_ht_pics(mpic);
mpic_u3msi_init(mpic);
}
mpic_pasemi_msi_init(mpic);
cpu = mpic_processor_id(mpic);
if (!(mpic->flags & MPIC_NO_RESET)) {
for (i = 0; i < mpic->num_sources; i++) {
/* start with vector = source number, and masked */
u32 vecpri = MPIC_VECPRI_MASK | i |
(8 << MPIC_VECPRI_PRIORITY_SHIFT);
/* check if protected */
if (mpic->protected && test_bit(i, mpic->protected))
continue;
/* init hw */
mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI), vecpri);
mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), 1 << cpu);
}
}
/* Init spurious vector */
mpic_write(mpic->gregs, MPIC_INFO(GREG_SPURIOUS), mpic->spurious_vec);
/* Disable 8259 passthrough, if supported */
if (!(mpic->flags & MPIC_NO_PTHROU_DIS))
mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
| MPIC_GREG_GCONF_8259_PTHROU_DIS);
if (mpic->flags & MPIC_NO_BIAS)
mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
| MPIC_GREG_GCONF_NO_BIAS);
/* Set current processor priority to 0 */
mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);
#ifdef CONFIG_PM
/* allocate memory to save mpic state */
mpic->save_data = kmalloc_array(mpic->num_sources,
sizeof(*mpic->save_data),
GFP_KERNEL);
BUG_ON(mpic->save_data == NULL);
#endif
/* Check if this MPIC is chained from a parent interrupt controller */
if (mpic->flags & MPIC_SECONDARY) {
int virq = irq_of_parse_and_map(mpic->node, 0);
if (virq) {
printk(KERN_INFO "%pOF: hooking up to IRQ %d\n",
mpic->node, virq);
irq_set_handler_data(virq, mpic);
irq_set_chained_handler(virq, &mpic_cascade);
}
}
/* FSL mpic error interrupt initialization */
if (mpic->flags & MPIC_FSL_HAS_EIMR)
mpic_err_int_init(mpic, MPIC_FSL_ERR_INT);
}
void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
{
struct mpic *mpic = mpic_find(irq);
unsigned int src = virq_to_hw(irq);
unsigned long flags;
u32 reg;
if (!mpic)
return;
raw_spin_lock_irqsave(&mpic_lock, flags);
if (mpic_is_ipi(mpic, src)) {
reg = mpic_ipi_read(src - mpic->ipi_vecs[0]) &
~MPIC_VECPRI_PRIORITY_MASK;
mpic_ipi_write(src - mpic->ipi_vecs[0],
reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
} else if (mpic_is_tm(mpic, src)) {
reg = mpic_tm_read(src - mpic->timer_vecs[0]) &
~MPIC_VECPRI_PRIORITY_MASK;
mpic_tm_write(src - mpic->timer_vecs[0],
reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
} else {
reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI))
& ~MPIC_VECPRI_PRIORITY_MASK;
mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
}
raw_spin_unlock_irqrestore(&mpic_lock, flags);
}
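/*
 * Illustrative sketch (not part of the original driver): platform code
 * may raise the MPIC priority of a latency-sensitive source after it
 * has been mapped.  The priority value 12 is an example; normal
 * sources are initialized to priority 8 by mpic_init() above.
 */
static void __maybe_unused example_boost_irq_priority(unsigned int virq)
{
	/* Valid priorities are 0 (lowest) to 15 (highest) */
	mpic_irq_set_priority(virq, 12);
}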
void mpic_setup_this_cpu(void)
{
#ifdef CONFIG_SMP
struct mpic *mpic = mpic_primary;
unsigned long flags;
u32 msk = 1 << hard_smp_processor_id();
unsigned int i;
BUG_ON(mpic == NULL);
DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
raw_spin_lock_irqsave(&mpic_lock, flags);
/* let the mpic know we want intrs. default affinity is 0xffffffff
* until changed via /proc. That's how it's done on x86. If we want
* it differently, then we should make sure we also change the default
* values of irq_desc[].affinity in irq.c.
*/
if (distribute_irqs && !(mpic->flags & MPIC_SINGLE_DEST_CPU)) {
for (i = 0; i < mpic->num_sources ; i++)
mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk);
}
/* Set current processor priority to 0 */
mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);
raw_spin_unlock_irqrestore(&mpic_lock, flags);
#endif /* CONFIG_SMP */
}
int mpic_cpu_get_priority(void)
{
struct mpic *mpic = mpic_primary;
return mpic_cpu_read(MPIC_INFO(CPU_CURRENT_TASK_PRI));
}
void mpic_cpu_set_priority(int prio)
{
struct mpic *mpic = mpic_primary;
prio &= MPIC_CPU_TASKPRI_MASK;
mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), prio);
}
void mpic_teardown_this_cpu(int secondary)
{
struct mpic *mpic = mpic_primary;
unsigned long flags;
u32 msk = 1 << hard_smp_processor_id();
unsigned int i;
BUG_ON(mpic == NULL);
DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
raw_spin_lock_irqsave(&mpic_lock, flags);
/* let the mpic know we don't want intrs. */
for (i = 0; i < mpic->num_sources ; i++)
mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) & ~msk);
/* Set current processor priority to max */
mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);
/* We need to EOI the IPI since not all platforms reset the MPIC
* on boot and new interrupts wouldn't get delivered otherwise.
*/
mpic_eoi(mpic);
raw_spin_unlock_irqrestore(&mpic_lock, flags);
}
static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg)
{
u32 src;
src = mpic_cpu_read(reg) & MPIC_INFO(VECPRI_VECTOR_MASK);
#ifdef DEBUG_LOW
DBG("%s: get_one_irq(reg 0x%x): %d\n", mpic->name, reg, src);
#endif
if (unlikely(src == mpic->spurious_vec)) {
if (mpic->flags & MPIC_SPV_EOI)
mpic_eoi(mpic);
return 0;
}
if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n",
mpic->name, (int)src);
mpic_eoi(mpic);
return 0;
}
return irq_linear_revmap(mpic->irqhost, src);
}
unsigned int mpic_get_one_irq(struct mpic *mpic)
{
return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_INTACK));
}
unsigned int mpic_get_irq(void)
{
struct mpic *mpic = mpic_primary;
BUG_ON(mpic == NULL);
return mpic_get_one_irq(mpic);
}
unsigned int mpic_get_coreint_irq(void)
{
#ifdef CONFIG_BOOKE
struct mpic *mpic = mpic_primary;
u32 src;
BUG_ON(mpic == NULL);
src = mfspr(SPRN_EPR);
if (unlikely(src == mpic->spurious_vec)) {
if (mpic->flags & MPIC_SPV_EOI)
mpic_eoi(mpic);
return 0;
}
if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n",
mpic->name, (int)src);
return 0;
}
return irq_linear_revmap(mpic->irqhost, src);
#else
return 0;
#endif
}
unsigned int mpic_get_mcirq(void)
{
struct mpic *mpic = mpic_primary;
BUG_ON(mpic == NULL);
return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_MCACK));
}
#ifdef CONFIG_SMP
void __init mpic_request_ipis(void)
{
struct mpic *mpic = mpic_primary;
int i;
BUG_ON(mpic == NULL);
printk(KERN_INFO "mpic: requesting IPIs...\n");
for (i = 0; i < 4; i++) {
unsigned int vipi = irq_create_mapping(mpic->irqhost,
mpic->ipi_vecs[0] + i);
if (!vipi) {
printk(KERN_ERR "Failed to map %s\n", smp_ipi_name[i]);
continue;
}
smp_request_message_ipi(vipi, i);
}
}
void smp_mpic_message_pass(int cpu, int msg)
{
struct mpic *mpic = mpic_primary;
u32 physmask;
BUG_ON(mpic == NULL);
/* make sure we're sending something that translates to an IPI */
if ((unsigned int)msg > 3) {
printk("SMP %d: smp_message_pass: unknown msg %d\n",
smp_processor_id(), msg);
return;
}
#ifdef DEBUG_IPI
DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, msg);
#endif
physmask = 1 << get_hard_smp_processor_id(cpu);
mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
msg * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), physmask);
}
void __init smp_mpic_probe(void)
{
int nr_cpus;
DBG("smp_mpic_probe()...\n");
nr_cpus = num_possible_cpus();
DBG("nr_cpus: %d\n", nr_cpus);
if (nr_cpus > 1)
mpic_request_ipis();
}
void smp_mpic_setup_cpu(int cpu)
{
mpic_setup_this_cpu();
}
void mpic_reset_core(int cpu)
{
struct mpic *mpic = mpic_primary;
u32 pir;
int cpuid = get_hard_smp_processor_id(cpu);
int i;
/* Set target bit for core reset */
pir = mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));
pir |= (1 << cpuid);
mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir);
mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));
/* Restore target bit after reset complete */
pir &= ~(1 << cpuid);
mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir);
mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));
/* Perform 15 EOIs on each reset core to clear pending interrupts.
* This is required for FSL CoreNet based devices. */
if (mpic->flags & MPIC_FSL) {
for (i = 0; i < 15; i++) {
_mpic_write(mpic->reg_type, &mpic->cpuregs[cpuid],
MPIC_CPU_EOI, 0);
}
}
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_PM
static void mpic_suspend_one(struct mpic *mpic)
{
int i;
for (i = 0; i < mpic->num_sources; i++) {
mpic->save_data[i].vecprio =
mpic_irq_read(i, MPIC_INFO(IRQ_VECTOR_PRI));
mpic->save_data[i].dest =
mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION));
}
}
static int mpic_suspend(void)
{
struct mpic *mpic = mpics;
while (mpic) {
mpic_suspend_one(mpic);
mpic = mpic->next;
}
return 0;
}
static void mpic_resume_one(struct mpic *mpic)
{
int i;
for (i = 0; i < mpic->num_sources; i++) {
mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI),
mpic->save_data[i].vecprio);
mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
mpic->save_data[i].dest);
#ifdef CONFIG_MPIC_U3_HT_IRQS
if (mpic->fixups) {
struct mpic_irq_fixup *fixup = &mpic->fixups[i];
if (fixup->base) {
/* we use the lowest bit in an inverted meaning */
if ((mpic->save_data[i].fixup_data & 1) == 0)
continue;
/* Enable and configure */
writeb(0x10 + 2 * fixup->index, fixup->base + 2);
writel(mpic->save_data[i].fixup_data & ~1,
fixup->base + 4);
}
}
#endif
} /* end for loop */
}
static void mpic_resume(void)
{
struct mpic *mpic = mpics;
while (mpic) {
mpic_resume_one(mpic);
mpic = mpic->next;
}
}
static struct syscore_ops mpic_syscore_ops = {
.resume = mpic_resume,
.suspend = mpic_suspend,
};
static int mpic_init_sys(void)
{
int rc;
register_syscore_ops(&mpic_syscore_ops);
rc = subsys_system_register(&mpic_subsys, NULL);
if (rc) {
unregister_syscore_ops(&mpic_syscore_ops);
pr_err("mpic: Failed to register subsystem!\n");
return rc;
}
return 0;
}
device_initcall(mpic_init_sys);
#endif
| linux-master | arch/powerpc/sysdev/mpic.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Common CPM code
*
* Author: Scott Wood <[email protected]>
*
* Copyright 2007-2008,2010 Freescale Semiconductor, Inc.
*
* Some parts derived from commproc.c/cpm2_common.c, which is:
* Copyright (c) 1997 Dan error_act ([email protected])
* Copyright (c) 1999-2001 Dan Malek <[email protected]>
* Copyright (c) 2000 MontaVista Software, Inc ([email protected])
* 2006 (c) MontaVista Software, Inc.
* Vitaly Bordug <[email protected]>
*/
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <asm/udbg.h>
#include <asm/io.h>
#include <asm/cpm.h>
#include <asm/fixmap.h>
#include <soc/fsl/qe/qe.h>
#include <mm/mmu_decl.h>
#if defined(CONFIG_CPM2) || defined(CONFIG_8xx_GPIO)
#include <linux/gpio/legacy-of-mm-gpiochip.h>
#endif
static int __init cpm_init(void)
{
struct device_node *np;
np = of_find_compatible_node(NULL, NULL, "fsl,cpm1");
if (!np)
np = of_find_compatible_node(NULL, NULL, "fsl,cpm2");
if (!np)
return -ENODEV;
cpm_muram_init();
of_node_put(np);
return 0;
}
subsys_initcall(cpm_init);
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
static u32 __iomem *cpm_udbg_txdesc;
static u8 __iomem *cpm_udbg_txbuf;
static void udbg_putc_cpm(char c)
{
if (c == '\n')
udbg_putc_cpm('\r');
while (in_be32(&cpm_udbg_txdesc[0]) & 0x80000000)
;
out_8(cpm_udbg_txbuf, c);
out_be32(&cpm_udbg_txdesc[0], 0xa0000001);
}
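/*
 * Note added for illustration: the 0xa0000001 written above sets the
 * descriptor's ready (0x80000000) and wrap (0x20000000) bits with a
 * one-byte length, so each character goes out as a single-byte buffer.
 * The busy-wait loop polls the ready bit until the CPM has consumed
 * the previous character.
 */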
void __init udbg_init_cpm(void)
{
#ifdef CONFIG_PPC_8xx
mmu_mapin_immr();
cpm_udbg_txdesc = (u32 __iomem __force *)
(CONFIG_PPC_EARLY_DEBUG_CPM_ADDR - PHYS_IMMR_BASE +
VIRT_IMMR_BASE);
cpm_udbg_txbuf = (u8 __iomem __force *)
(in_be32(&cpm_udbg_txdesc[1]) - PHYS_IMMR_BASE +
VIRT_IMMR_BASE);
#else
cpm_udbg_txdesc = (u32 __iomem __force *)
CONFIG_PPC_EARLY_DEBUG_CPM_ADDR;
cpm_udbg_txbuf = (u8 __iomem __force *)in_be32(&cpm_udbg_txdesc[1]);
#endif
if (cpm_udbg_txdesc) {
#ifdef CONFIG_CPM2
setbat(1, 0xf0000000, 0xf0000000, 1024*1024, PAGE_KERNEL_NCG);
#endif
udbg_putc = udbg_putc_cpm;
}
}
#endif
#if defined(CONFIG_CPM2) || defined(CONFIG_8xx_GPIO)
struct cpm2_ioports {
u32 dir, par, sor, odr, dat;
u32 res[3];
};
struct cpm2_gpio32_chip {
struct of_mm_gpio_chip mm_gc;
spinlock_t lock;
/* shadowed data register to clear/set bits safely */
u32 cpdata;
};
static void cpm2_gpio32_save_regs(struct of_mm_gpio_chip *mm_gc)
{
struct cpm2_gpio32_chip *cpm2_gc =
container_of(mm_gc, struct cpm2_gpio32_chip, mm_gc);
struct cpm2_ioports __iomem *iop = mm_gc->regs;
cpm2_gc->cpdata = in_be32(&iop->dat);
}
static int cpm2_gpio32_get(struct gpio_chip *gc, unsigned int gpio)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct cpm2_ioports __iomem *iop = mm_gc->regs;
u32 pin_mask;
pin_mask = 1 << (31 - gpio);
return !!(in_be32(&iop->dat) & pin_mask);
}
static void __cpm2_gpio32_set(struct of_mm_gpio_chip *mm_gc, u32 pin_mask,
int value)
{
struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(&mm_gc->gc);
struct cpm2_ioports __iomem *iop = mm_gc->regs;
if (value)
cpm2_gc->cpdata |= pin_mask;
else
cpm2_gc->cpdata &= ~pin_mask;
out_be32(&iop->dat, cpm2_gc->cpdata);
}
static void cpm2_gpio32_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(gc);
unsigned long flags;
u32 pin_mask = 1 << (31 - gpio);
spin_lock_irqsave(&cpm2_gc->lock, flags);
__cpm2_gpio32_set(mm_gc, pin_mask, value);
spin_unlock_irqrestore(&cpm2_gc->lock, flags);
}
static int cpm2_gpio32_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(gc);
struct cpm2_ioports __iomem *iop = mm_gc->regs;
unsigned long flags;
u32 pin_mask = 1 << (31 - gpio);
spin_lock_irqsave(&cpm2_gc->lock, flags);
setbits32(&iop->dir, pin_mask);
__cpm2_gpio32_set(mm_gc, pin_mask, val);
spin_unlock_irqrestore(&cpm2_gc->lock, flags);
return 0;
}
static int cpm2_gpio32_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct cpm2_gpio32_chip *cpm2_gc = gpiochip_get_data(gc);
struct cpm2_ioports __iomem *iop = mm_gc->regs;
unsigned long flags;
u32 pin_mask = 1 << (31 - gpio);
spin_lock_irqsave(&cpm2_gc->lock, flags);
clrbits32(&iop->dir, pin_mask);
spin_unlock_irqrestore(&cpm2_gc->lock, flags);
return 0;
}
int cpm2_gpiochip_add32(struct device *dev)
{
struct device_node *np = dev->of_node;
struct cpm2_gpio32_chip *cpm2_gc;
struct of_mm_gpio_chip *mm_gc;
struct gpio_chip *gc;
cpm2_gc = kzalloc(sizeof(*cpm2_gc), GFP_KERNEL);
if (!cpm2_gc)
return -ENOMEM;
spin_lock_init(&cpm2_gc->lock);
mm_gc = &cpm2_gc->mm_gc;
gc = &mm_gc->gc;
mm_gc->save_regs = cpm2_gpio32_save_regs;
gc->ngpio = 32;
gc->direction_input = cpm2_gpio32_dir_in;
gc->direction_output = cpm2_gpio32_dir_out;
gc->get = cpm2_gpio32_get;
gc->set = cpm2_gpio32_set;
gc->parent = dev;
gc->owner = THIS_MODULE;
return of_mm_gpiochip_add_data(np, mm_gc, cpm2_gc);
}
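/*
 * Illustrative sketch (not part of the original file): a platform
 * driver bound to a CPM2 parallel I/O port node would simply hand its
 * struct device to cpm2_gpiochip_add32(), which registers the 32-bit
 * bank with gpiolib.  The function name is hypothetical.
 */
static int __maybe_unused example_cpm2_gpio_probe(struct device *dev)
{
	/* dev->of_node must point at the CPM2 parallel I/O bank node */
	return cpm2_gpiochip_add32(dev);
}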
#endif /* CONFIG_CPM2 || CONFIG_8xx_GPIO */
| linux-master | arch/powerpc/sysdev/cpm_common.c |
/*
* Platform information definitions.
*
* Copied from arch/ppc/syslib/cpm2_pic.c with minor subsequent updates
* to make it work in arch/powerpc/. Original (c) belongs to Dan Malek.
*
* Author: Vitaly Bordug <[email protected]>
*
* 1999-2001 (c) Dan Malek <[email protected]>
* 2006 (c) MontaVista Software, Inc.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
/* The CPM2 internal interrupt controller. It is usually
* the only interrupt controller.
* There are two 32-bit registers (high/low) for up to 64
* possible interrupts.
*
* Now, the fun starts.....Interrupt Numbers DO NOT MAP
* in a simple arithmetic fashion to mask or pending registers.
* That is, interrupt 4 does not map to bit position 4.
* We create two tables, indexed by vector number, to indicate
* which register to use and which bit in the register to use.
*/
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <asm/immap_cpm2.h>
#include <asm/io.h>
#include "cpm2_pic.h"
/* External IRQS */
#define CPM2_IRQ_EXT1 19
#define CPM2_IRQ_EXT7 25
/* Port C IRQS */
#define CPM2_IRQ_PORTC15 48
#define CPM2_IRQ_PORTC0 63
static intctl_cpm2_t __iomem *cpm2_intctl;
static struct irq_domain *cpm2_pic_host;
static unsigned long ppc_cached_irq_mask[2]; /* 2 32-bit registers */
static const u_char irq_to_siureg[] = {
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0
};
/* bit numbers do not match the docs, these are precomputed so the bit for
* a given irq is (1 << irq_to_siubit[irq]) */
static const u_char irq_to_siubit[] = {
0, 15, 14, 13, 12, 11, 10, 9,
8, 7, 6, 5, 4, 3, 2, 1,
2, 1, 0, 14, 13, 12, 11, 10,
9, 8, 7, 6, 5, 4, 3, 0,
31, 30, 29, 28, 27, 26, 25, 24,
23, 22, 21, 20, 19, 18, 17, 16,
16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31,
};
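/*
 * Worked example (added for illustration): external IRQ 1 has vector
 * 19 (CPM2_IRQ_EXT1).  irq_to_siureg[19] is 0 and irq_to_siubit[19] is
 * 14, so masking it clears bit (1 << 14) in the register at
 * ic_simrh + 0, while a vector with irq_to_siureg[] == 1 uses the
 * register one word further on (ic_simrl).
 */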
static void cpm2_mask_irq(struct irq_data *d)
{
int bit, word;
unsigned int irq_nr = irqd_to_hwirq(d);
bit = irq_to_siubit[irq_nr];
word = irq_to_siureg[irq_nr];
ppc_cached_irq_mask[word] &= ~(1 << bit);
out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]);
}
static void cpm2_unmask_irq(struct irq_data *d)
{
int bit, word;
unsigned int irq_nr = irqd_to_hwirq(d);
bit = irq_to_siubit[irq_nr];
word = irq_to_siureg[irq_nr];
ppc_cached_irq_mask[word] |= 1 << bit;
out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]);
}
static void cpm2_ack(struct irq_data *d)
{
int bit, word;
unsigned int irq_nr = irqd_to_hwirq(d);
bit = irq_to_siubit[irq_nr];
word = irq_to_siureg[irq_nr];
out_be32(&cpm2_intctl->ic_sipnrh + word, 1 << bit);
}
static void cpm2_end_irq(struct irq_data *d)
{
int bit, word;
unsigned int irq_nr = irqd_to_hwirq(d);
bit = irq_to_siubit[irq_nr];
word = irq_to_siureg[irq_nr];
ppc_cached_irq_mask[word] |= 1 << bit;
out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]);
/*
* Work around large numbers of spurious IRQs on PowerPC 82xx
* systems.
*/
mb();
}
static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
unsigned int src = irqd_to_hwirq(d);
unsigned int vold, vnew, edibit;
/* Port C interrupts are either IRQ_TYPE_EDGE_FALLING or
* IRQ_TYPE_EDGE_BOTH (default). All others are IRQ_TYPE_EDGE_FALLING
* or IRQ_TYPE_LEVEL_LOW (default)
*/
if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0) {
if (flow_type == IRQ_TYPE_NONE)
flow_type = IRQ_TYPE_EDGE_BOTH;
if (flow_type != IRQ_TYPE_EDGE_BOTH &&
flow_type != IRQ_TYPE_EDGE_FALLING)
goto err_sense;
} else {
if (flow_type == IRQ_TYPE_NONE)
flow_type = IRQ_TYPE_LEVEL_LOW;
if (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
goto err_sense;
}
irqd_set_trigger_type(d, flow_type);
if (flow_type & IRQ_TYPE_LEVEL_LOW)
irq_set_handler_locked(d, handle_level_irq);
else
irq_set_handler_locked(d, handle_edge_irq);
/* internal IRQ senses are LEVEL_LOW
* EXT IRQ and Port C IRQ senses are programmable
*/
if (src >= CPM2_IRQ_EXT1 && src <= CPM2_IRQ_EXT7)
edibit = (14 - (src - CPM2_IRQ_EXT1));
else
if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0)
edibit = (31 - (CPM2_IRQ_PORTC0 - src));
else
return (flow_type & IRQ_TYPE_LEVEL_LOW) ?
IRQ_SET_MASK_OK_NOCOPY : -EINVAL;
vold = in_be32(&cpm2_intctl->ic_siexr);
if ((flow_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_FALLING)
vnew = vold | (1 << edibit);
else
vnew = vold & ~(1 << edibit);
if (vold != vnew)
out_be32(&cpm2_intctl->ic_siexr, vnew);
return IRQ_SET_MASK_OK_NOCOPY;
err_sense:
pr_err("CPM2 PIC: sense type 0x%x not supported\n", flow_type);
return -EINVAL;
}
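/*
 * Worked example (added for illustration): external IRQ 2 has vector
 * 20, so edibit = 14 - (20 - CPM2_IRQ_EXT1) = 13; Port C pin 0 has
 * vector 63, so edibit = 31 - (CPM2_IRQ_PORTC0 - 63) = 31.  Setting
 * that bit in SIEXR selects falling-edge sensing for the source.
 */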
static struct irq_chip cpm2_pic = {
.name = "CPM2 SIU",
.irq_mask = cpm2_mask_irq,
.irq_unmask = cpm2_unmask_irq,
.irq_ack = cpm2_ack,
.irq_eoi = cpm2_end_irq,
.irq_set_type = cpm2_set_irq_type,
.flags = IRQCHIP_EOI_IF_HANDLED,
};
unsigned int cpm2_get_irq(void)
{
int irq;
unsigned long bits;
/* For CPM2, read the SIVEC register and shift the bits down
* to get the irq number. */
bits = in_be32(&cpm2_intctl->ic_sivec);
irq = bits >> 26;
if (irq == 0)
return -1;
return irq_linear_revmap(cpm2_pic_host, irq);
}
static int cpm2_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw);
irq_set_status_flags(virq, IRQ_LEVEL);
irq_set_chip_and_handler(virq, &cpm2_pic, handle_level_irq);
return 0;
}
static const struct irq_domain_ops cpm2_pic_host_ops = {
.map = cpm2_pic_host_map,
.xlate = irq_domain_xlate_onetwocell,
};
void cpm2_pic_init(struct device_node *node)
{
int i;
cpm2_intctl = &cpm2_immr->im_intctl;
/* Clear the CPM IRQ controller, in case it has any bits set
* from the bootloader
*/
/* Mask out everything */
out_be32(&cpm2_intctl->ic_simrh, 0x00000000);
out_be32(&cpm2_intctl->ic_simrl, 0x00000000);
wmb();
/* Ack everything */
out_be32(&cpm2_intctl->ic_sipnrh, 0xffffffff);
out_be32(&cpm2_intctl->ic_sipnrl, 0xffffffff);
wmb();
/* Dummy read of the vector */
i = in_be32(&cpm2_intctl->ic_sivec);
rmb();
/* Initialize the default interrupt mapping priorities,
* in case the boot rom changed something on us.
*/
out_be16(&cpm2_intctl->ic_sicr, 0);
out_be32(&cpm2_intctl->ic_scprrh, 0x05309770);
out_be32(&cpm2_intctl->ic_scprrl, 0x05309770);
/* create a linear irq domain */
cpm2_pic_host = irq_domain_add_linear(node, 64, &cpm2_pic_host_ops, NULL);
if (cpm2_pic_host == NULL) {
printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
return;
}
}
| linux-master | arch/powerpc/sysdev/cpm2_pic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016,2017 IBM Corporation.
*/
#define pr_fmt(fmt) "xive: " fmt
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/xmon.h>
#include "xive-internal.h"
#undef DEBUG_FLUSH
#undef DEBUG_ALL
#ifdef DEBUG_ALL
#define DBG_VERBOSE(fmt, ...) pr_devel("cpu %d - " fmt, \
smp_processor_id(), ## __VA_ARGS__)
#else
#define DBG_VERBOSE(fmt...) do { } while(0)
#endif
bool __xive_enabled;
EXPORT_SYMBOL_GPL(__xive_enabled);
bool xive_cmdline_disabled;
/* We use only one priority for now */
static u8 xive_irq_priority;
/* TIMA exported to KVM */
void __iomem *xive_tima;
EXPORT_SYMBOL_GPL(xive_tima);
u32 xive_tima_offset;
/* Backend ops */
static const struct xive_ops *xive_ops;
/* Our global interrupt domain */
static struct irq_domain *xive_irq_domain;
#ifdef CONFIG_SMP
/* The IPIs use the same logical irq number when on the same chip */
static struct xive_ipi_desc {
unsigned int irq;
char name[16];
atomic_t started;
} *xive_ipis;
/*
* Use early_cpu_to_node() for hot-plugged CPUs
*/
static unsigned int xive_ipi_cpu_to_irq(unsigned int cpu)
{
return xive_ipis[early_cpu_to_node(cpu)].irq;
}
#endif
/* Xive state for each CPU */
static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);
/* An invalid CPU target */
#define XIVE_INVALID_TARGET (-1)
/*
* Global toggle to switch on/off StoreEOI
*/
static bool xive_store_eoi = true;
static bool xive_is_store_eoi(struct xive_irq_data *xd)
{
return xd->flags & XIVE_IRQ_FLAG_STORE_EOI && xive_store_eoi;
}
/*
* Read the next entry in a queue, return its content if it's valid
* or 0 if there is no new entry.
*
* The queue pointer is moved forward unless "just_peek" is set
*/
static u32 xive_read_eq(struct xive_q *q, bool just_peek)
{
u32 cur;
if (!q->qpage)
return 0;
cur = be32_to_cpup(q->qpage + q->idx);
/* Check valid bit (31) vs current toggle polarity */
if ((cur >> 31) == q->toggle)
return 0;
/* If consuming from the queue ... */
if (!just_peek) {
/* Next entry */
q->idx = (q->idx + 1) & q->msk;
/* Wrap around: flip valid toggle */
if (q->idx == 0)
q->toggle ^= 1;
}
/* Mask out the valid bit (31) */
return cur & 0x7fffffff;
}
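/*
 * Worked example (added for illustration): with a 4-entry queue
 * (q->msk == 3) and q->toggle == 0, a slot whose bit 31 is 1 holds a
 * new entry; once idx wraps from 3 back to 0 the toggle flips to 1 and
 * a valid entry is now one whose bit 31 is 0.  The ring therefore
 * never needs to clear consumed slots to tell old entries from new.
 */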
/*
* Scans all the queues that may have interrupts in them
* (based on "pending_prio") in priority order until an
* interrupt is found or all the queues are empty.
*
* Then updates the CPPR (Current Processor Priority
* Register) based on the most favored interrupt found
* (0xff if none) and return what was found (0 if none).
*
* If just_peek is set, return the most favored pending
* interrupt if any but don't update the queue pointers.
*
* Note: This function can operate generically on any number
* of queues (up to 8). The current implementation of the XIVE
* driver only uses a single queue however.
*
* Note2: This will also "flush" the pending_count of a queue
* into the "count" when that queue is observed to be empty.
* This is used to keep track of the number of interrupts
* targeting a queue. When an interrupt is moved away from
* a queue, we only decrement that queue count once the queue
* has been observed empty to avoid races.
*/
static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
{
u32 irq = 0;
u8 prio = 0;
/* Find highest pending priority */
while (xc->pending_prio != 0) {
struct xive_q *q;
prio = ffs(xc->pending_prio) - 1;
DBG_VERBOSE("scan_irq: trying prio %d\n", prio);
/* Try to fetch */
irq = xive_read_eq(&xc->queue[prio], just_peek);
/* Found something ? That's it */
if (irq) {
if (just_peek || irq_to_desc(irq))
break;
/*
* We should never get here; if we do then we must
* have failed to synchronize the interrupt properly
* when shutting it down.
*/
pr_crit("xive: got interrupt %d without descriptor, dropping\n",
irq);
WARN_ON(1);
continue;
}
/* Clear pending bits */
xc->pending_prio &= ~(1 << prio);
/*
* Check if the queue count needs adjusting due to
* interrupts being moved away. See description of
* xive_dec_target_count()
*/
q = &xc->queue[prio];
if (atomic_read(&q->pending_count)) {
int p = atomic_xchg(&q->pending_count, 0);
if (p) {
WARN_ON(p > atomic_read(&q->count));
atomic_sub(p, &q->count);
}
}
}
/* If nothing was found, set CPPR to 0xff */
if (irq == 0)
prio = 0xff;
/* Update HW CPPR to match if necessary */
if (prio != xc->cppr) {
DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
xc->cppr = prio;
out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
}
return irq;
}
/*
* This is used to perform the magic loads from an ESB
* described in xive-regs.h
*/
static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
{
u64 val;
if (offset == XIVE_ESB_SET_PQ_10 && xive_is_store_eoi(xd))
offset |= XIVE_ESB_LD_ST_MO;
if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
else
val = in_be64(xd->eoi_mmio + offset);
return (u8)val;
}
static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
{
if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
else
out_be64(xd->eoi_mmio + offset, data);
}
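/*
 * Quick reference (added for illustration): these "magic" ESB accesses
 * both return and update the PQ bits.  For instance
 * xive_esb_read(xd, XIVE_ESB_SET_PQ_01) forces the source into the
 * masked PQ=01 state and returns the previous state, which is how
 * xive_do_source_set_mask() below remembers whether the interrupt was
 * already queued (P set) before it was masked.
 */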
#if defined(CONFIG_XMON) || defined(CONFIG_DEBUG_FS)
static void xive_irq_data_dump(struct xive_irq_data *xd, char *buffer, size_t size)
{
u64 val = xive_esb_read(xd, XIVE_ESB_GET);
snprintf(buffer, size, "flags=%c%c%c PQ=%c%c 0x%016llx 0x%016llx",
xive_is_store_eoi(xd) ? 'S' : ' ',
xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
val & XIVE_ESB_VAL_P ? 'P' : '-',
val & XIVE_ESB_VAL_Q ? 'Q' : '-',
xd->trig_page, xd->eoi_page);
}
#endif
#ifdef CONFIG_XMON
static notrace void xive_dump_eq(const char *name, struct xive_q *q)
{
u32 i0, i1, idx;
if (!q->qpage)
return;
idx = q->idx;
i0 = be32_to_cpup(q->qpage + idx);
idx = (idx + 1) & q->msk;
i1 = be32_to_cpup(q->qpage + idx);
xmon_printf("%s idx=%d T=%d %08x %08x ...", name,
q->idx, q->toggle, i0, i1);
}
notrace void xmon_xive_do_dump(int cpu)
{
struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
xmon_printf("CPU %d:", cpu);
if (xc) {
xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);
#ifdef CONFIG_SMP
{
char buffer[128];
xive_irq_data_dump(&xc->ipi_data, buffer, sizeof(buffer));
xmon_printf("IPI=0x%08x %s", xc->hw_ipi, buffer);
}
#endif
xive_dump_eq("EQ", &xc->queue[xive_irq_priority]);
}
xmon_printf("\n");
}
static struct irq_data *xive_get_irq_data(u32 hw_irq)
{
unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq);
return irq ? irq_get_irq_data(irq) : NULL;
}
int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
{
int rc;
u32 target;
u8 prio;
u32 lirq;
rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
if (rc) {
xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
return rc;
}
xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
hw_irq, target, prio, lirq);
if (!d)
d = xive_get_irq_data(hw_irq);
if (d) {
char buffer[128];
xive_irq_data_dump(irq_data_get_irq_handler_data(d),
buffer, sizeof(buffer));
xmon_printf("%s", buffer);
}
xmon_printf("\n");
return 0;
}
void xmon_xive_get_irq_all(void)
{
unsigned int i;
struct irq_desc *desc;
for_each_irq_desc(i, desc) {
struct irq_data *d = irq_domain_get_irq_data(xive_irq_domain, i);
if (d)
xmon_xive_get_irq_config(irqd_to_hwirq(d), d);
}
}
#endif /* CONFIG_XMON */
static unsigned int xive_get_irq(void)
{
struct xive_cpu *xc = __this_cpu_read(xive_cpu);
u32 irq;
/*
* This can be called either as a result of a HW interrupt or
* as a "replay" because EOI decided there was still something
* in one of the queues.
*
* First we perform an ACK cycle in order to update our mask
* of pending priorities. This will also have the effect of
* updating the CPPR to the most favored pending interrupts.
*
* In the future, if we have a way to differentiate a first
* entry (on HW interrupt) from a replay triggered by EOI,
* we could skip this on replays unless we soft-mask tells us
* that a new HW interrupt occurred.
*/
xive_ops->update_pending(xc);
DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);
/* Scan our queue(s) for interrupts */
irq = xive_scan_interrupts(xc, false);
DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
irq, xc->pending_prio);
/* Return pending interrupt if any */
if (irq == XIVE_BAD_IRQ)
return 0;
return irq;
}
/*
* After EOI'ing an interrupt, we need to re-check the queue
* to see if another interrupt is pending since multiple
* interrupts can coalesce into a single notification to the
* CPU.
*
* If we find that there is indeed more in there, we call
* force_external_irq_replay() to make Linux synthesize an
* external interrupt on the next call to local_irq_restore().
*/
static void xive_do_queue_eoi(struct xive_cpu *xc)
{
if (xive_scan_interrupts(xc, true) != 0) {
DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
force_external_irq_replay();
}
}
/*
* EOI an interrupt at the source. There are several methods
* to do this depending on the HW version and source type
*/
static void xive_do_source_eoi(struct xive_irq_data *xd)
{
u8 eoi_val;
xd->stale_p = false;
/* If the XIVE supports the new "store EOI" facility, use it */
if (xive_is_store_eoi(xd)) {
xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
return;
}
/*
* For LSIs, we use the "EOI cycle" special load rather than
* PQ bits, as they are automatically re-triggered in HW when
* still pending.
*/
if (xd->flags & XIVE_IRQ_FLAG_LSI) {
xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
return;
}
/*
* Otherwise, we use the special MMIO that does a clear of
* both P and Q and returns the old Q. This allows us to then
* do a re-trigger if Q was set rather than synthesizing an
* interrupt in software
*/
eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
DBG_VERBOSE("eoi_val=%x\n", eoi_val);
/* Re-trigger if needed */
if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
out_be64(xd->trig_mmio, 0);
}
/* irq_chip eoi callback, called with irq descriptor lock held */
static void xive_irq_eoi(struct irq_data *d)
{
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
struct xive_cpu *xc = __this_cpu_read(xive_cpu);
DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
d->irq, irqd_to_hwirq(d), xc->pending_prio);
/*
* EOI the source if it hasn't been disabled and hasn't
* been passed-through to a KVM guest
*/
if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) &&
!(xd->flags & XIVE_IRQ_FLAG_NO_EOI))
xive_do_source_eoi(xd);
else
xd->stale_p = true;
/*
* Clear saved_p to indicate that it's no longer occupying
* a queue slot on the target queue
*/
xd->saved_p = false;
/* Check for more work in the queue */
xive_do_queue_eoi(xc);
}
/*
* Helper used to mask and unmask an interrupt source.
*/
static void xive_do_source_set_mask(struct xive_irq_data *xd,
bool mask)
{
u64 val;
pr_debug("%s: HW 0x%x %smask\n", __func__, xd->hw_irq, mask ? "" : "un");
/*
* If the interrupt had P set, it may be in a queue.
*
* We need to make sure we don't re-enable it until it
* has been fetched from that queue and EOId. We keep
* a copy of that P state and use it to restore the
* ESB accordingly on unmask.
*/
if (mask) {
val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P))
xd->saved_p = true;
xd->stale_p = false;
} else if (xd->saved_p) {
xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
xd->saved_p = false;
} else {
xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
xd->stale_p = false;
}
}
/*
* Try to choose "cpu" as a new interrupt target. Increments
* the queue accounting for that target if it's not already
* full.
*/
static bool xive_try_pick_target(int cpu)
{
struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
struct xive_q *q = &xc->queue[xive_irq_priority];
int max;
/*
* Calculate max number of interrupts in that queue.
*
* We leave a gap of 1 just in case...
*/
max = (q->msk + 1) - 1;
return !!atomic_add_unless(&q->count, 1, max);
}
/*
* Un-account an interrupt for a target CPU. We don't directly
* decrement q->count since the interrupt might still be present
* in the queue.
*
* Instead increment a separate counter "pending_count" which
* will be subtracted from "count" later when that CPU observes
* the queue to be empty.
*/
static void xive_dec_target_count(int cpu)
{
struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
struct xive_q *q = &xc->queue[xive_irq_priority];
if (WARN_ON(cpu < 0 || !xc)) {
pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
return;
}
/*
* We increment the "pending count" which will be used
* to decrement the target queue count whenever it's next
* processed and found empty. This ensures that we don't
* decrement while we still have the interrupt there
* occupying a slot.
*/
atomic_inc(&q->pending_count);
}
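/*
 * Accounting sketch (added for illustration): when an interrupt moves
 * from CPU A to CPU B, B's queue count is bumped immediately by
 * xive_try_pick_target(), but A's is only reduced once
 * xive_scan_interrupts() next observes A's queue empty and folds
 * pending_count back into count.  This avoids freeing a slot that may
 * still hold the in-flight entry.
 */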
/* Find a tentative CPU target in a CPU mask */
static int xive_find_target_in_mask(const struct cpumask *mask,
unsigned int fuzz)
{
int cpu, first, num, i;
/* Pick up a starting point CPU in the mask based on fuzz */
num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
first = fuzz % num;
/* Locate it */
cpu = cpumask_first(mask);
for (i = 0; i < first && cpu < nr_cpu_ids; i++)
cpu = cpumask_next(cpu, mask);
/* Sanity check */
if (WARN_ON(cpu >= nr_cpu_ids))
cpu = cpumask_first(cpu_online_mask);
/* Remember first one to handle wrap-around */
first = cpu;
/*
* Now go through the entire mask until we find a valid
* target.
*/
do {
/*
* We re-check online as the fallback case passes us
* an untested affinity mask
*/
if (cpu_online(cpu) && xive_try_pick_target(cpu))
return cpu;
cpu = cpumask_next(cpu, mask);
/* Wrap around */
if (cpu >= nr_cpu_ids)
cpu = cpumask_first(mask);
} while (cpu != first);
return -1;
}
/*
* Pick a target CPU for an interrupt. This is done at
* startup or if the affinity is changed in a way that
* invalidates the current target.
*/
static int xive_pick_irq_target(struct irq_data *d,
const struct cpumask *affinity)
{
static unsigned int fuzz;
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
cpumask_var_t mask;
int cpu = -1;
/*
* If we have chip IDs, first we try to build a mask of
* CPUs matching the CPU and find a target in there
*/
if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
/* Build a mask of matching chip IDs */
for_each_cpu_and(cpu, affinity, cpu_online_mask) {
struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
if (xc->chip_id == xd->src_chip)
cpumask_set_cpu(cpu, mask);
}
/* Try to find a target */
if (cpumask_empty(mask))
cpu = -1;
else
cpu = xive_find_target_in_mask(mask, fuzz++);
free_cpumask_var(mask);
if (cpu >= 0)
return cpu;
fuzz--;
}
/* No chip IDs, fallback to using the affinity mask */
return xive_find_target_in_mask(affinity, fuzz++);
}
static unsigned int xive_irq_startup(struct irq_data *d)
{
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
int target, rc;
xd->saved_p = false;
xd->stale_p = false;
pr_debug("%s: irq %d [0x%x] data @%p\n", __func__, d->irq, hw_irq, d);
/* Pick a target */
target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
if (target == XIVE_INVALID_TARGET) {
/* Try again breaking affinity */
target = xive_pick_irq_target(d, cpu_online_mask);
if (target == XIVE_INVALID_TARGET)
return -ENXIO;
pr_warn("irq %d started with broken affinity\n", d->irq);
}
/* Sanity check */
if (WARN_ON(target == XIVE_INVALID_TARGET ||
target >= nr_cpu_ids))
target = smp_processor_id();
xd->target = target;
/*
* Configure the logical number to be the Linux IRQ number
* and set the target queue
*/
rc = xive_ops->configure_irq(hw_irq,
get_hard_smp_processor_id(target),
xive_irq_priority, d->irq);
if (rc)
return rc;
/* Unmask the ESB */
xive_do_source_set_mask(xd, false);
return 0;
}
/* called with irq descriptor lock held */
static void xive_irq_shutdown(struct irq_data *d)
{
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
pr_debug("%s: irq %d [0x%x] data @%p\n", __func__, d->irq, hw_irq, d);
if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
return;
/* Mask the interrupt at the source */
xive_do_source_set_mask(xd, true);
/*
* Mask the interrupt in HW in the IVT/EAS and set the number
* to be the "bad" IRQ number
*/
xive_ops->configure_irq(hw_irq,
get_hard_smp_processor_id(xd->target),
0xff, XIVE_BAD_IRQ);
xive_dec_target_count(xd->target);
xd->target = XIVE_INVALID_TARGET;
}
static void xive_irq_unmask(struct irq_data *d)
{
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
pr_debug("%s: irq %d data @%p\n", __func__, d->irq, xd);
xive_do_source_set_mask(xd, false);
}
static void xive_irq_mask(struct irq_data *d)
{
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
pr_debug("%s: irq %d data @%p\n", __func__, d->irq, xd);
xive_do_source_set_mask(xd, true);
}
static int xive_irq_set_affinity(struct irq_data *d,
const struct cpumask *cpumask,
bool force)
{
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
u32 target, old_target;
int rc = 0;
pr_debug("%s: irq %d/0x%x\n", __func__, d->irq, hw_irq);
/* Is this valid ? */
if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
return -EINVAL;
/*
* If existing target is already in the new mask, and is
* online then do nothing.
*/
if (xd->target != XIVE_INVALID_TARGET &&
cpu_online(xd->target) &&
cpumask_test_cpu(xd->target, cpumask))
return IRQ_SET_MASK_OK;
/* Pick a new target */
target = xive_pick_irq_target(d, cpumask);
/* No target found */
if (target == XIVE_INVALID_TARGET)
return -ENXIO;
/* Sanity check */
if (WARN_ON(target >= nr_cpu_ids))
target = smp_processor_id();
old_target = xd->target;
/*
* Only configure the irq if it's not currently passed-through to
* a KVM guest
*/
if (!irqd_is_forwarded_to_vcpu(d))
rc = xive_ops->configure_irq(hw_irq,
get_hard_smp_processor_id(target),
xive_irq_priority, d->irq);
if (rc < 0) {
pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
return rc;
}
pr_debug(" target: 0x%x\n", target);
xd->target = target;
/* Give up previous target */
if (old_target != XIVE_INVALID_TARGET)
xive_dec_target_count(old_target);
return IRQ_SET_MASK_OK;
}
static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
/*
* We only support these. This has really no effect other than setting
* the corresponding descriptor bits, mind you, but those will in turn
* affect the resend function when re-enabling an edge interrupt.
*
* Set the default to edge as explained in map().
*/
if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
flow_type = IRQ_TYPE_EDGE_RISING;
if (flow_type != IRQ_TYPE_EDGE_RISING &&
flow_type != IRQ_TYPE_LEVEL_LOW)
return -EINVAL;
irqd_set_trigger_type(d, flow_type);
/*
* Double check it matches what the FW thinks
*
* NOTE: We don't know yet if the PAPR interface will provide
* the LSI vs MSI information apart from the device-tree so
* this check might have to move into an optional backend call
* that is specific to the native backend
*/
if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
!!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
d->irq, (u32)irqd_to_hwirq(d),
(flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
(xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
}
return IRQ_SET_MASK_OK_NOCOPY;
}
static int xive_irq_retrigger(struct irq_data *d)
{
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
/* This should be only for MSIs */
if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
return 0;
/*
* To perform a retrigger, we first set the PQ bits to
* 11, then perform an EOI.
*/
xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
xive_do_source_eoi(xd);
return 1;
}
/*
* Caller holds the irq descriptor lock, so this won't be called
* concurrently with xive_get_irqchip_state on the same interrupt.
*/
static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
{
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
int rc;
u8 pq;
/*
* This is called by KVM with state non-NULL for enabling
* pass-through or NULL for disabling it
*/
if (state) {
irqd_set_forwarded_to_vcpu(d);
/* Set it to PQ=10 state to prevent further sends */
pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
if (!xd->stale_p) {
xd->saved_p = !!(pq & XIVE_ESB_VAL_P);
xd->stale_p = !xd->saved_p;
}
/* No target ? nothing to do */
if (xd->target == XIVE_INVALID_TARGET) {
/*
* An untargeted interrupt should also have been
* masked at the source
*/
WARN_ON(xd->saved_p);
return 0;
}
/*
* If P was set, adjust state to PQ=11 to indicate
* that a resend is needed for the interrupt to reach
* the guest. Also remember the value of P.
*
* This also tells us that it's in flight to a host queue
* or has already been fetched but hasn't been EOIed yet
* by the host. Thus it's potentially using up a host
* queue slot. This is important to know because as long
* as this is the case, we must not hard-unmask it when
* "returning" that interrupt to the host.
*
* This saved_p is cleared by the host EOI, when we know
* for sure the queue slot is no longer in use.
*/
if (xd->saved_p) {
xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
/*
* Sync the XIVE source HW to ensure the interrupt
* has gone through the EAS before we change its
* target to the guest. That should guarantee us
* that we *will* eventually get an EOI for it on
* the host. Otherwise there would be a small window
* for P to be seen here but the interrupt going
* to the guest queue.
*/
if (xive_ops->sync_source)
xive_ops->sync_source(hw_irq);
}
} else {
irqd_clr_forwarded_to_vcpu(d);
/* No host target ? hard mask and return */
if (xd->target == XIVE_INVALID_TARGET) {
xive_do_source_set_mask(xd, true);
return 0;
}
/*
* Sync the XIVE source HW to ensure the interrupt
* has gone through the EAS before we change its
* target to the host.
*/
if (xive_ops->sync_source)
xive_ops->sync_source(hw_irq);
/*
* By convention we are called with the interrupt in
* a PQ=10 or PQ=11 state, i.e., it won't fire and will
* have latched in Q whether there's a pending HW
* interrupt or not.
*
* First reconfigure the target.
*/
rc = xive_ops->configure_irq(hw_irq,
get_hard_smp_processor_id(xd->target),
xive_irq_priority, d->irq);
if (rc)
return rc;
/*
* Then if saved_p is not set, effectively re-enable the
* interrupt with an EOI. If it is set, we know there is
* still a message in a host queue somewhere that will be
* EOId eventually.
*
* Note: We don't check irqd_irq_disabled(). Effectively,
* we *will* let the irq get through even if masked if the
* HW is still firing it in order to deal with the whole
* saved_p business properly. If the interrupt triggers
* while masked, the generic code will re-mask it anyway.
*/
if (!xd->saved_p)
xive_do_source_eoi(xd);
}
return 0;
}
/* Called with irq descriptor lock held. */
static int xive_get_irqchip_state(struct irq_data *data,
enum irqchip_irq_state which, bool *state)
{
struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
u8 pq;
switch (which) {
case IRQCHIP_STATE_ACTIVE:
pq = xive_esb_read(xd, XIVE_ESB_GET);
/*
* The esb value being all 1's means we couldn't get
* the PQ state of the interrupt through mmio. It may
* happen, for example when querying a PHB interrupt
* while the PHB is in an error state. We consider the
* interrupt to be inactive in that case.
*/
*state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
(xd->saved_p || (!!(pq & XIVE_ESB_VAL_P) &&
!irqd_irq_disabled(data)));
return 0;
default:
return -EINVAL;
}
}
static struct irq_chip xive_irq_chip = {
.name = "XIVE-IRQ",
.irq_startup = xive_irq_startup,
.irq_shutdown = xive_irq_shutdown,
.irq_eoi = xive_irq_eoi,
.irq_mask = xive_irq_mask,
.irq_unmask = xive_irq_unmask,
.irq_set_affinity = xive_irq_set_affinity,
.irq_set_type = xive_irq_set_type,
.irq_retrigger = xive_irq_retrigger,
.irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
.irq_get_irqchip_state = xive_get_irqchip_state,
};
bool is_xive_irq(struct irq_chip *chip)
{
return chip == &xive_irq_chip;
}
EXPORT_SYMBOL_GPL(is_xive_irq);
void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
pr_debug("%s for HW 0x%x\n", __func__, xd->hw_irq);
if (xd->eoi_mmio) {
iounmap(xd->eoi_mmio);
if (xd->eoi_mmio == xd->trig_mmio)
xd->trig_mmio = NULL;
xd->eoi_mmio = NULL;
}
if (xd->trig_mmio) {
iounmap(xd->trig_mmio);
xd->trig_mmio = NULL;
}
}
EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);
static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
{
struct xive_irq_data *xd;
int rc;
xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
if (!xd)
return -ENOMEM;
rc = xive_ops->populate_irq_data(hw, xd);
if (rc) {
kfree(xd);
return rc;
}
xd->target = XIVE_INVALID_TARGET;
irq_set_handler_data(virq, xd);
/*
* Turn OFF by default the interrupt being mapped. A side
* effect of this check is the mapping of the ESB page of the
* interrupt in the Linux address space. This prevents page
* fault issues in the crash handler which masks all
* interrupts.
*/
xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
return 0;
}
void xive_irq_free_data(unsigned int virq)
{
struct xive_irq_data *xd = irq_get_handler_data(virq);
if (!xd)
return;
irq_set_handler_data(virq, NULL);
xive_cleanup_irq_data(xd);
kfree(xd);
}
EXPORT_SYMBOL_GPL(xive_irq_free_data);
#ifdef CONFIG_SMP
static void xive_cause_ipi(int cpu)
{
struct xive_cpu *xc;
struct xive_irq_data *xd;
xc = per_cpu(xive_cpu, cpu);
DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
smp_processor_id(), cpu, xc->hw_ipi);
xd = &xc->ipi_data;
if (WARN_ON(!xd->trig_mmio))
return;
out_be64(xd->trig_mmio, 0);
}
static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
{
return smp_ipi_demux();
}
static void xive_ipi_eoi(struct irq_data *d)
{
struct xive_cpu *xc = __this_cpu_read(xive_cpu);
/* Handle possible race with unplug and drop stale IPIs */
if (!xc)
return;
DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);
xive_do_source_eoi(&xc->ipi_data);
xive_do_queue_eoi(xc);
}
static void xive_ipi_do_nothing(struct irq_data *d)
{
/*
* Nothing to do, we never mask/unmask IPIs, but the callback
* has to exist for the struct irq_chip.
*/
}
static struct irq_chip xive_ipi_chip = {
.name = "XIVE-IPI",
.irq_eoi = xive_ipi_eoi,
.irq_mask = xive_ipi_do_nothing,
.irq_unmask = xive_ipi_do_nothing,
};
/*
* IPIs are marked per-cpu. We use separate HW interrupts under the
* hood but associated with the same "linux" interrupt
*/
struct xive_ipi_alloc_info {
irq_hw_number_t hwirq;
};
static int xive_ipi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
struct xive_ipi_alloc_info *info = arg;
int i;
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, info->hwirq + i, &xive_ipi_chip,
domain->host_data, handle_percpu_irq,
NULL, NULL);
}
return 0;
}
static const struct irq_domain_ops xive_ipi_irq_domain_ops = {
.alloc = xive_ipi_irq_domain_alloc,
};
static int __init xive_init_ipis(void)
{
struct fwnode_handle *fwnode;
struct irq_domain *ipi_domain;
unsigned int node;
int ret = -ENOMEM;
fwnode = irq_domain_alloc_named_fwnode("XIVE-IPI");
if (!fwnode)
goto out;
ipi_domain = irq_domain_create_linear(fwnode, nr_node_ids,
&xive_ipi_irq_domain_ops, NULL);
if (!ipi_domain)
goto out_free_fwnode;
xive_ipis = kcalloc(nr_node_ids, sizeof(*xive_ipis), GFP_KERNEL | __GFP_NOFAIL);
if (!xive_ipis)
goto out_free_domain;
for_each_node(node) {
struct xive_ipi_desc *xid = &xive_ipis[node];
struct xive_ipi_alloc_info info = { node };
/*
* Map one IPI interrupt per node for all cpus of that node.
* Since the HW interrupt number doesn't have any meaning,
* simply use the node number.
*/
ret = irq_domain_alloc_irqs(ipi_domain, 1, node, &info);
if (ret < 0)
goto out_free_xive_ipis;
xid->irq = ret;
snprintf(xid->name, sizeof(xid->name), "IPI-%d", node);
}
return ret;
out_free_xive_ipis:
kfree(xive_ipis);
out_free_domain:
irq_domain_remove(ipi_domain);
out_free_fwnode:
irq_domain_free_fwnode(fwnode);
out:
return ret;
}
static int xive_request_ipi(unsigned int cpu)
{
struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)];
int ret;
if (atomic_inc_return(&xid->started) > 1)
return 0;
ret = request_irq(xid->irq, xive_muxed_ipi_action,
IRQF_NO_DEBUG | IRQF_PERCPU | IRQF_NO_THREAD,
xid->name, NULL);
WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
return ret;
}
static int xive_setup_cpu_ipi(unsigned int cpu)
{
unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
struct xive_cpu *xc;
int rc;
pr_debug("Setting up IPI for CPU %d\n", cpu);
xc = per_cpu(xive_cpu, cpu);
/* Check if we are already setup */
if (xc->hw_ipi != XIVE_BAD_IRQ)
return 0;
/* Register the IPI */
xive_request_ipi(cpu);
/* Grab an IPI from the backend, this will populate xc->hw_ipi */
if (xive_ops->get_ipi(cpu, xc))
return -EIO;
/*
* Populate the IRQ data in the xive_cpu structure and
* configure the HW / enable the IPIs.
*/
rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
if (rc) {
pr_err("Failed to populate IPI data on CPU %d\n", cpu);
return -EIO;
}
rc = xive_ops->configure_irq(xc->hw_ipi,
get_hard_smp_processor_id(cpu),
xive_irq_priority, xive_ipi_irq);
if (rc) {
pr_err("Failed to map IPI CPU %d\n", cpu);
return -EIO;
}
pr_debug("CPU %d HW IPI 0x%x, virq %d, trig_mmio=%p\n", cpu,
xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);
/* Unmask it */
xive_do_source_set_mask(&xc->ipi_data, false);
return 0;
}
noinstr static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
{
unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
/* Disable the IPI and free the IRQ data */
/* Already cleaned up ? */
if (xc->hw_ipi == XIVE_BAD_IRQ)
return;
/* TODO: clear IPI mapping */
/* Mask the IPI */
xive_do_source_set_mask(&xc->ipi_data, true);
/*
* Note: We don't call xive_cleanup_irq_data() to free
* the mappings as this is called from an IPI on kexec
* which is not a safe environment to call iounmap()
*/
/* Deconfigure/mask in the backend */
xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
0xff, xive_ipi_irq);
/* Free the IPIs in the backend */
xive_ops->put_ipi(cpu, xc);
}
void __init xive_smp_probe(void)
{
smp_ops->cause_ipi = xive_cause_ipi;
/* Register the IPI */
xive_init_ipis();
/* Allocate and setup IPI for the boot CPU */
xive_setup_cpu_ipi(smp_processor_id());
}
#endif /* CONFIG_SMP */
static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
int rc;
/*
* Mark interrupts as edge sensitive by default so that resend
* actually works. Will fix that up below if needed.
*/
irq_clear_status_flags(virq, IRQ_LEVEL);
rc = xive_irq_alloc_data(virq, hw);
if (rc)
return rc;
irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);
return 0;
}
static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
{
xive_irq_free_data(virq);
}
static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
*out_hwirq = intspec[0];
/*
* If intsize is at least 2, we look for the type in the second cell,
	 * where we assume the LSB indicates a level interrupt.
*/
if (intsize > 1) {
if (intspec[1] & 1)
*out_flags = IRQ_TYPE_LEVEL_LOW;
else
*out_flags = IRQ_TYPE_EDGE_RISING;
} else
*out_flags = IRQ_TYPE_LEVEL_LOW;
return 0;
}
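/*
 * Decoding example (illustrative, device tree values are hypothetical):
 * an "interrupts" entry of <0x1234 1> yields hwirq 0x1234 with
 * IRQ_TYPE_LEVEL_LOW (LSB of the second cell set), while <0x1234 0>
 * yields IRQ_TYPE_EDGE_RISING. A single-cell entry defaults to
 * IRQ_TYPE_LEVEL_LOW.
 */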
static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
enum irq_domain_bus_token bus_token)
{
return xive_ops->match(node);
}
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static const char * const esb_names[] = { "RESET", "OFF", "PENDING", "QUEUED" };
static const struct {
u64 mask;
char *name;
} xive_irq_flags[] = {
{ XIVE_IRQ_FLAG_STORE_EOI, "STORE_EOI" },
{ XIVE_IRQ_FLAG_LSI, "LSI" },
{ XIVE_IRQ_FLAG_H_INT_ESB, "H_INT_ESB" },
{ XIVE_IRQ_FLAG_NO_EOI, "NO_EOI" },
};
static void xive_irq_domain_debug_show(struct seq_file *m, struct irq_domain *d,
struct irq_data *irqd, int ind)
{
struct xive_irq_data *xd;
u64 val;
int i;
/* No IRQ domain level information. To be done */
if (!irqd)
return;
if (!is_xive_irq(irq_data_get_irq_chip(irqd)))
return;
seq_printf(m, "%*sXIVE:\n", ind, "");
ind++;
xd = irq_data_get_irq_handler_data(irqd);
if (!xd) {
seq_printf(m, "%*snot assigned\n", ind, "");
return;
}
val = xive_esb_read(xd, XIVE_ESB_GET);
seq_printf(m, "%*sESB: %s\n", ind, "", esb_names[val & 0x3]);
seq_printf(m, "%*sPstate: %s %s\n", ind, "", xd->stale_p ? "stale" : "",
xd->saved_p ? "saved" : "");
seq_printf(m, "%*sTarget: %d\n", ind, "", xd->target);
seq_printf(m, "%*sChip: %d\n", ind, "", xd->src_chip);
seq_printf(m, "%*sTrigger: 0x%016llx\n", ind, "", xd->trig_page);
seq_printf(m, "%*sEOI: 0x%016llx\n", ind, "", xd->eoi_page);
seq_printf(m, "%*sFlags: 0x%llx\n", ind, "", xd->flags);
for (i = 0; i < ARRAY_SIZE(xive_irq_flags); i++) {
if (xd->flags & xive_irq_flags[i].mask)
seq_printf(m, "%*s%s\n", ind + 12, "", xive_irq_flags[i].name);
}
}
#endif
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
static int xive_irq_domain_translate(struct irq_domain *d,
struct irq_fwspec *fwspec,
unsigned long *hwirq,
unsigned int *type)
{
return xive_irq_domain_xlate(d, to_of_node(fwspec->fwnode),
fwspec->param, fwspec->param_count,
hwirq, type);
}
static int xive_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
struct irq_fwspec *fwspec = arg;
irq_hw_number_t hwirq;
unsigned int type = IRQ_TYPE_NONE;
int i, rc;
rc = xive_irq_domain_translate(domain, fwspec, &hwirq, &type);
if (rc)
return rc;
pr_debug("%s %d/0x%lx #%d\n", __func__, virq, hwirq, nr_irqs);
for (i = 0; i < nr_irqs; i++) {
/* TODO: call xive_irq_domain_map() */
/*
* Mark interrupts as edge sensitive by default so that resend
* actually works. Will fix that up below if needed.
*/
irq_clear_status_flags(virq, IRQ_LEVEL);
/* allocates and sets handler data */
rc = xive_irq_alloc_data(virq + i, hwirq + i);
if (rc)
return rc;
irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
&xive_irq_chip, domain->host_data);
irq_set_handler(virq + i, handle_fasteoi_irq);
}
return 0;
}
static void xive_irq_domain_free(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
int i;
pr_debug("%s %d #%d\n", __func__, virq, nr_irqs);
for (i = 0; i < nr_irqs; i++)
xive_irq_free_data(virq + i);
}
#endif
static const struct irq_domain_ops xive_irq_domain_ops = {
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
.alloc = xive_irq_domain_alloc,
.free = xive_irq_domain_free,
.translate = xive_irq_domain_translate,
#endif
.match = xive_irq_domain_match,
.map = xive_irq_domain_map,
.unmap = xive_irq_domain_unmap,
.xlate = xive_irq_domain_xlate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
.debug_show = xive_irq_domain_debug_show,
#endif
};
static void __init xive_init_host(struct device_node *np)
{
xive_irq_domain = irq_domain_add_tree(np, &xive_irq_domain_ops, NULL);
if (WARN_ON(xive_irq_domain == NULL))
return;
irq_set_default_host(xive_irq_domain);
}
static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
if (xc->queue[xive_irq_priority].qpage)
xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
}
static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
int rc = 0;
	/* We set up one queue for now with a 64k page */
if (!xc->queue[xive_irq_priority].qpage)
rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);
return rc;
}
static int xive_prepare_cpu(unsigned int cpu)
{
struct xive_cpu *xc;
xc = per_cpu(xive_cpu, cpu);
if (!xc) {
xc = kzalloc_node(sizeof(struct xive_cpu),
GFP_KERNEL, cpu_to_node(cpu));
if (!xc)
return -ENOMEM;
xc->hw_ipi = XIVE_BAD_IRQ;
xc->chip_id = XIVE_INVALID_CHIP_ID;
if (xive_ops->prepare_cpu)
xive_ops->prepare_cpu(cpu, xc);
per_cpu(xive_cpu, cpu) = xc;
}
/* Setup EQs if not already */
return xive_setup_cpu_queues(cpu, xc);
}
static void xive_setup_cpu(void)
{
struct xive_cpu *xc = __this_cpu_read(xive_cpu);
/* The backend might have additional things to do */
if (xive_ops->setup_cpu)
xive_ops->setup_cpu(smp_processor_id(), xc);
/* Set CPPR to 0xff to enable flow of interrupts */
xc->cppr = 0xff;
out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}
#ifdef CONFIG_SMP
void xive_smp_setup_cpu(void)
{
pr_debug("SMP setup CPU %d\n", smp_processor_id());
/* This will have already been done on the boot CPU */
if (smp_processor_id() != boot_cpuid)
xive_setup_cpu();
}
int xive_smp_prepare_cpu(unsigned int cpu)
{
int rc;
/* Allocate per-CPU data and queues */
rc = xive_prepare_cpu(cpu);
if (rc)
return rc;
/* Allocate and setup IPI for the new CPU */
return xive_setup_cpu_ipi(cpu);
}
#ifdef CONFIG_HOTPLUG_CPU
static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
{
u32 irq;
/* We assume local irqs are disabled */
WARN_ON(!irqs_disabled());
/* Check what's already in the CPU queue */
while ((irq = xive_scan_interrupts(xc, false)) != 0) {
/*
* We need to re-route that interrupt to its new destination.
* First get and lock the descriptor
*/
struct irq_desc *desc = irq_to_desc(irq);
struct irq_data *d = irq_desc_get_irq_data(desc);
struct xive_irq_data *xd;
/*
* Ignore anything that isn't a XIVE irq and ignore
		 * IPIs, which can simply be dropped.
*/
if (d->domain != xive_irq_domain)
continue;
/*
		 * The IRQ should have already been re-routed, it's just a
		 * stale entry in the old queue, so re-trigger it in order to
		 * make it reach its new destination.
*/
#ifdef DEBUG_FLUSH
pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
cpu, irq);
#endif
raw_spin_lock(&desc->lock);
xd = irq_desc_get_handler_data(desc);
/*
* Clear saved_p to indicate that it's no longer pending
*/
xd->saved_p = false;
/*
* For LSIs, we EOI, this will cause a resend if it's
* still asserted. Otherwise do an MSI retrigger.
*/
if (xd->flags & XIVE_IRQ_FLAG_LSI)
xive_do_source_eoi(xd);
else
xive_irq_retrigger(d);
raw_spin_unlock(&desc->lock);
}
}
void xive_smp_disable_cpu(void)
{
struct xive_cpu *xc = __this_cpu_read(xive_cpu);
unsigned int cpu = smp_processor_id();
/* Migrate interrupts away from the CPU */
irq_migrate_all_off_this_cpu();
/* Set CPPR to 0 to disable flow of interrupts */
xc->cppr = 0;
out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
/* Flush everything still in the queue */
xive_flush_cpu_queue(cpu, xc);
/* Re-enable CPPR */
xc->cppr = 0xff;
out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}
void xive_flush_interrupt(void)
{
struct xive_cpu *xc = __this_cpu_read(xive_cpu);
unsigned int cpu = smp_processor_id();
/* Called if an interrupt occurs while the CPU is hot unplugged */
xive_flush_cpu_queue(cpu, xc);
}
#endif /* CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_SMP */
noinstr void xive_teardown_cpu(void)
{
struct xive_cpu *xc = __this_cpu_read(xive_cpu);
unsigned int cpu = smp_processor_id();
/* Set CPPR to 0 to disable flow of interrupts */
xc->cppr = 0;
out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);
if (xive_ops->teardown_cpu)
xive_ops->teardown_cpu(cpu, xc);
#ifdef CONFIG_SMP
/* Get rid of IPI */
xive_cleanup_cpu_ipi(cpu, xc);
#endif
/* Disable and free the queues */
xive_cleanup_cpu_queues(cpu, xc);
}
void xive_shutdown(void)
{
xive_ops->shutdown();
}
bool __init xive_core_init(struct device_node *np, const struct xive_ops *ops,
void __iomem *area, u32 offset, u8 max_prio)
{
xive_tima = area;
xive_tima_offset = offset;
xive_ops = ops;
xive_irq_priority = max_prio;
ppc_md.get_irq = xive_get_irq;
__xive_enabled = true;
pr_debug("Initializing host..\n");
xive_init_host(np);
pr_debug("Initializing boot CPU..\n");
/* Allocate per-CPU data and queues */
xive_prepare_cpu(smp_processor_id());
/* Get ready for interrupts */
xive_setup_cpu();
pr_info("Interrupt handling initialized with %s backend\n",
xive_ops->name);
pr_info("Using priority %d for all interrupts\n", max_prio);
return true;
}
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
{
unsigned int alloc_order;
struct page *pages;
__be32 *qpage;
alloc_order = xive_alloc_order(queue_shift);
pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
if (!pages)
return ERR_PTR(-ENOMEM);
qpage = (__be32 *)page_address(pages);
memset(qpage, 0, 1 << queue_shift);
return qpage;
}
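/*
 * Sizing example (illustrative, assuming xive_alloc_order() from
 * xive-internal.h reduces the queue shift by PAGE_SHIFT): a 64kB event
 * queue (queue_shift = 16) on a 4kB-page kernel gives alloc_order = 4,
 * i.e. 16 contiguous pages allocated on the CPU's node and zeroed.
 */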
static int __init xive_off(char *arg)
{
xive_cmdline_disabled = true;
return 1;
}
__setup("xive=off", xive_off);
static int __init xive_store_eoi_cmdline(char *arg)
{
if (!arg)
return 1;
if (strncmp(arg, "off", 3) == 0) {
pr_info("StoreEOI disabled on kernel command line\n");
xive_store_eoi = false;
}
return 1;
}
__setup("xive.store-eoi=", xive_store_eoi_cmdline);
#ifdef CONFIG_DEBUG_FS
static void xive_debug_show_ipi(struct seq_file *m, int cpu)
{
struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
seq_printf(m, "CPU %d: ", cpu);
if (xc) {
seq_printf(m, "pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);
#ifdef CONFIG_SMP
{
char buffer[128];
xive_irq_data_dump(&xc->ipi_data, buffer, sizeof(buffer));
seq_printf(m, "IPI=0x%08x %s", xc->hw_ipi, buffer);
}
#endif
}
seq_puts(m, "\n");
}
static void xive_debug_show_irq(struct seq_file *m, struct irq_data *d)
{
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
int rc;
u32 target;
u8 prio;
u32 lirq;
char buffer[128];
rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
if (rc) {
seq_printf(m, "IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
return;
}
seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
hw_irq, target, prio, lirq);
xive_irq_data_dump(irq_data_get_irq_handler_data(d), buffer, sizeof(buffer));
seq_puts(m, buffer);
seq_puts(m, "\n");
}
static int xive_irq_debug_show(struct seq_file *m, void *private)
{
unsigned int i;
struct irq_desc *desc;
for_each_irq_desc(i, desc) {
struct irq_data *d = irq_domain_get_irq_data(xive_irq_domain, i);
if (d)
xive_debug_show_irq(m, d);
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(xive_irq_debug);
static int xive_ipi_debug_show(struct seq_file *m, void *private)
{
int cpu;
if (xive_ops->debug_show)
xive_ops->debug_show(m, private);
for_each_online_cpu(cpu)
xive_debug_show_ipi(m, cpu);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(xive_ipi_debug);
static void xive_eq_debug_show_one(struct seq_file *m, struct xive_q *q, u8 prio)
{
int i;
seq_printf(m, "EQ%d idx=%d T=%d\n", prio, q->idx, q->toggle);
if (q->qpage) {
for (i = 0; i < q->msk + 1; i++) {
if (!(i % 8))
seq_printf(m, "%05d ", i);
seq_printf(m, "%08x%s", be32_to_cpup(q->qpage + i),
(i + 1) % 8 ? " " : "\n");
}
}
seq_puts(m, "\n");
}
static int xive_eq_debug_show(struct seq_file *m, void *private)
{
int cpu = (long)m->private;
struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
if (xc)
xive_eq_debug_show_one(m, &xc->queue[xive_irq_priority],
xive_irq_priority);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(xive_eq_debug);
static void xive_core_debugfs_create(void)
{
struct dentry *xive_dir;
struct dentry *xive_eq_dir;
long cpu;
char name[16];
xive_dir = debugfs_create_dir("xive", arch_debugfs_dir);
if (IS_ERR(xive_dir))
return;
debugfs_create_file("ipis", 0400, xive_dir,
NULL, &xive_ipi_debug_fops);
debugfs_create_file("interrupts", 0400, xive_dir,
NULL, &xive_irq_debug_fops);
xive_eq_dir = debugfs_create_dir("eqs", xive_dir);
for_each_possible_cpu(cpu) {
snprintf(name, sizeof(name), "cpu%ld", cpu);
debugfs_create_file(name, 0400, xive_eq_dir, (void *)cpu,
&xive_eq_debug_fops);
}
debugfs_create_bool("store-eoi", 0600, xive_dir, &xive_store_eoi);
if (xive_ops->debug_create)
xive_ops->debug_create(xive_dir);
}
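/*
 * Resulting layout (illustrative, assuming arch_debugfs_dir is the usual
 * <debugfs>/powerpc directory):
 *
 *   /sys/kernel/debug/powerpc/xive/ipis
 *   /sys/kernel/debug/powerpc/xive/interrupts
 *   /sys/kernel/debug/powerpc/xive/eqs/cpu0, cpu1, ...
 *   /sys/kernel/debug/powerpc/xive/store-eoi
 *
 * plus any backend specific entries added by xive_ops->debug_create().
 */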
#else
static inline void xive_core_debugfs_create(void) { }
#endif /* CONFIG_DEBUG_FS */
int xive_core_debug_init(void)
{
if (xive_enabled() && IS_ENABLED(CONFIG_DEBUG_FS))
xive_core_debugfs_create();
return 0;
}
| linux-master | arch/powerpc/sysdev/xive/common.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016,2017 IBM Corporation.
*/
#define pr_fmt(fmt) "xive: " fmt
#include <linux/types.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/libfdt.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/hvcall.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>
#include "xive-internal.h"
static u32 xive_queue_shift;
struct xive_irq_bitmap {
unsigned long *bitmap;
unsigned int base;
unsigned int count;
spinlock_t lock;
struct list_head list;
};
static LIST_HEAD(xive_irq_bitmaps);
static int __init xive_irq_bitmap_add(int base, int count)
{
struct xive_irq_bitmap *xibm;
xibm = kzalloc(sizeof(*xibm), GFP_KERNEL);
if (!xibm)
return -ENOMEM;
spin_lock_init(&xibm->lock);
xibm->base = base;
xibm->count = count;
xibm->bitmap = bitmap_zalloc(xibm->count, GFP_KERNEL);
if (!xibm->bitmap) {
kfree(xibm);
return -ENOMEM;
}
list_add(&xibm->list, &xive_irq_bitmaps);
pr_info("Using IRQ range [%x-%x]", xibm->base,
xibm->base + xibm->count - 1);
return 0;
}
static void xive_irq_bitmap_remove_all(void)
{
struct xive_irq_bitmap *xibm, *tmp;
list_for_each_entry_safe(xibm, tmp, &xive_irq_bitmaps, list) {
list_del(&xibm->list);
bitmap_free(xibm->bitmap);
kfree(xibm);
}
}
static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm)
{
int irq;
irq = find_first_zero_bit(xibm->bitmap, xibm->count);
if (irq != xibm->count) {
set_bit(irq, xibm->bitmap);
irq += xibm->base;
} else {
irq = -ENOMEM;
}
return irq;
}
static int xive_irq_bitmap_alloc(void)
{
struct xive_irq_bitmap *xibm;
unsigned long flags;
int irq = -ENOENT;
list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
spin_lock_irqsave(&xibm->lock, flags);
irq = __xive_irq_bitmap_alloc(xibm);
spin_unlock_irqrestore(&xibm->lock, flags);
if (irq >= 0)
break;
}
return irq;
}
static void xive_irq_bitmap_free(int irq)
{
unsigned long flags;
struct xive_irq_bitmap *xibm;
list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
if ((irq >= xibm->base) && (irq < xibm->base + xibm->count)) {
spin_lock_irqsave(&xibm->lock, flags);
clear_bit(irq - xibm->base, xibm->bitmap);
spin_unlock_irqrestore(&xibm->lock, flags);
break;
}
}
}
/* Based on the similar routines in RTAS */
static unsigned int plpar_busy_delay_time(long rc)
{
unsigned int ms = 0;
if (H_IS_LONG_BUSY(rc)) {
ms = get_longbusy_msecs(rc);
} else if (rc == H_BUSY) {
ms = 10; /* seems appropriate for XIVE hcalls */
}
return ms;
}
static unsigned int plpar_busy_delay(int rc)
{
unsigned int ms;
ms = plpar_busy_delay_time(rc);
if (ms)
mdelay(ms);
return ms;
}
/*
* Note: this call has a partition wide scope and can take a while to
* complete. If it returns H_LONG_BUSY_* it should be retried
* periodically.
*/
static long plpar_int_reset(unsigned long flags)
{
long rc;
do {
rc = plpar_hcall_norets(H_INT_RESET, flags);
} while (plpar_busy_delay(rc));
if (rc)
pr_err("H_INT_RESET failed %ld\n", rc);
return rc;
}
static long plpar_int_get_source_info(unsigned long flags,
unsigned long lisn,
unsigned long *src_flags,
unsigned long *eoi_page,
unsigned long *trig_page,
unsigned long *esb_shift)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
long rc;
do {
rc = plpar_hcall(H_INT_GET_SOURCE_INFO, retbuf, flags, lisn);
} while (plpar_busy_delay(rc));
if (rc) {
pr_err("H_INT_GET_SOURCE_INFO lisn=0x%lx failed %ld\n", lisn, rc);
return rc;
}
*src_flags = retbuf[0];
*eoi_page = retbuf[1];
*trig_page = retbuf[2];
*esb_shift = retbuf[3];
pr_debug("H_INT_GET_SOURCE_INFO lisn=0x%lx flags=0x%lx eoi=0x%lx trig=0x%lx shift=0x%lx\n",
lisn, retbuf[0], retbuf[1], retbuf[2], retbuf[3]);
return 0;
}
#define XIVE_SRC_SET_EISN (1ull << (63 - 62))
#define XIVE_SRC_MASK (1ull << (63 - 63)) /* unused */
static long plpar_int_set_source_config(unsigned long flags,
unsigned long lisn,
unsigned long target,
unsigned long prio,
unsigned long sw_irq)
{
long rc;
pr_debug("H_INT_SET_SOURCE_CONFIG flags=0x%lx lisn=0x%lx target=%ld prio=%ld sw_irq=%ld\n",
flags, lisn, target, prio, sw_irq);
do {
rc = plpar_hcall_norets(H_INT_SET_SOURCE_CONFIG, flags, lisn,
target, prio, sw_irq);
} while (plpar_busy_delay(rc));
if (rc) {
pr_err("H_INT_SET_SOURCE_CONFIG lisn=0x%lx target=%ld prio=%ld failed %ld\n",
lisn, target, prio, rc);
return rc;
}
return 0;
}
static long plpar_int_get_source_config(unsigned long flags,
unsigned long lisn,
unsigned long *target,
unsigned long *prio,
unsigned long *sw_irq)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
long rc;
pr_debug("H_INT_GET_SOURCE_CONFIG flags=0x%lx lisn=0x%lx\n", flags, lisn);
do {
rc = plpar_hcall(H_INT_GET_SOURCE_CONFIG, retbuf, flags, lisn,
target, prio, sw_irq);
} while (plpar_busy_delay(rc));
if (rc) {
pr_err("H_INT_GET_SOURCE_CONFIG lisn=0x%lx failed %ld\n",
lisn, rc);
return rc;
}
*target = retbuf[0];
*prio = retbuf[1];
*sw_irq = retbuf[2];
pr_debug("H_INT_GET_SOURCE_CONFIG target=%ld prio=%ld sw_irq=%ld\n",
retbuf[0], retbuf[1], retbuf[2]);
return 0;
}
static long plpar_int_get_queue_info(unsigned long flags,
unsigned long target,
unsigned long priority,
unsigned long *esn_page,
unsigned long *esn_size)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
long rc;
do {
rc = plpar_hcall(H_INT_GET_QUEUE_INFO, retbuf, flags, target,
priority);
} while (plpar_busy_delay(rc));
if (rc) {
pr_err("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld failed %ld\n",
target, priority, rc);
return rc;
}
*esn_page = retbuf[0];
*esn_size = retbuf[1];
pr_debug("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld page=0x%lx size=0x%lx\n",
target, priority, retbuf[0], retbuf[1]);
return 0;
}
#define XIVE_EQ_ALWAYS_NOTIFY (1ull << (63 - 63))
static long plpar_int_set_queue_config(unsigned long flags,
unsigned long target,
unsigned long priority,
unsigned long qpage,
unsigned long qsize)
{
long rc;
pr_debug("H_INT_SET_QUEUE_CONFIG flags=0x%lx target=%ld priority=0x%lx qpage=0x%lx qsize=0x%lx\n",
flags, target, priority, qpage, qsize);
do {
rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target,
priority, qpage, qsize);
} while (plpar_busy_delay(rc));
if (rc) {
pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=0x%lx returned %ld\n",
target, priority, qpage, rc);
return rc;
}
return 0;
}
static long plpar_int_sync(unsigned long flags, unsigned long lisn)
{
long rc;
do {
rc = plpar_hcall_norets(H_INT_SYNC, flags, lisn);
} while (plpar_busy_delay(rc));
if (rc) {
pr_err("H_INT_SYNC lisn=0x%lx returned %ld\n", lisn, rc);
return rc;
}
return 0;
}
#define XIVE_ESB_FLAG_STORE (1ull << (63 - 63))
static long plpar_int_esb(unsigned long flags,
unsigned long lisn,
unsigned long offset,
unsigned long in_data,
unsigned long *out_data)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
long rc;
pr_debug("H_INT_ESB flags=0x%lx lisn=0x%lx offset=0x%lx in=0x%lx\n",
flags, lisn, offset, in_data);
do {
rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset,
in_data);
} while (plpar_busy_delay(rc));
if (rc) {
pr_err("H_INT_ESB lisn=0x%lx offset=0x%lx returned %ld\n",
lisn, offset, rc);
return rc;
}
*out_data = retbuf[0];
return 0;
}
static u64 xive_spapr_esb_rw(u32 lisn, u32 offset, u64 data, bool write)
{
unsigned long read_data;
long rc;
rc = plpar_int_esb(write ? XIVE_ESB_FLAG_STORE : 0,
lisn, offset, data, &read_data);
if (rc)
return -1;
return write ? 0 : read_data;
}
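/*
 * Usage sketch (illustrative): the common code goes through this hook
 * whenever XIVE_IRQ_FLAG_H_INT_ESB is set. A load such as
 * xive_spapr_esb_rw(lisn, XIVE_ESB_GET, 0, false) returns the current PQ
 * bits (assuming the usual XIVE_ESB_GET offset from asm/xive-regs.h),
 * while a store-form access passes write=true so the hcall is issued
 * with XIVE_ESB_FLAG_STORE; -1 is returned if the hcall fails.
 */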
#define XIVE_SRC_H_INT_ESB (1ull << (63 - 60))
#define XIVE_SRC_LSI (1ull << (63 - 61))
#define XIVE_SRC_TRIGGER (1ull << (63 - 62))
#define XIVE_SRC_STORE_EOI (1ull << (63 - 63))
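/*
 * Note on the bit numbering (illustrative): the hcall flags use IBM
 * (big-endian) bit positions, so "63 - n" converts them to the usual
 * LSB-0 shift. For instance XIVE_SRC_STORE_EOI (IBM bit 63) is 0x1 and
 * XIVE_SRC_H_INT_ESB (IBM bit 60) is 0x8.
 */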
static int xive_spapr_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
long rc;
unsigned long flags;
unsigned long eoi_page;
unsigned long trig_page;
unsigned long esb_shift;
memset(data, 0, sizeof(*data));
rc = plpar_int_get_source_info(0, hw_irq, &flags, &eoi_page, &trig_page,
&esb_shift);
if (rc)
return -EINVAL;
if (flags & XIVE_SRC_H_INT_ESB)
data->flags |= XIVE_IRQ_FLAG_H_INT_ESB;
if (flags & XIVE_SRC_STORE_EOI)
data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
if (flags & XIVE_SRC_LSI)
data->flags |= XIVE_IRQ_FLAG_LSI;
data->eoi_page = eoi_page;
data->esb_shift = esb_shift;
data->trig_page = trig_page;
data->hw_irq = hw_irq;
/*
	 * No chip-id for the sPAPR backend. This has an impact on how we
* pick a target. See xive_pick_irq_target().
*/
data->src_chip = XIVE_INVALID_CHIP_ID;
/*
* When the H_INT_ESB flag is set, the H_INT_ESB hcall should
* be used for interrupt management. Skip the remapping of the
* ESB pages which are not available.
*/
if (data->flags & XIVE_IRQ_FLAG_H_INT_ESB)
return 0;
data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
if (!data->eoi_mmio) {
pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
return -ENOMEM;
}
/* Full function page supports trigger */
if (flags & XIVE_SRC_TRIGGER) {
data->trig_mmio = data->eoi_mmio;
return 0;
}
data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
if (!data->trig_mmio) {
iounmap(data->eoi_mmio);
pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
return -ENOMEM;
}
return 0;
}
static int xive_spapr_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
long rc;
rc = plpar_int_set_source_config(XIVE_SRC_SET_EISN, hw_irq, target,
prio, sw_irq);
return rc == 0 ? 0 : -ENXIO;
}
static int xive_spapr_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
u32 *sw_irq)
{
long rc;
unsigned long h_target;
unsigned long h_prio;
unsigned long h_sw_irq;
rc = plpar_int_get_source_config(0, hw_irq, &h_target, &h_prio,
&h_sw_irq);
*target = h_target;
*prio = h_prio;
*sw_irq = h_sw_irq;
return rc == 0 ? 0 : -ENXIO;
}
/* This can be called multiple times to change a queue configuration */
static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
__be32 *qpage, u32 order)
{
s64 rc = 0;
unsigned long esn_page;
unsigned long esn_size;
u64 flags, qpage_phys;
/* If there's an actual queue page, clean it */
if (order) {
if (WARN_ON(!qpage))
return -EINVAL;
qpage_phys = __pa(qpage);
} else {
qpage_phys = 0;
}
/* Initialize the rest of the fields */
q->msk = order ? ((1u << (order - 2)) - 1) : 0;
q->idx = 0;
q->toggle = 0;
rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
if (rc) {
pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
target, prio);
rc = -EIO;
goto fail;
}
/* TODO: add support for the notification page */
q->eoi_phys = esn_page;
/* Default is to always notify */
flags = XIVE_EQ_ALWAYS_NOTIFY;
/* Configure and enable the queue in HW */
rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
if (rc) {
pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
target, prio);
rc = -EIO;
} else {
q->qpage = qpage;
if (is_secure_guest())
uv_share_page(PHYS_PFN(qpage_phys),
1 << xive_alloc_order(order));
}
fail:
return rc;
}
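/*
 * Worked example (illustrative): for a 64kB queue page (order = 16) the
 * queue holds 2^(16 - 2) = 16384 four-byte event entries, so q->msk is
 * set to 16383, which lets the ring index wrap with a simple AND;
 * order = 0 disables the queue (qpage_phys = 0, msk = 0).
 */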
static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
u8 prio)
{
struct xive_q *q = &xc->queue[prio];
__be32 *qpage;
qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
if (IS_ERR(qpage))
return PTR_ERR(qpage);
return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
q, prio, qpage, xive_queue_shift);
}
static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
u8 prio)
{
struct xive_q *q = &xc->queue[prio];
unsigned int alloc_order;
long rc;
int hw_cpu = get_hard_smp_processor_id(cpu);
rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
if (rc)
pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
hw_cpu, prio);
alloc_order = xive_alloc_order(xive_queue_shift);
if (is_secure_guest())
uv_unshare_page(PHYS_PFN(__pa(q->qpage)), 1 << alloc_order);
free_pages((unsigned long)q->qpage, alloc_order);
q->qpage = NULL;
}
static bool xive_spapr_match(struct device_node *node)
{
/* Ignore cascaded controllers for the moment */
return true;
}
#ifdef CONFIG_SMP
static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
int irq = xive_irq_bitmap_alloc();
if (irq < 0) {
pr_err("Failed to allocate IPI on CPU %d\n", cpu);
return -ENXIO;
}
xc->hw_ipi = irq;
return 0;
}
static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
if (xc->hw_ipi == XIVE_BAD_IRQ)
return;
xive_irq_bitmap_free(xc->hw_ipi);
xc->hw_ipi = XIVE_BAD_IRQ;
}
#endif /* CONFIG_SMP */
static void xive_spapr_shutdown(void)
{
plpar_int_reset(0);
}
/*
* Perform an "ack" cycle on the current thread. Grab the pending
* active priorities and update the CPPR to the most favored one.
*/
static void xive_spapr_update_pending(struct xive_cpu *xc)
{
u8 nsr, cppr;
u16 ack;
/*
* Perform the "Acknowledge O/S to Register" cycle.
*
	 * Let's speed up the access to the TIMA using the raw I/O
* accessor as we don't need the synchronisation routine of
* the higher level ones
*/
ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));
/* Synchronize subsequent queue accesses */
mb();
/*
* Grab the CPPR and the "NSR" field which indicates the source
* of the interrupt (if any)
*/
cppr = ack & 0xff;
nsr = ack >> 8;
if (nsr & TM_QW1_NSR_EO) {
if (cppr == 0xff)
return;
/* Mark the priority pending */
xc->pending_prio |= 1 << cppr;
/*
* A new interrupt should never have a CPPR less favored
* than our current one.
*/
if (cppr >= xc->cppr)
pr_err("CPU %d odd ack CPPR, got %d at %d\n",
smp_processor_id(), cppr, xc->cppr);
/* Update our idea of what the CPPR is */
xc->cppr = cppr;
}
}
static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
/* Only some debug on the TIMA settings */
pr_debug("(HW value: %08x %08x %08x)\n",
in_be32(xive_tima + TM_QW1_OS + TM_WORD0),
in_be32(xive_tima + TM_QW1_OS + TM_WORD1),
in_be32(xive_tima + TM_QW1_OS + TM_WORD2));
}
static void xive_spapr_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	/* Nothing to do */
}
static void xive_spapr_sync_source(u32 hw_irq)
{
/* Specs are unclear on what this is doing */
plpar_int_sync(0, hw_irq);
}
static int xive_spapr_debug_show(struct seq_file *m, void *private)
{
struct xive_irq_bitmap *xibm;
char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
memset(buf, 0, PAGE_SIZE);
bitmap_print_to_pagebuf(true, buf, xibm->bitmap, xibm->count);
seq_printf(m, "bitmap #%d: %s", xibm->count, buf);
}
kfree(buf);
return 0;
}
static const struct xive_ops xive_spapr_ops = {
.populate_irq_data = xive_spapr_populate_irq_data,
.configure_irq = xive_spapr_configure_irq,
.get_irq_config = xive_spapr_get_irq_config,
.setup_queue = xive_spapr_setup_queue,
.cleanup_queue = xive_spapr_cleanup_queue,
.match = xive_spapr_match,
.shutdown = xive_spapr_shutdown,
.update_pending = xive_spapr_update_pending,
.setup_cpu = xive_spapr_setup_cpu,
.teardown_cpu = xive_spapr_teardown_cpu,
.sync_source = xive_spapr_sync_source,
.esb_rw = xive_spapr_esb_rw,
#ifdef CONFIG_SMP
.get_ipi = xive_spapr_get_ipi,
.put_ipi = xive_spapr_put_ipi,
.debug_show = xive_spapr_debug_show,
#endif /* CONFIG_SMP */
.name = "spapr",
};
/*
* get max priority from "/ibm,plat-res-int-priorities"
*/
static bool __init xive_get_max_prio(u8 *max_prio)
{
struct device_node *rootdn;
const __be32 *reg;
u32 len;
int prio, found;
rootdn = of_find_node_by_path("/");
if (!rootdn) {
pr_err("not root node found !\n");
return false;
}
reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
of_node_put(rootdn);
if (!reg) {
pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
return false;
}
if (len % (2 * sizeof(u32)) != 0) {
pr_err("invalid 'ibm,plat-res-int-priorities' property\n");
return false;
}
/* HW supports priorities in the range [0-7] and 0xFF is a
* wildcard priority used to mask. We scan the ranges reserved
* by the hypervisor to find the lowest priority we can use.
*/
found = 0xFF;
for (prio = 0; prio < 8; prio++) {
int reserved = 0;
int i;
for (i = 0; i < len / (2 * sizeof(u32)); i++) {
int base = be32_to_cpu(reg[2 * i]);
int range = be32_to_cpu(reg[2 * i + 1]);
if (prio >= base && prio < base + range)
reserved++;
}
if (!reserved)
found = prio;
}
if (found == 0xFF) {
pr_err("no valid priority found in 'ibm,plat-res-int-priorities'\n");
return false;
}
*max_prio = found;
return true;
}
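/*
 * Worked example (illustrative, property contents are hypothetical): if
 * "ibm,plat-res-int-priorities" contains the single range <6 2>, then
 * priorities 6 and 7 are reserved by the hypervisor, the scan leaves
 * found = 5 and the kernel ends up using priority 5 for all of its
 * interrupts.
 */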
static const u8 *__init get_vec5_feature(unsigned int index)
{
unsigned long root, chosen;
int size;
const u8 *vec5;
root = of_get_flat_dt_root();
chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
if (chosen == -FDT_ERR_NOTFOUND)
return NULL;
vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
if (!vec5)
return NULL;
if (size <= index)
return NULL;
return vec5 + index;
}
static bool __init xive_spapr_disabled(void)
{
const u8 *vec5_xive;
vec5_xive = get_vec5_feature(OV5_INDX(OV5_XIVE_SUPPORT));
if (vec5_xive) {
u8 val;
val = *vec5_xive & OV5_FEAT(OV5_XIVE_SUPPORT);
switch (val) {
case OV5_FEAT(OV5_XIVE_EITHER):
case OV5_FEAT(OV5_XIVE_LEGACY):
break;
case OV5_FEAT(OV5_XIVE_EXPLOIT):
/* Hypervisor only supports XIVE */
if (xive_cmdline_disabled)
pr_warn("WARNING: Ignoring cmdline option xive=off\n");
return false;
default:
pr_warn("%s: Unknown xive support option: 0x%x\n",
__func__, val);
break;
}
}
return xive_cmdline_disabled;
}
bool __init xive_spapr_init(void)
{
struct device_node *np;
struct resource r;
void __iomem *tima;
struct property *prop;
u8 max_prio;
u32 val;
u32 len;
const __be32 *reg;
int i, err;
if (xive_spapr_disabled())
return false;
pr_devel("%s()\n", __func__);
np = of_find_compatible_node(NULL, NULL, "ibm,power-ivpe");
if (!np) {
pr_devel("not found !\n");
return false;
}
pr_devel("Found %s\n", np->full_name);
/* Resource 1 is the OS ring TIMA */
if (of_address_to_resource(np, 1, &r)) {
pr_err("Failed to get thread mgmnt area resource\n");
goto err_put;
}
tima = ioremap(r.start, resource_size(&r));
if (!tima) {
pr_err("Failed to map thread mgmnt area\n");
goto err_put;
}
if (!xive_get_max_prio(&max_prio))
goto err_unmap;
/* Feed the IRQ number allocator with the ranges given in the DT */
reg = of_get_property(np, "ibm,xive-lisn-ranges", &len);
if (!reg) {
pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n");
goto err_unmap;
}
if (len % (2 * sizeof(u32)) != 0) {
pr_err("invalid 'ibm,xive-lisn-ranges' property\n");
goto err_unmap;
}
for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2) {
err = xive_irq_bitmap_add(be32_to_cpu(reg[0]),
be32_to_cpu(reg[1]));
if (err < 0)
goto err_mem_free;
}
/* Iterate the EQ sizes and pick one */
of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) {
xive_queue_shift = val;
if (val == PAGE_SHIFT)
break;
}
/* Initialize XIVE core with our backend */
if (!xive_core_init(np, &xive_spapr_ops, tima, TM_QW1_OS, max_prio))
goto err_mem_free;
of_node_put(np);
pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
return true;
err_mem_free:
xive_irq_bitmap_remove_all();
err_unmap:
iounmap(tima);
err_put:
of_node_put(np);
return false;
}
machine_arch_initcall(pseries, xive_core_debug_init);
| linux-master | arch/powerpc/sysdev/xive/spapr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016,2017 IBM Corporation.
*/
#define pr_fmt(fmt) "xive: " fmt
#include <linux/types.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/kmemleak.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/opal.h>
#include <asm/kvm_ppc.h>
#include "xive-internal.h"
static u32 xive_provision_size;
static u32 *xive_provision_chips;
static u32 xive_provision_chip_count;
static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;
static bool xive_has_single_esc;
bool xive_has_save_restore;
int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
__be64 flags, eoi_page, trig_page;
__be32 esb_shift, src_chip;
u64 opal_flags;
s64 rc;
memset(data, 0, sizeof(*data));
rc = opal_xive_get_irq_info(hw_irq, &flags, &eoi_page, &trig_page,
&esb_shift, &src_chip);
if (rc) {
pr_err("opal_xive_get_irq_info(0x%x) returned %lld\n",
hw_irq, rc);
return -EINVAL;
}
opal_flags = be64_to_cpu(flags);
if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI2)
data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
if (opal_flags & OPAL_XIVE_IRQ_LSI)
data->flags |= XIVE_IRQ_FLAG_LSI;
data->eoi_page = be64_to_cpu(eoi_page);
data->trig_page = be64_to_cpu(trig_page);
data->esb_shift = be32_to_cpu(esb_shift);
data->src_chip = be32_to_cpu(src_chip);
data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
if (!data->eoi_mmio) {
pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
return -ENOMEM;
}
data->hw_irq = hw_irq;
if (!data->trig_page)
return 0;
if (data->trig_page == data->eoi_page) {
data->trig_mmio = data->eoi_mmio;
return 0;
}
data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
if (!data->trig_mmio) {
pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
return -ENOMEM;
}
return 0;
}
EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);
int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
s64 rc;
for (;;) {
rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
if (rc != OPAL_BUSY)
break;
msleep(OPAL_BUSY_DELAY_MS);
}
return rc == 0 ? 0 : -ENXIO;
}
EXPORT_SYMBOL_GPL(xive_native_configure_irq);
static int xive_native_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
u32 *sw_irq)
{
s64 rc;
__be64 vp;
__be32 lirq;
rc = opal_xive_get_irq_config(hw_irq, &vp, prio, &lirq);
*target = be64_to_cpu(vp);
*sw_irq = be32_to_cpu(lirq);
return rc == 0 ? 0 : -ENXIO;
}
#define vp_err(vp, fmt, ...) pr_err("VP[0x%x]: " fmt, vp, ##__VA_ARGS__)
/* This can be called multiple times to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
__be32 *qpage, u32 order, bool can_escalate)
{
s64 rc = 0;
__be64 qeoi_page_be;
__be32 esc_irq_be;
u64 flags, qpage_phys;
/* If there's an actual queue page, clean it */
if (order) {
if (WARN_ON(!qpage))
return -EINVAL;
qpage_phys = __pa(qpage);
} else
qpage_phys = 0;
/* Initialize the rest of the fields */
q->msk = order ? ((1u << (order - 2)) - 1) : 0;
q->idx = 0;
q->toggle = 0;
rc = opal_xive_get_queue_info(vp_id, prio, NULL, NULL,
&qeoi_page_be,
&esc_irq_be,
NULL);
if (rc) {
vp_err(vp_id, "Failed to get queue %d info : %lld\n", prio, rc);
rc = -EIO;
goto fail;
}
q->eoi_phys = be64_to_cpu(qeoi_page_be);
/* Default flags */
flags = OPAL_XIVE_EQ_ALWAYS_NOTIFY | OPAL_XIVE_EQ_ENABLED;
/* Escalation needed ? */
if (can_escalate) {
q->esc_irq = be32_to_cpu(esc_irq_be);
flags |= OPAL_XIVE_EQ_ESCALATE;
}
/* Configure and enable the queue in HW */
for (;;) {
rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
if (rc != OPAL_BUSY)
break;
msleep(OPAL_BUSY_DELAY_MS);
}
if (rc) {
vp_err(vp_id, "Failed to set queue %d info: %lld\n", prio, rc);
rc = -EIO;
} else {
/*
* KVM code requires all of the above to be visible before
* q->qpage is set due to how it manages IPI EOIs
*/
wmb();
q->qpage = qpage;
}
fail:
return rc;
}
EXPORT_SYMBOL_GPL(xive_native_configure_queue);
static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
s64 rc;
/* Disable the queue in HW */
for (;;) {
rc = opal_xive_set_queue_info(vp_id, prio, 0, 0, 0);
if (rc != OPAL_BUSY)
break;
msleep(OPAL_BUSY_DELAY_MS);
}
if (rc)
vp_err(vp_id, "Failed to disable queue %d : %lld\n", prio, rc);
}
void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
__xive_native_disable_queue(vp_id, q, prio);
}
EXPORT_SYMBOL_GPL(xive_native_disable_queue);
static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
struct xive_q *q = &xc->queue[prio];
__be32 *qpage;
qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
if (IS_ERR(qpage))
return PTR_ERR(qpage);
return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
q, prio, qpage, xive_queue_shift, false);
}
static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
struct xive_q *q = &xc->queue[prio];
unsigned int alloc_order;
/*
* We use the variant with no iounmap as this is called on exec
* from an IPI and iounmap isn't safe
*/
__xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
alloc_order = xive_alloc_order(xive_queue_shift);
free_pages((unsigned long)q->qpage, alloc_order);
q->qpage = NULL;
}
static bool xive_native_match(struct device_node *node)
{
return of_device_is_compatible(node, "ibm,opal-xive-vc");
}
static s64 opal_xive_allocate_irq(u32 chip_id)
{
s64 irq = opal_xive_allocate_irq_raw(chip_id);
/*
* Old versions of skiboot can incorrectly return 0xffffffff to
* indicate no space, fix it up here.
*/
return irq == 0xffffffff ? OPAL_RESOURCE : irq;
}
#ifdef CONFIG_SMP
static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
s64 irq;
/* Allocate an IPI and populate info about it */
for (;;) {
irq = opal_xive_allocate_irq(xc->chip_id);
if (irq == OPAL_BUSY) {
msleep(OPAL_BUSY_DELAY_MS);
continue;
}
if (irq < 0) {
pr_err("Failed to allocate IPI on CPU %d\n", cpu);
return -ENXIO;
}
xc->hw_ipi = irq;
break;
}
return 0;
}
#endif /* CONFIG_SMP */
u32 xive_native_alloc_irq_on_chip(u32 chip_id)
{
s64 rc;
for (;;) {
rc = opal_xive_allocate_irq(chip_id);
if (rc != OPAL_BUSY)
break;
msleep(OPAL_BUSY_DELAY_MS);
}
if (rc < 0)
return 0;
return rc;
}
EXPORT_SYMBOL_GPL(xive_native_alloc_irq_on_chip);
void xive_native_free_irq(u32 irq)
{
for (;;) {
s64 rc = opal_xive_free_irq(irq);
if (rc != OPAL_BUSY)
break;
msleep(OPAL_BUSY_DELAY_MS);
}
}
EXPORT_SYMBOL_GPL(xive_native_free_irq);
#ifdef CONFIG_SMP
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
s64 rc;
/* Free the IPI */
if (xc->hw_ipi == XIVE_BAD_IRQ)
return;
for (;;) {
rc = opal_xive_free_irq(xc->hw_ipi);
if (rc == OPAL_BUSY) {
msleep(OPAL_BUSY_DELAY_MS);
continue;
}
xc->hw_ipi = XIVE_BAD_IRQ;
break;
}
}
#endif /* CONFIG_SMP */
static void xive_native_shutdown(void)
{
/* Switch the XIVE to emulation mode */
opal_xive_reset(OPAL_XIVE_MODE_EMU);
}
/*
* Perform an "ack" cycle on the current thread, thus
* grabbing the pending active priorities and updating
* the CPPR to the most favored one.
*/
static void xive_native_update_pending(struct xive_cpu *xc)
{
u8 he, cppr;
u16 ack;
/* Perform the acknowledge hypervisor to register cycle */
ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_HV_REG));
/* Synchronize subsequent queue accesses */
mb();
/*
* Grab the CPPR and the "HE" field which indicates the source
* of the hypervisor interrupt (if any)
*/
cppr = ack & 0xff;
he = (ack >> 8) >> 6;
switch(he) {
case TM_QW3_NSR_HE_NONE: /* Nothing to see here */
break;
case TM_QW3_NSR_HE_PHYS: /* Physical thread interrupt */
if (cppr == 0xff)
return;
/* Mark the priority pending */
xc->pending_prio |= 1 << cppr;
/*
* A new interrupt should never have a CPPR less favored
* than our current one.
*/
if (cppr >= xc->cppr)
pr_err("CPU %d odd ack CPPR, got %d at %d\n",
smp_processor_id(), cppr, xc->cppr);
/* Update our idea of what the CPPR is */
xc->cppr = cppr;
break;
case TM_QW3_NSR_HE_POOL: /* HV Pool interrupt (unused) */
case TM_QW3_NSR_HE_LSI: /* Legacy FW LSI (unused) */
pr_err("CPU %d got unexpected interrupt type HE=%d\n",
smp_processor_id(), he);
return;
}
}
static void xive_native_prepare_cpu(unsigned int cpu, struct xive_cpu *xc)
{
xc->chip_id = cpu_to_chip_id(cpu);
}
static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
s64 rc;
u32 vp;
__be64 vp_cam_be;
u64 vp_cam;
if (xive_pool_vps == XIVE_INVALID_VP)
return;
/* Check if pool VP already active, if it is, pull it */
if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);
/* Enable the pool VP */
vp = xive_pool_vps + cpu;
for (;;) {
rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
if (rc != OPAL_BUSY)
break;
msleep(OPAL_BUSY_DELAY_MS);
}
if (rc) {
pr_err("Failed to enable pool VP on CPU %d\n", cpu);
return;
}
	/* Grab its CAM value */
rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
if (rc) {
pr_err("Failed to get pool VP info CPU %d\n", cpu);
return;
}
vp_cam = be64_to_cpu(vp_cam_be);
/* Push it on the CPU (set LSMFB to 0xff to skip backlog scan) */
out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | vp_cam);
}
static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
s64 rc;
u32 vp;
if (xive_pool_vps == XIVE_INVALID_VP)
return;
/* Pull the pool VP from the CPU */
in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);
/* Disable it */
vp = xive_pool_vps + cpu;
for (;;) {
rc = opal_xive_set_vp_info(vp, 0, 0);
if (rc != OPAL_BUSY)
break;
msleep(OPAL_BUSY_DELAY_MS);
}
}
void xive_native_sync_source(u32 hw_irq)
{
opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_source);
void xive_native_sync_queue(u32 hw_irq)
{
opal_xive_sync(XIVE_SYNC_QUEUE, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_queue);
#ifdef CONFIG_DEBUG_FS
static int xive_native_debug_create(struct dentry *xive_dir)
{
debugfs_create_bool("save-restore", 0600, xive_dir, &xive_has_save_restore);
return 0;
}
#endif
static const struct xive_ops xive_native_ops = {
.populate_irq_data = xive_native_populate_irq_data,
.configure_irq = xive_native_configure_irq,
.get_irq_config = xive_native_get_irq_config,
.setup_queue = xive_native_setup_queue,
.cleanup_queue = xive_native_cleanup_queue,
.match = xive_native_match,
.shutdown = xive_native_shutdown,
.update_pending = xive_native_update_pending,
.prepare_cpu = xive_native_prepare_cpu,
.setup_cpu = xive_native_setup_cpu,
.teardown_cpu = xive_native_teardown_cpu,
.sync_source = xive_native_sync_source,
#ifdef CONFIG_SMP
.get_ipi = xive_native_get_ipi,
.put_ipi = xive_native_put_ipi,
#endif /* CONFIG_SMP */
#ifdef CONFIG_DEBUG_FS
.debug_create = xive_native_debug_create,
#endif /* CONFIG_DEBUG_FS */
.name = "native",
};
static bool __init xive_parse_provisioning(struct device_node *np)
{
int rc;
if (of_property_read_u32(np, "ibm,xive-provision-page-size",
&xive_provision_size) < 0)
return true;
rc = of_property_count_elems_of_size(np, "ibm,xive-provision-chips", 4);
if (rc < 0) {
pr_err("Error %d getting provision chips array\n", rc);
return false;
}
xive_provision_chip_count = rc;
if (rc == 0)
return true;
xive_provision_chips = kcalloc(4, xive_provision_chip_count,
GFP_KERNEL);
if (WARN_ON(!xive_provision_chips))
return false;
rc = of_property_read_u32_array(np, "ibm,xive-provision-chips",
xive_provision_chips,
xive_provision_chip_count);
if (rc < 0) {
pr_err("Error %d reading provision chips array\n", rc);
return false;
}
xive_provision_cache = kmem_cache_create("xive-provision",
xive_provision_size,
xive_provision_size,
0, NULL);
if (!xive_provision_cache) {
pr_err("Failed to allocate provision cache\n");
return false;
}
return true;
}
static void __init xive_native_setup_pools(void)
{
/* Allocate a pool big enough */
pr_debug("Allocating VP block for pool size %u\n", nr_cpu_ids);
xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
pr_err("Failed to allocate pool VP, KVM might not function\n");
pr_debug("Pool VPs allocated at 0x%x for %u max CPUs\n",
xive_pool_vps, nr_cpu_ids);
}
u32 xive_native_default_eq_shift(void)
{
return xive_queue_shift;
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);
unsigned long xive_tima_os;
EXPORT_SYMBOL_GPL(xive_tima_os);
bool __init xive_native_init(void)
{
struct device_node *np;
struct resource r;
void __iomem *tima;
struct property *prop;
u8 max_prio = 7;
const __be32 *p;
u32 val, cpu;
s64 rc;
if (xive_cmdline_disabled)
return false;
pr_devel("xive_native_init()\n");
np = of_find_compatible_node(NULL, NULL, "ibm,opal-xive-pe");
if (!np) {
pr_devel("not found !\n");
return false;
}
pr_devel("Found %pOF\n", np);
/* Resource 1 is HV window */
if (of_address_to_resource(np, 1, &r)) {
pr_err("Failed to get thread mgmnt area resource\n");
goto err_put;
}
tima = ioremap(r.start, resource_size(&r));
if (!tima) {
pr_err("Failed to map thread mgmnt area\n");
goto err_put;
}
/* Read number of priorities */
if (of_property_read_u32(np, "ibm,xive-#priorities", &val) == 0)
max_prio = val - 1;
/* Iterate the EQ sizes and pick one */
of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) {
xive_queue_shift = val;
if (val == PAGE_SHIFT)
break;
}
/* Do we support single escalation */
xive_has_single_esc = of_property_read_bool(np, "single-escalation-support");
xive_has_save_restore = of_property_read_bool(np, "vp-save-restore");
/* Configure Thread Management areas for KVM */
for_each_possible_cpu(cpu)
kvmppc_set_xive_tima(cpu, r.start, tima);
/* Resource 2 is OS window */
if (of_address_to_resource(np, 2, &r)) {
pr_err("Failed to get thread mgmnt area resource\n");
goto err_put;
}
xive_tima_os = r.start;
/* Grab size of provisioning pages */
xive_parse_provisioning(np);
/* Switch the XIVE to exploitation mode */
rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);
if (rc) {
pr_err("Switch to exploitation mode failed with error %lld\n", rc);
goto err_put;
}
/* Setup some dummy HV pool VPs */
xive_native_setup_pools();
/* Initialize XIVE core with our backend */
if (!xive_core_init(np, &xive_native_ops, tima, TM_QW3_HV_PHYS,
max_prio)) {
opal_xive_reset(OPAL_XIVE_MODE_EMU);
goto err_put;
}
of_node_put(np);
pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
return true;
err_put:
of_node_put(np);
return false;
}
static bool xive_native_provision_pages(void)
{
u32 i;
void *p;
for (i = 0; i < xive_provision_chip_count; i++) {
u32 chip = xive_provision_chips[i];
/*
* XXX TODO: Try to make the allocation local to the node where
* the chip resides.
*/
p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL);
if (!p) {
pr_err("Failed to allocate provisioning page\n");
return false;
}
kmemleak_ignore(p);
opal_xive_donate_page(chip, __pa(p));
}
return true;
}
u32 xive_native_alloc_vp_block(u32 max_vcpus)
{
s64 rc;
u32 order;
order = fls(max_vcpus) - 1;
if (max_vcpus > (1 << order))
order++;
pr_debug("VP block alloc, for max VCPUs %d use order %d\n",
max_vcpus, order);
for (;;) {
rc = opal_xive_alloc_vp_block(order);
switch (rc) {
case OPAL_BUSY:
msleep(OPAL_BUSY_DELAY_MS);
break;
case OPAL_XIVE_PROVISIONING:
if (!xive_native_provision_pages())
return XIVE_INVALID_VP;
break;
default:
if (rc < 0) {
pr_err("OPAL failed to allocate VCPUs order %d, err %lld\n",
order, rc);
return XIVE_INVALID_VP;
}
return rc;
}
}
}
EXPORT_SYMBOL_GPL(xive_native_alloc_vp_block);
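/*
 * Worked example (illustrative): for max_vcpus = 6, fls(6) - 1 = 2 and
 * 6 > (1 << 2), so the order is bumped to 3 and OPAL is asked for a
 * block of 8 VPs. An OPAL_XIVE_PROVISIONING response triggers page
 * donation via xive_native_provision_pages() before the allocation is
 * retried.
 */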
void xive_native_free_vp_block(u32 vp_base)
{
s64 rc;
if (vp_base == XIVE_INVALID_VP)
return;
rc = opal_xive_free_vp_block(vp_base);
if (rc < 0)
pr_warn("OPAL error %lld freeing VP block\n", rc);
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);
int xive_native_enable_vp(u32 vp_id, bool single_escalation)
{
s64 rc;
u64 flags = OPAL_XIVE_VP_ENABLED;
if (single_escalation)
flags |= OPAL_XIVE_VP_SINGLE_ESCALATION;
for (;;) {
rc = opal_xive_set_vp_info(vp_id, flags, 0);
if (rc != OPAL_BUSY)
break;
msleep(OPAL_BUSY_DELAY_MS);
}
if (rc)
vp_err(vp_id, "Failed to enable VP : %lld\n", rc);
return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_enable_vp);
int xive_native_disable_vp(u32 vp_id)
{
s64 rc;
for (;;) {
rc = opal_xive_set_vp_info(vp_id, 0, 0);
if (rc != OPAL_BUSY)
break;
msleep(OPAL_BUSY_DELAY_MS);
}
if (rc)
vp_err(vp_id, "Failed to disable VP : %lld\n", rc);
return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_disable_vp);
int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
{
__be64 vp_cam_be;
__be32 vp_chip_id_be;
s64 rc;
rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
if (rc) {
vp_err(vp_id, "Failed to get VP info : %lld\n", rc);
return -EIO;
}
*out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
*out_chip_id = be32_to_cpu(vp_chip_id_be);
return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_info);
bool xive_native_has_single_escalation(void)
{
return xive_has_single_esc;
}
EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);
bool xive_native_has_save_restore(void)
{
return xive_has_save_restore;
}
EXPORT_SYMBOL_GPL(xive_native_has_save_restore);
int xive_native_get_queue_info(u32 vp_id, u32 prio,
u64 *out_qpage,
u64 *out_qsize,
u64 *out_qeoi_page,
u32 *out_escalate_irq,
u64 *out_qflags)
{
__be64 qpage;
__be64 qsize;
__be64 qeoi_page;
__be32 escalate_irq;
__be64 qflags;
s64 rc;
rc = opal_xive_get_queue_info(vp_id, prio, &qpage, &qsize,
&qeoi_page, &escalate_irq, &qflags);
if (rc) {
vp_err(vp_id, "failed to get queue %d info : %lld\n", prio, rc);
return -EIO;
}
if (out_qpage)
*out_qpage = be64_to_cpu(qpage);
if (out_qsize)
*out_qsize = be32_to_cpu(qsize);
if (out_qeoi_page)
*out_qeoi_page = be64_to_cpu(qeoi_page);
if (out_escalate_irq)
*out_escalate_irq = be32_to_cpu(escalate_irq);
if (out_qflags)
*out_qflags = be64_to_cpu(qflags);
return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_info);
int xive_native_get_queue_state(u32 vp_id, u32 prio, u32 *qtoggle, u32 *qindex)
{
__be32 opal_qtoggle;
__be32 opal_qindex;
s64 rc;
rc = opal_xive_get_queue_state(vp_id, prio, &opal_qtoggle,
&opal_qindex);
if (rc) {
vp_err(vp_id, "failed to get queue %d state : %lld\n", prio, rc);
return -EIO;
}
if (qtoggle)
*qtoggle = be32_to_cpu(opal_qtoggle);
if (qindex)
*qindex = be32_to_cpu(opal_qindex);
return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_state);
int xive_native_set_queue_state(u32 vp_id, u32 prio, u32 qtoggle, u32 qindex)
{
s64 rc;
rc = opal_xive_set_queue_state(vp_id, prio, qtoggle, qindex);
if (rc) {
vp_err(vp_id, "failed to set queue %d state : %lld\n", prio, rc);
return -EIO;
}
return 0;
}
EXPORT_SYMBOL_GPL(xive_native_set_queue_state);
bool xive_native_has_queue_state_support(void)
{
return opal_check_token(OPAL_XIVE_GET_QUEUE_STATE) &&
opal_check_token(OPAL_XIVE_SET_QUEUE_STATE);
}
EXPORT_SYMBOL_GPL(xive_native_has_queue_state_support);
int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
{
__be64 state;
s64 rc;
rc = opal_xive_get_vp_state(vp_id, &state);
if (rc) {
vp_err(vp_id, "failed to get vp state : %lld\n", rc);
return -EIO;
}
if (out_state)
*out_state = be64_to_cpu(state);
return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_state);
machine_arch_initcall(powernv, xive_core_debug_init);
| linux-master | arch/powerpc/sysdev/xive/native.c |
/*
* Interrupt handling for GE FPGA based PIC
*
* Author: Martyn Welch <[email protected]>
*
* 2008 (c) GE Intelligent Platforms Embedded Systems, Inc.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include "ge_pic.h"
#define DEBUG
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) do { printk(KERN_DEBUG "gef_pic: " fmt); } while (0)
#else
#define DBG(fmt...) do { } while (0)
#endif
#define GEF_PIC_NUM_IRQS 32
/* Interrupt Controller Interface Registers */
#define GEF_PIC_INTR_STATUS 0x0000
#define GEF_PIC_INTR_MASK(cpu) (0x0010 + (0x4 * cpu))
#define GEF_PIC_CPU0_INTR_MASK GEF_PIC_INTR_MASK(0)
#define GEF_PIC_CPU1_INTR_MASK GEF_PIC_INTR_MASK(1)
#define GEF_PIC_MCP_MASK(cpu) (0x0018 + (0x4 * cpu))
#define GEF_PIC_CPU0_MCP_MASK GEF_PIC_MCP_MASK(0)
#define GEF_PIC_CPU1_MCP_MASK GEF_PIC_MCP_MASK(1)
static DEFINE_RAW_SPINLOCK(gef_pic_lock);
static void __iomem *gef_pic_irq_reg_base;
static struct irq_domain *gef_pic_irq_host;
static int gef_pic_cascade_irq;
/*
* Interrupt Controller Handling
*
* The interrupt controller handles interrupts for most on board interrupts,
* apart from PCI interrupts. For example on SBC610:
*
* 17:31 RO Reserved
* 16 RO PCI Express Doorbell 3 Status
* 15 RO PCI Express Doorbell 2 Status
* 14 RO PCI Express Doorbell 1 Status
* 13 RO PCI Express Doorbell 0 Status
* 12 RO Real Time Clock Interrupt Status
* 11 RO Temperature Interrupt Status
* 10 RO Temperature Critical Interrupt Status
* 9 RO Ethernet PHY1 Interrupt Status
* 8 RO Ethernet PHY3 Interrupt Status
* 7 RO PEX8548 Interrupt Status
* 6 RO Reserved
* 5 RO Watchdog 0 Interrupt Status
* 4 RO Watchdog 1 Interrupt Status
* 3 RO AXIS Message FIFO A Interrupt Status
* 2 RO AXIS Message FIFO B Interrupt Status
* 1 RO AXIS Message FIFO C Interrupt Status
* 0 RO AXIS Message FIFO D Interrupt Status
*
* Interrupts can be forwarded to one of two output lines. Nothing
* clever is done, so if the masks are incorrectly set, a single input
* interrupt could generate interrupts on both output lines!
*
* The dual lines are there to allow the chained interrupts to be easily
* passed into two different cores. We currently do not use this functionality
* in this driver.
*
* Controller can also be configured to generate Machine checks (MCP), again on
* two lines, to be attached to two different cores. It is suggested that these
* should be masked out.
*/
static void gef_pic_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int cascade_irq;
/*
* See if we actually have an interrupt, call generic handling code if
* we do.
*/
cascade_irq = gef_pic_get_irq();
if (cascade_irq)
generic_handle_irq(cascade_irq);
chip->irq_eoi(&desc->irq_data);
}
static void gef_pic_mask(struct irq_data *d)
{
unsigned long flags;
unsigned int hwirq = irqd_to_hwirq(d);
u32 mask;
raw_spin_lock_irqsave(&gef_pic_lock, flags);
mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0));
mask &= ~(1 << hwirq);
out_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0), mask);
raw_spin_unlock_irqrestore(&gef_pic_lock, flags);
}
static void gef_pic_mask_ack(struct irq_data *d)
{
/* We don't think we actually have to do anything to ack an interrupt;
* we just need to clear down the device's interrupt and it will go away.
*/
gef_pic_mask(d);
}
static void gef_pic_unmask(struct irq_data *d)
{
unsigned long flags;
unsigned int hwirq = irqd_to_hwirq(d);
u32 mask;
raw_spin_lock_irqsave(&gef_pic_lock, flags);
mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0));
mask |= (1 << hwirq);
out_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0), mask);
raw_spin_unlock_irqrestore(&gef_pic_lock, flags);
}
static struct irq_chip gef_pic_chip = {
.name = "gefp",
.irq_mask = gef_pic_mask,
.irq_mask_ack = gef_pic_mask_ack,
.irq_unmask = gef_pic_unmask,
};
/* When an interrupt is being configured, this call allows some flexibility
* in deciding which irq_chip structure is used
*/
static int gef_pic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hwirq)
{
/* All interrupts are LEVEL sensitive */
irq_set_status_flags(virq, IRQ_LEVEL);
irq_set_chip_and_handler(virq, &gef_pic_chip, handle_level_irq);
return 0;
}
static int gef_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
*out_hwirq = intspec[0];
if (intsize > 1)
*out_flags = intspec[1];
else
*out_flags = IRQ_TYPE_LEVEL_HIGH;
return 0;
}
static const struct irq_domain_ops gef_pic_host_ops = {
.map = gef_pic_host_map,
.xlate = gef_pic_host_xlate,
};
/*
* Initialisation of the PIC; this should be called from the board setup code (BSP)
*/
void __init gef_pic_init(struct device_node *np)
{
unsigned long flags;
/* Map the devices registers into memory */
gef_pic_irq_reg_base = of_iomap(np, 0);
raw_spin_lock_irqsave(&gef_pic_lock, flags);
/* Initialise everything as masked. */
out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU0_INTR_MASK, 0);
out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU1_INTR_MASK, 0);
out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU0_MCP_MASK, 0);
out_be32(gef_pic_irq_reg_base + GEF_PIC_CPU1_MCP_MASK, 0);
raw_spin_unlock_irqrestore(&gef_pic_lock, flags);
/* Map controller */
gef_pic_cascade_irq = irq_of_parse_and_map(np, 0);
if (!gef_pic_cascade_irq) {
printk(KERN_ERR "SBC610: failed to map cascade interrupt");
return;
}
/* Setup an irq_domain structure */
gef_pic_irq_host = irq_domain_add_linear(np, GEF_PIC_NUM_IRQS,
&gef_pic_host_ops, NULL);
if (gef_pic_irq_host == NULL)
return;
/* Chain with parent controller */
irq_set_chained_handler(gef_pic_cascade_irq, gef_pic_cascade);
}
/*
* This is called when we receive an interrupt which apparently comes from this
* chip - check, and return the virq for the highest-numbered active source, or 0.
*/
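/*
* For example, if hwirq 3 and hwirq 9 are both pending and unmasked,
* the scan below starts from the highest numbered source, so the virq
* mapped to hwirq 9 is the one returned.
*/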
unsigned int gef_pic_get_irq(void)
{
u32 cause, mask, active;
unsigned int virq = 0;
int hwirq;
cause = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_STATUS);
mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0));
active = cause & mask;
if (active) {
for (hwirq = GEF_PIC_NUM_IRQS - 1; hwirq > -1; hwirq--) {
if (active & (0x1 << hwirq))
break;
}
virq = irq_linear_revmap(gef_pic_irq_host,
(irq_hw_number_t)hwirq);
}
return virq;
}
| linux-master | arch/powerpc/sysdev/ge/ge_pic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2011 IBM Corporation.
*/
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/rtas.h>
#include <asm/xics.h>
#include <asm/firmware.h>
/* Globals common to all ICP/ICS implementations */
const struct icp_ops *icp_ops;
unsigned int xics_default_server = 0xff;
unsigned int xics_default_distrib_server = 0;
unsigned int xics_interrupt_server_size = 8;
DEFINE_PER_CPU(struct xics_cppr, xics_cppr);
struct irq_domain *xics_host;
static struct ics *xics_ics;
void xics_update_irq_servers(void)
{
int i, j;
struct device_node *np;
u32 ilen;
const __be32 *ireg;
u32 hcpuid;
/* Find the server numbers for the boot cpu. */
np = of_get_cpu_node(boot_cpuid, NULL);
BUG_ON(!np);
hcpuid = get_hard_smp_processor_id(boot_cpuid);
xics_default_server = xics_default_distrib_server = hcpuid;
pr_devel("xics: xics_default_server = 0x%x\n", xics_default_server);
ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
if (!ireg) {
of_node_put(np);
return;
}
i = ilen / sizeof(int);
/* The global interrupt distribution server is specified in the last
* entry of the "ibm,ppc-interrupt-gserver#s" property. Get the last
* entry from this property for the current boot cpu id and use it as
* the default distribution server.
*/
for (j = 0; j < i; j += 2) {
if (be32_to_cpu(ireg[j]) == hcpuid) {
xics_default_distrib_server = be32_to_cpu(ireg[j+1]);
break;
}
}
pr_devel("xics: xics_default_distrib_server = 0x%x\n",
xics_default_distrib_server);
of_node_put(np);
}
/* GIQ stuff, currently only supported on RTAS setups, will have
* to be sorted properly for bare metal
*/
void xics_set_cpu_giq(unsigned int gserver, unsigned int join)
{
#ifdef CONFIG_PPC_RTAS
int index;
int status;
if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL))
return;
index = (1UL << xics_interrupt_server_size) - 1 - gserver;
status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join);
WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n",
GLOBAL_INTERRUPT_QUEUE, index, join, status);
#endif
}
void xics_setup_cpu(void)
{
icp_ops->set_priority(LOWEST_PRIORITY);
xics_set_cpu_giq(xics_default_distrib_server, 1);
}
void xics_mask_unknown_vec(unsigned int vec)
{
pr_err("Interrupt 0x%x (real) is invalid, disabling it.\n", vec);
if (WARN_ON(!xics_ics))
return;
xics_ics->mask_unknown(xics_ics, vec);
}
#ifdef CONFIG_SMP
static void __init xics_request_ipi(void)
{
unsigned int ipi;
ipi = irq_create_mapping(xics_host, XICS_IPI);
BUG_ON(!ipi);
/*
* IPIs are marked IRQF_PERCPU. The handler was set in map.
*/
BUG_ON(request_irq(ipi, icp_ops->ipi_action,
IRQF_NO_DEBUG | IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
}
void __init xics_smp_probe(void)
{
/* Register all the IPIs */
xics_request_ipi();
/* Setup cause_ipi callback based on which ICP is used */
smp_ops->cause_ipi = icp_ops->cause_ipi;
}
#endif /* CONFIG_SMP */
noinstr void xics_teardown_cpu(void)
{
struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
/*
* we have to reset the cppr index to 0 because we're
* not going to return from the IPI
*/
os_cppr->index = 0;
icp_ops->set_priority(0);
icp_ops->teardown_cpu();
}
noinstr void xics_kexec_teardown_cpu(int secondary)
{
xics_teardown_cpu();
icp_ops->flush_ipi();
/*
* Some machines need to have at least one cpu in the GIQ,
* so leave the master cpu in the group.
*/
if (secondary)
xics_set_cpu_giq(xics_default_distrib_server, 0);
}
#ifdef CONFIG_HOTPLUG_CPU
/* Interrupts are disabled. */
void xics_migrate_irqs_away(void)
{
int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
unsigned int irq, virq;
struct irq_desc *desc;
pr_debug("%s: CPU %u\n", __func__, cpu);
/* If we used to be the default server, move to the new "boot_cpuid" */
if (hw_cpu == xics_default_server)
xics_update_irq_servers();
/* Reject any interrupt that was queued to us... */
icp_ops->set_priority(0);
/* Remove ourselves from the global interrupt queue */
xics_set_cpu_giq(xics_default_distrib_server, 0);
for_each_irq_desc(virq, desc) {
struct irq_chip *chip;
long server;
unsigned long flags;
struct irq_data *irqd;
/* We can't set affinity on ISA interrupts */
if (virq < NR_IRQS_LEGACY)
continue;
/* We only need to migrate enabled IRQS */
if (!desc->action)
continue;
/* We need a mapping in the XICS IRQ domain */
irqd = irq_domain_get_irq_data(xics_host, virq);
if (!irqd)
continue;
irq = irqd_to_hwirq(irqd);
/* We need to get IPIs still. */
if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
continue;
chip = irq_desc_get_chip(desc);
if (!chip || !chip->irq_set_affinity)
continue;
raw_spin_lock_irqsave(&desc->lock, flags);
/* Locate interrupt server */
server = xics_ics->get_server(xics_ics, irq);
if (server < 0) {
pr_err("%s: Can't find server for irq %d/%x\n",
__func__, virq, irq);
goto unlock;
}
/* We only support delivery to all cpus or to one cpu.
* The irq has to be migrated only in the single cpu
* case.
*/
if (server != hw_cpu)
goto unlock;
/* This is expected during cpu offline. */
if (cpu_online(cpu))
pr_warn("IRQ %u affinity broken off cpu %u\n",
virq, cpu);
/* Reset affinity to all cpus */
raw_spin_unlock_irqrestore(&desc->lock, flags);
irq_set_affinity(virq, cpu_all_mask);
continue;
unlock:
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
/* Allow "sufficient" time to drop any inflight IRQ's */
mdelay(5);
/*
* Allow IPIs again. This is done at the very end, after migrating all
* interrupts, the expectation is that we'll only get woken up by an IPI
* interrupt beyond this point, but leave externals masked just to be
* safe. If we're using icp-opal this may actually allow all
* interrupts anyway, but that should be OK.
*/
icp_ops->set_priority(DEFAULT_PRIORITY);
}
#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_SMP
/*
* For the moment we only implement delivery to all cpus or one cpu.
*
* If the requested affinity is cpu_all_mask, we set global affinity.
* If not we set it to the first cpu in the mask, even if multiple cpus
* are set. This is so things like irqbalance (which set core and package
* wide affinities) do the right thing.
*
* We need to fix this to implement support for the links
*/
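/*
* For example, with irq distribution enabled, an affinity mask naming
* only CPU 2 resolves to CPU 2's hard SMP id; cpu_all_mask resolves to
* the global distribution server when every present CPU is online, and
* to the boot CPU's server otherwise.
*/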
int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
unsigned int strict_check)
{
if (!distribute_irqs)
return xics_default_server;
if (!cpumask_subset(cpu_possible_mask, cpumask)) {
int server = cpumask_first_and(cpu_online_mask, cpumask);
if (server < nr_cpu_ids)
return get_hard_smp_processor_id(server);
if (strict_check)
return -1;
}
/*
* Workaround issue with some versions of JS20 firmware that
* deliver interrupts to cpus which haven't been started. This
* happens when using the maxcpus= boot option.
*/
if (cpumask_equal(cpu_online_mask, cpu_present_mask))
return xics_default_distrib_server;
return xics_default_server;
}
#endif /* CONFIG_SMP */
static int xics_host_match(struct irq_domain *h, struct device_node *node,
enum irq_domain_bus_token bus_token)
{
if (WARN_ON(!xics_ics))
return 0;
return xics_ics->host_match(xics_ics, node) ? 1 : 0;
}
/* Dummies */
static void xics_ipi_unmask(struct irq_data *d) { }
static void xics_ipi_mask(struct irq_data *d) { }
static struct irq_chip xics_ipi_chip = {
.name = "XICS",
.irq_eoi = NULL, /* Patched at init time */
.irq_mask = xics_ipi_mask,
.irq_unmask = xics_ipi_unmask,
};
static int xics_host_map(struct irq_domain *domain, unsigned int virq,
irq_hw_number_t hwirq)
{
pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hwirq);
/*
* Mark interrupts as edge sensitive by default so that resend
* actually works. The device-tree parsing will turn the LSIs
* back to level.
*/
irq_clear_status_flags(virq, IRQ_LEVEL);
/* Don't call into ICS for IPIs */
if (hwirq == XICS_IPI) {
irq_set_chip_and_handler(virq, &xics_ipi_chip,
handle_percpu_irq);
return 0;
}
if (WARN_ON(!xics_ics))
return -EINVAL;
if (xics_ics->check(xics_ics, hwirq))
return -EINVAL;
/* Let the ICS be the chip data for the XICS domain. For ICS native */
irq_domain_set_info(domain, virq, hwirq, xics_ics->chip,
xics_ics, handle_fasteoi_irq, NULL, NULL);
return 0;
}
static int xics_host_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
*out_hwirq = intspec[0];
/*
* If intsize is at least 2, we look for the type in the second cell;
* we assume the LSB indicates a level interrupt.
*/
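/*
* For example, a specifier of { 0x1000, 1 } yields hwirq 0x1000 with
* IRQ_TYPE_LEVEL_LOW, { 0x1000, 0 } yields IRQ_TYPE_EDGE_RISING, and a
* single-cell specifier defaults to IRQ_TYPE_LEVEL_LOW.
*/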
if (intsize > 1) {
if (intspec[1] & 1)
*out_flags = IRQ_TYPE_LEVEL_LOW;
else
*out_flags = IRQ_TYPE_EDGE_RISING;
} else
*out_flags = IRQ_TYPE_LEVEL_LOW;
return 0;
}
int xics_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
/*
* We only support these. This really has no effect other than setting
* the corresponding descriptor bits, mind you, but those will in turn
* affect the resend function when re-enabling an edge interrupt.
*
* We set the default to edge as explained in map().
*/
if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
flow_type = IRQ_TYPE_EDGE_RISING;
if (flow_type != IRQ_TYPE_EDGE_RISING &&
flow_type != IRQ_TYPE_LEVEL_LOW)
return -EINVAL;
irqd_set_trigger_type(d, flow_type);
return IRQ_SET_MASK_OK_NOCOPY;
}
int xics_retrigger(struct irq_data *data)
{
/*
* We need to push a dummy CPPR when retriggering, since the subsequent
* EOI will try to pop it. Passing 0 works, as the function hard codes
* the priority value anyway.
*/
xics_push_cppr(0);
/* Tell the core to do a soft retrigger */
return 0;
}
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
static int xics_host_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
unsigned long *hwirq, unsigned int *type)
{
return xics_host_xlate(d, to_of_node(fwspec->fwnode), fwspec->param,
fwspec->param_count, hwirq, type);
}
static int xics_host_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
struct irq_fwspec *fwspec = arg;
irq_hw_number_t hwirq;
unsigned int type = IRQ_TYPE_NONE;
int i, rc;
rc = xics_host_domain_translate(domain, fwspec, &hwirq, &type);
if (rc)
return rc;
pr_debug("%s %d/%lx #%d\n", __func__, virq, hwirq, nr_irqs);
for (i = 0; i < nr_irqs; i++)
irq_domain_set_info(domain, virq + i, hwirq + i, xics_ics->chip,
xics_ics, handle_fasteoi_irq, NULL, NULL);
return 0;
}
static void xics_host_domain_free(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
pr_debug("%s %d #%d\n", __func__, virq, nr_irqs);
}
#endif
static const struct irq_domain_ops xics_host_ops = {
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
.alloc = xics_host_domain_alloc,
.free = xics_host_domain_free,
.translate = xics_host_domain_translate,
#endif
.match = xics_host_match,
.map = xics_host_map,
.xlate = xics_host_xlate,
};
static int __init xics_allocate_domain(void)
{
struct fwnode_handle *fn;
fn = irq_domain_alloc_named_fwnode("XICS");
if (!fn)
return -ENOMEM;
xics_host = irq_domain_create_tree(fn, &xics_host_ops, NULL);
if (!xics_host) {
irq_domain_free_fwnode(fn);
return -ENOMEM;
}
irq_set_default_host(xics_host);
return 0;
}
void __init xics_register_ics(struct ics *ics)
{
if (WARN_ONCE(xics_ics, "XICS: Source Controller is already defined !"))
return;
xics_ics = ics;
}
static void __init xics_get_server_size(void)
{
struct device_node *np;
const __be32 *isize;
/* We fetch the interrupt server size from the first ICS node
* we find if any
*/
np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xics");
if (!np)
return;
isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
if (isize)
xics_interrupt_server_size = be32_to_cpu(*isize);
of_node_put(np);
}
void __init xics_init(void)
{
int rc = -1;
/* First locate the ICP */
if (firmware_has_feature(FW_FEATURE_LPAR))
rc = icp_hv_init();
if (rc < 0) {
rc = icp_native_init();
if (rc == -ENODEV)
rc = icp_opal_init();
}
if (rc < 0) {
pr_warn("XICS: Cannot find a Presentation Controller !\n");
return;
}
/* Copy get_irq callback over to ppc_md */
ppc_md.get_irq = icp_ops->get_irq;
/* Patch up IPI chip EOI */
xics_ipi_chip.irq_eoi = icp_ops->eoi;
/* Now locate ICS */
rc = ics_rtas_init();
if (rc < 0)
rc = ics_opal_init();
if (rc < 0)
rc = ics_native_init();
if (rc < 0)
pr_warn("XICS: Cannot find a Source Controller !\n");
/* Initialize common bits */
xics_get_server_size();
xics_update_irq_servers();
rc = xics_allocate_domain();
if (rc < 0)
pr_err("XICS: Failed to create IRQ domain");
xics_setup_cpu();
}
| linux-master | arch/powerpc/sysdev/xics/xics-common.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2011 IBM Corporation.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xics.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
struct icp_ipl {
union {
u32 word;
u8 bytes[4];
} xirr_poll;
union {
u32 word;
u8 bytes[4];
} xirr;
u32 dummy;
union {
u32 word;
u8 bytes[4];
} qirr;
u32 link_a;
u32 link_b;
u32 link_c;
};
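/*
* As used by the accessors below, the XIRR word carries the CPPR in its
* most significant byte and the interrupt source number in the low 24
* bits (writing it back performs the EOI), while the first byte of QIRR
* is the MFRR used to trigger and clear IPIs.
*/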
static struct icp_ipl __iomem *icp_native_regs[NR_CPUS];
static inline unsigned int icp_native_get_xirr(void)
{
int cpu = smp_processor_id();
unsigned int xirr;
/* Handled an interrupt latched by KVM */
xirr = kvmppc_get_xics_latch();
if (xirr)
return xirr;
return in_be32(&icp_native_regs[cpu]->xirr.word);
}
static inline void icp_native_set_xirr(unsigned int value)
{
int cpu = smp_processor_id();
out_be32(&icp_native_regs[cpu]->xirr.word, value);
}
static inline void icp_native_set_cppr(u8 value)
{
int cpu = smp_processor_id();
out_8(&icp_native_regs[cpu]->xirr.bytes[0], value);
}
static inline void icp_native_set_qirr(int n_cpu, u8 value)
{
out_8(&icp_native_regs[n_cpu]->qirr.bytes[0], value);
}
static void icp_native_set_cpu_priority(unsigned char cppr)
{
xics_set_base_cppr(cppr);
icp_native_set_cppr(cppr);
iosync();
}
void icp_native_eoi(struct irq_data *d)
{
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
iosync();
icp_native_set_xirr((xics_pop_cppr() << 24) | hw_irq);
}
static void icp_native_teardown_cpu(void)
{
int cpu = smp_processor_id();
/* Clear any pending IPI */
icp_native_set_qirr(cpu, 0xff);
}
static void icp_native_flush_ipi(void)
{
/* We take the IPI irq and never return, so we
* need to EOI the IPI, but want to leave our priority 0.
*
* Should we check all the other interrupts too?
* Should we be flagging the idle loop instead?
* Or creating some task to be scheduled?
*/
icp_native_set_xirr((0x00 << 24) | XICS_IPI);
}
static unsigned int icp_native_get_irq(void)
{
unsigned int xirr = icp_native_get_xirr();
unsigned int vec = xirr & 0x00ffffff;
unsigned int irq;
if (vec == XICS_IRQ_SPURIOUS)
return 0;
irq = irq_find_mapping(xics_host, vec);
if (likely(irq)) {
xics_push_cppr(vec);
return irq;
}
/* We don't have a linux mapping, so have rtas mask it. */
xics_mask_unknown_vec(vec);
/* We might learn about it later, so EOI it */
icp_native_set_xirr(xirr);
return 0;
}
#ifdef CONFIG_SMP
static void icp_native_cause_ipi(int cpu)
{
kvmppc_set_host_ipi(cpu);
icp_native_set_qirr(cpu, IPI_PRIORITY);
}
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
void icp_native_cause_ipi_rm(int cpu)
{
/*
* Currently not used to send IPIs to another CPU
* on the same core. Only caller is KVM real mode.
* Need the physical address of the XICS to be
* previously saved in kvm_hstate in the paca.
*/
void __iomem *xics_phys;
/*
* Just like the cause_ipi functions, it is required to
* include a full barrier before causing the IPI.
*/
xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
mb();
__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
}
#endif
/*
* Called when an interrupt is received on an off-line CPU to
* clear the interrupt, so that the CPU can go back to nap mode.
*/
void icp_native_flush_interrupt(void)
{
unsigned int xirr = icp_native_get_xirr();
unsigned int vec = xirr & 0x00ffffff;
if (vec == XICS_IRQ_SPURIOUS)
return;
if (vec == XICS_IPI) {
/* Clear pending IPI */
int cpu = smp_processor_id();
kvmppc_clear_host_ipi(cpu);
icp_native_set_qirr(cpu, 0xff);
} else {
pr_err("XICS: hw interrupt 0x%x to offline cpu, disabling\n",
vec);
xics_mask_unknown_vec(vec);
}
/* EOI the interrupt */
icp_native_set_xirr(xirr);
}
void xics_wake_cpu(int cpu)
{
icp_native_set_qirr(cpu, IPI_PRIORITY);
}
EXPORT_SYMBOL_GPL(xics_wake_cpu);
static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
{
int cpu = smp_processor_id();
kvmppc_clear_host_ipi(cpu);
icp_native_set_qirr(cpu, 0xff);
return smp_ipi_demux();
}
#endif /* CONFIG_SMP */
static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
unsigned long size)
{
char *rname;
int i, cpu = -1;
/* This may look gross but it's good enough for now, we don't quite
* have a hard -> linux processor id matching.
*/
for_each_possible_cpu(i) {
if (!cpu_present(i))
continue;
if (hw_id == get_hard_smp_processor_id(i)) {
cpu = i;
break;
}
}
/* Fail, skip that CPU. Don't print, it's normal, some XICS come up
* with way more entries in there than you have CPUs
*/
if (cpu == -1)
return 0;
rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation",
cpu, hw_id);
if (!request_mem_region(addr, size, rname)) {
pr_warn("icp_native: Could not reserve ICP MMIO for CPU %d, interrupt server #0x%x\n",
cpu, hw_id);
return -EBUSY;
}
icp_native_regs[cpu] = ioremap(addr, size);
kvmppc_set_xics_phys(cpu, addr);
if (!icp_native_regs[cpu]) {
pr_warn("icp_native: Failed ioremap for CPU %d, interrupt server #0x%x, addr %#lx\n",
cpu, hw_id, addr);
release_mem_region(addr, size);
return -ENOMEM;
}
return 0;
}
static int __init icp_native_init_one_node(struct device_node *np,
unsigned int *indx)
{
unsigned int ilen;
const __be32 *ireg;
int i;
int num_reg;
int num_servers = 0;
/* This code makes the theoretically broken assumption that the interrupt
* server numbers are the same as the hard CPU numbers.
* This happens to be the case so far but we are playing with fire...
* should be fixed one of these days. -BenH.
*/
ireg = of_get_property(np, "ibm,interrupt-server-ranges", &ilen);
/* Does that ever happen? We'll know soon enough... but even good old
* f80 does have that property.
*/
WARN_ON((ireg == NULL) || (ilen != 2*sizeof(u32)));
if (ireg) {
*indx = of_read_number(ireg, 1);
if (ilen >= 2*sizeof(u32))
num_servers = of_read_number(ireg + 1, 1);
}
num_reg = of_address_count(np);
if (num_servers && (num_servers != num_reg)) {
pr_err("icp_native: ICP reg len (%d) != num servers (%d)",
num_reg, num_servers);
return -1;
}
for (i = 0; i < num_reg; i++) {
struct resource r;
int err;
err = of_address_to_resource(np, i, &r);
if (err) {
pr_err("icp_native: Could not translate ICP MMIO"
" for interrupt server 0x%x (%d)\n", *indx, err);
return -1;
}
if (icp_native_map_one_cpu(*indx, r.start, resource_size(&r)))
return -1;
(*indx)++;
}
return 0;
}
static const struct icp_ops icp_native_ops = {
.get_irq = icp_native_get_irq,
.eoi = icp_native_eoi,
.set_priority = icp_native_set_cpu_priority,
.teardown_cpu = icp_native_teardown_cpu,
.flush_ipi = icp_native_flush_ipi,
#ifdef CONFIG_SMP
.ipi_action = icp_native_ipi_action,
.cause_ipi = icp_native_cause_ipi,
#endif
};
int __init icp_native_init(void)
{
struct device_node *np;
u32 indx = 0;
int found = 0;
for_each_compatible_node(np, NULL, "ibm,ppc-xicp")
if (icp_native_init_one_node(np, &indx) == 0)
found = 1;
if (!found) {
for_each_node_by_type(np,
"PowerPC-External-Interrupt-Presentation") {
if (icp_native_init_one_node(np, &indx) == 0)
found = 1;
}
}
if (found == 0)
return -ENODEV;
icp_ops = &icp_native_ops;
return 0;
}
| linux-master | arch/powerpc/sysdev/xics/icp-native.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ICS backend for OPAL managed interrupts.
*
* Copyright 2011 IBM Corp.
*/
#undef DEBUG
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/msi.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xics.h>
#include <asm/opal.h>
#include <asm/firmware.h>
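/*
* Server numbers are shifted left by two bits before being handed to
* OPAL; the low bits would select the interrupt "link", which is not
* used here. For example, ics_opal_mangle_server(5) == 20 and
* ics_opal_unmangle_server(20) == 5.
*/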
static int ics_opal_mangle_server(int server)
{
/* No link for now */
return server << 2;
}
static int ics_opal_unmangle_server(int server)
{
/* No link for now */
return server >> 2;
}
static void ics_opal_unmask_irq(struct irq_data *d)
{
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
int64_t rc;
int server;
pr_devel("ics-hal: unmask virq %d [hw 0x%x]\n", d->irq, hw_irq);
if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
return;
server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0);
server = ics_opal_mangle_server(server);
rc = opal_set_xive(hw_irq, server, DEFAULT_PRIORITY);
if (rc != OPAL_SUCCESS)
pr_err("%s: opal_set_xive(irq=%d [hw 0x%x] server=%x)"
" error %lld\n",
__func__, d->irq, hw_irq, server, rc);
}
static unsigned int ics_opal_startup(struct irq_data *d)
{
ics_opal_unmask_irq(d);
return 0;
}
static void ics_opal_mask_real_irq(unsigned int hw_irq)
{
int server = ics_opal_mangle_server(xics_default_server);
int64_t rc;
if (hw_irq == XICS_IPI)
return;
/* Have to set XIVE to 0xff to be able to remove a slot */
rc = opal_set_xive(hw_irq, server, 0xff);
if (rc != OPAL_SUCCESS)
pr_err("%s: opal_set_xive(0xff) irq=%u returned %lld\n",
__func__, hw_irq, rc);
}
static void ics_opal_mask_irq(struct irq_data *d)
{
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
pr_devel("ics-hal: mask virq %d [hw 0x%x]\n", d->irq, hw_irq);
if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
return;
ics_opal_mask_real_irq(hw_irq);
}
static int ics_opal_set_affinity(struct irq_data *d,
const struct cpumask *cpumask,
bool force)
{
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
__be16 oserver;
int16_t server;
int8_t priority;
int64_t rc;
int wanted_server;
if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
return -1;
rc = opal_get_xive(hw_irq, &oserver, &priority);
if (rc != OPAL_SUCCESS) {
pr_err("%s: opal_get_xive(irq=%d [hw 0x%x]) error %lld\n",
__func__, d->irq, hw_irq, rc);
return -1;
}
wanted_server = xics_get_irq_server(d->irq, cpumask, 1);
if (wanted_server < 0) {
pr_warn("%s: No online cpus in the mask %*pb for irq %d\n",
__func__, cpumask_pr_args(cpumask), d->irq);
return -1;
}
server = ics_opal_mangle_server(wanted_server);
pr_debug("ics-hal: set-affinity irq %d [hw 0x%x] server: 0x%x/0x%x\n",
d->irq, hw_irq, wanted_server, server);
rc = opal_set_xive(hw_irq, server, priority);
if (rc != OPAL_SUCCESS) {
pr_err("%s: opal_set_xive(irq=%d [hw 0x%x] server=%x)"
" error %lld\n",
__func__, d->irq, hw_irq, server, rc);
return -1;
}
return IRQ_SET_MASK_OK;
}
static struct irq_chip ics_opal_irq_chip = {
.name = "OPAL ICS",
.irq_startup = ics_opal_startup,
.irq_mask = ics_opal_mask_irq,
.irq_unmask = ics_opal_unmask_irq,
.irq_eoi = NULL, /* Patched at init time */
.irq_set_affinity = ics_opal_set_affinity,
.irq_set_type = xics_set_irq_type,
.irq_retrigger = xics_retrigger,
};
static int ics_opal_host_match(struct ics *ics, struct device_node *node)
{
return 1;
}
static int ics_opal_check(struct ics *ics, unsigned int hw_irq)
{
int64_t rc;
__be16 server;
int8_t priority;
if (WARN_ON(hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS))
return -EINVAL;
/* Check if HAL knows about this interrupt */
rc = opal_get_xive(hw_irq, &server, &priority);
if (rc != OPAL_SUCCESS)
return -ENXIO;
return 0;
}
static void ics_opal_mask_unknown(struct ics *ics, unsigned long vec)
{
int64_t rc;
__be16 server;
int8_t priority;
/* Check if HAL knows about this interrupt */
rc = opal_get_xive(vec, &server, &priority);
if (rc != OPAL_SUCCESS)
return;
ics_opal_mask_real_irq(vec);
}
static long ics_opal_get_server(struct ics *ics, unsigned long vec)
{
int64_t rc;
__be16 server;
int8_t priority;
/* Check if HAL knows about this interrupt */
rc = opal_get_xive(vec, &server, &priority);
if (rc != OPAL_SUCCESS)
return -1;
return ics_opal_unmangle_server(be16_to_cpu(server));
}
/* Only one global & static struct ics */
static struct ics ics_hal = {
.check = ics_opal_check,
.mask_unknown = ics_opal_mask_unknown,
.get_server = ics_opal_get_server,
.host_match = ics_opal_host_match,
.chip = &ics_opal_irq_chip,
};
int __init ics_opal_init(void)
{
if (!firmware_has_feature(FW_FEATURE_OPAL))
return -ENODEV;
/* We need to patch our irq chip's EOI to point to the
* right ICP
*/
ics_opal_irq_chip.irq_eoi = icp_ops->eoi;
/* Register ourselves */
xics_register_ics(&ics_hal);
pr_info("ICS OPAL backend registered\n");
return 0;
}
| linux-master | arch/powerpc/sysdev/xics/ics-opal.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2016 IBM Corporation.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xics.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/kvm_ppc.h>
static void icp_opal_teardown_cpu(void)
{
int hw_cpu = hard_smp_processor_id();
/* Clear any pending IPI */
opal_int_set_mfrr(hw_cpu, 0xff);
}
static void icp_opal_flush_ipi(void)
{
/*
* We take the IPI irq and never return, so we need to EOI the IPI,
* but want to leave our priority 0.
*
* Should we check all the other interrupts too?
* Should we be flagging idle loop instead?
* Or creating some task to be scheduled?
*/
if (opal_int_eoi((0x00 << 24) | XICS_IPI) > 0)
force_external_irq_replay();
}
static unsigned int icp_opal_get_xirr(void)
{
unsigned int kvm_xirr;
__be32 hw_xirr;
int64_t rc;
/* Handle an interrupt latched by KVM first */
kvm_xirr = kvmppc_get_xics_latch();
if (kvm_xirr)
return kvm_xirr;
/* Then ask OPAL */
rc = opal_int_get_xirr(&hw_xirr, false);
if (rc < 0)
return 0;
return be32_to_cpu(hw_xirr);
}
static unsigned int icp_opal_get_irq(void)
{
unsigned int xirr;
unsigned int vec;
unsigned int irq;
xirr = icp_opal_get_xirr();
vec = xirr & 0x00ffffff;
if (vec == XICS_IRQ_SPURIOUS)
return 0;
irq = irq_find_mapping(xics_host, vec);
if (likely(irq)) {
xics_push_cppr(vec);
return irq;
}
/* We don't have a linux mapping, so have rtas mask it. */
xics_mask_unknown_vec(vec);
/* We might learn about it later, so EOI it */
if (opal_int_eoi(xirr) > 0)
force_external_irq_replay();
return 0;
}
static void icp_opal_set_cpu_priority(unsigned char cppr)
{
/*
* Here be dragons. The caller has asked to allow only IPIs and not
* external interrupts. But OPAL XIVE doesn't support that. So instead
* of allowing no interrupts allow all. That's still not right, but
* currently the only caller who does this is xics_migrate_irqs_away()
* and it works in that case.
*/
if (cppr >= DEFAULT_PRIORITY)
cppr = LOWEST_PRIORITY;
xics_set_base_cppr(cppr);
opal_int_set_cppr(cppr);
iosync();
}
static void icp_opal_eoi(struct irq_data *d)
{
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
int64_t rc;
iosync();
rc = opal_int_eoi((xics_pop_cppr() << 24) | hw_irq);
/*
* EOI tells us whether there are more interrupts to fetch.
*
* Some HW implementations might not be able to send us another
* external interrupt in that case, so we force a replay.
*/
if (rc > 0)
force_external_irq_replay();
}
#ifdef CONFIG_SMP
static void icp_opal_cause_ipi(int cpu)
{
int hw_cpu = get_hard_smp_processor_id(cpu);
kvmppc_set_host_ipi(cpu);
opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
}
static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
{
int cpu = smp_processor_id();
kvmppc_clear_host_ipi(cpu);
opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
return smp_ipi_demux();
}
/*
* Called when an interrupt is received on an off-line CPU to
* clear the interrupt, so that the CPU can go back to nap mode.
*/
void icp_opal_flush_interrupt(void)
{
unsigned int xirr;
unsigned int vec;
do {
xirr = icp_opal_get_xirr();
vec = xirr & 0x00ffffff;
if (vec == XICS_IRQ_SPURIOUS)
break;
if (vec == XICS_IPI) {
/* Clear pending IPI */
int cpu = smp_processor_id();
kvmppc_clear_host_ipi(cpu);
opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
} else {
pr_err("XICS: hw interrupt 0x%x to offline cpu, "
"disabling\n", vec);
xics_mask_unknown_vec(vec);
}
/* EOI the interrupt */
} while (opal_int_eoi(xirr) > 0);
}
#endif /* CONFIG_SMP */
static const struct icp_ops icp_opal_ops = {
.get_irq = icp_opal_get_irq,
.eoi = icp_opal_eoi,
.set_priority = icp_opal_set_cpu_priority,
.teardown_cpu = icp_opal_teardown_cpu,
.flush_ipi = icp_opal_flush_ipi,
#ifdef CONFIG_SMP
.ipi_action = icp_opal_ipi_action,
.cause_ipi = icp_opal_cause_ipi,
#endif
};
int __init icp_opal_init(void)
{
struct device_node *np;
np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
if (!np)
return -ENODEV;
icp_ops = &icp_opal_ops;
printk("XICS: Using OPAL ICP fallbacks\n");
of_node_put(np);
return 0;
}
| linux-master | arch/powerpc/sysdev/xics/icp-opal.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ICS backend for OPAL managed interrupts.
*
* Copyright 2011 IBM Corp.
*/
//#define DEBUG
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/spinlock.h>
#include <linux/msi.h>
#include <linux/list.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xics.h>
#include <asm/opal.h>
#include <asm/firmware.h>
struct ics_native {
struct ics ics;
struct device_node *node;
void __iomem *base;
u32 ibase;
u32 icount;
};
#define to_ics_native(_ics) container_of(_ics, struct ics_native, ics)
static void __iomem *ics_native_xive(struct ics_native *in, unsigned int vec)
{
return in->base + 0x800 + ((vec - in->ibase) << 2);
}
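/*
* Each source owns a 32-bit XIVE register at base + 0x800, indexed by
* (vec - in->ibase). As used below, it holds the target server from
* bit 8 upwards and the priority in the low byte, with 0xff meaning
* the source is masked.
*/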
static void ics_native_unmask_irq(struct irq_data *d)
{
unsigned int vec = (unsigned int)irqd_to_hwirq(d);
struct ics *ics = irq_data_get_irq_chip_data(d);
struct ics_native *in = to_ics_native(ics);
unsigned int server;
pr_devel("ics-native: unmask virq %d [hw 0x%x]\n", d->irq, vec);
if (vec < in->ibase || vec >= (in->ibase + in->icount))
return;
server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0);
out_be32(ics_native_xive(in, vec), (server << 8) | DEFAULT_PRIORITY);
}
static unsigned int ics_native_startup(struct irq_data *d)
{
#ifdef CONFIG_PCI_MSI
/*
* The generic MSI code returns with the interrupt disabled on the
* card, using the MSI mask bits. Firmware doesn't appear to unmask
* at that level, so we do it here by hand.
*/
if (irq_data_get_msi_desc(d))
pci_msi_unmask_irq(d);
#endif
/* unmask it */
ics_native_unmask_irq(d);
return 0;
}
static void ics_native_do_mask(struct ics_native *in, unsigned int vec)
{
out_be32(ics_native_xive(in, vec), 0xff);
}
static void ics_native_mask_irq(struct irq_data *d)
{
unsigned int vec = (unsigned int)irqd_to_hwirq(d);
struct ics *ics = irq_data_get_irq_chip_data(d);
struct ics_native *in = to_ics_native(ics);
pr_devel("ics-native: mask virq %d [hw 0x%x]\n", d->irq, vec);
if (vec < in->ibase || vec >= (in->ibase + in->icount))
return;
ics_native_do_mask(in, vec);
}
static int ics_native_set_affinity(struct irq_data *d,
const struct cpumask *cpumask,
bool force)
{
unsigned int vec = (unsigned int)irqd_to_hwirq(d);
struct ics *ics = irq_data_get_irq_chip_data(d);
struct ics_native *in = to_ics_native(ics);
int server;
u32 xive;
if (vec < in->ibase || vec >= (in->ibase + in->icount))
return -EINVAL;
server = xics_get_irq_server(d->irq, cpumask, 1);
if (server == -1) {
pr_warn("%s: No online cpus in the mask %*pb for irq %d\n",
__func__, cpumask_pr_args(cpumask), d->irq);
return -1;
}
xive = in_be32(ics_native_xive(in, vec));
xive = (xive & 0xff) | (server << 8);
out_be32(ics_native_xive(in, vec), xive);
return IRQ_SET_MASK_OK;
}
static struct irq_chip ics_native_irq_chip = {
.name = "ICS",
.irq_startup = ics_native_startup,
.irq_mask = ics_native_mask_irq,
.irq_unmask = ics_native_unmask_irq,
.irq_eoi = NULL, /* Patched at init time */
.irq_set_affinity = ics_native_set_affinity,
.irq_set_type = xics_set_irq_type,
.irq_retrigger = xics_retrigger,
};
static int ics_native_check(struct ics *ics, unsigned int hw_irq)
{
struct ics_native *in = to_ics_native(ics);
pr_devel("%s: hw_irq=0x%x\n", __func__, hw_irq);
if (hw_irq < in->ibase || hw_irq >= (in->ibase + in->icount))
return -EINVAL;
return 0;
}
static void ics_native_mask_unknown(struct ics *ics, unsigned long vec)
{
struct ics_native *in = to_ics_native(ics);
if (vec < in->ibase || vec >= (in->ibase + in->icount))
return;
ics_native_do_mask(in, vec);
}
static long ics_native_get_server(struct ics *ics, unsigned long vec)
{
struct ics_native *in = to_ics_native(ics);
u32 xive;
if (vec < in->ibase || vec >= (in->ibase + in->icount))
return -EINVAL;
xive = in_be32(ics_native_xive(in, vec));
return (xive >> 8) & 0xfff;
}
static int ics_native_host_match(struct ics *ics, struct device_node *node)
{
struct ics_native *in = to_ics_native(ics);
return in->node == node;
}
static struct ics ics_native_template = {
.check = ics_native_check,
.mask_unknown = ics_native_mask_unknown,
.get_server = ics_native_get_server,
.host_match = ics_native_host_match,
.chip = &ics_native_irq_chip,
};
static int __init ics_native_add_one(struct device_node *np)
{
struct ics_native *ics;
u32 ranges[2];
int rc, count;
ics = kzalloc(sizeof(struct ics_native), GFP_KERNEL);
if (!ics)
return -ENOMEM;
ics->node = of_node_get(np);
memcpy(&ics->ics, &ics_native_template, sizeof(struct ics));
ics->base = of_iomap(np, 0);
if (!ics->base) {
pr_err("Failed to map %pOFP\n", np);
rc = -ENOMEM;
goto fail;
}
count = of_property_count_u32_elems(np, "interrupt-ranges");
if (count < 2 || count & 1) {
pr_err("Failed to read interrupt-ranges of %pOFP\n", np);
rc = -EINVAL;
goto fail;
}
if (count > 2) {
pr_warn("ICS %pOFP has %d ranges, only one supported\n",
np, count >> 1);
}
rc = of_property_read_u32_array(np, "interrupt-ranges",
ranges, 2);
if (rc) {
pr_err("Failed to read interrupt-ranges of %pOFP\n", np);
goto fail;
}
ics->ibase = ranges[0];
ics->icount = ranges[1];
pr_info("ICS native initialized for sources %d..%d\n",
ics->ibase, ics->ibase + ics->icount - 1);
/* Register ourselves */
xics_register_ics(&ics->ics);
return 0;
fail:
of_node_put(ics->node);
kfree(ics);
return rc;
}
int __init ics_native_init(void)
{
struct device_node *ics;
bool found_one = false;
/* We need to patch our irq chip's EOI to point to the
* right ICP
*/
ics_native_irq_chip.irq_eoi = icp_ops->eoi;
/* Find native ICS in the device-tree */
for_each_compatible_node(ics, NULL, "openpower,xics-sources") {
if (ics_native_add_one(ics) == 0)
found_one = true;
}
if (found_one)
pr_info("ICS native backend registered\n");
return found_one ? 0 : -ENODEV;
}
| linux-master | arch/powerpc/sysdev/xics/ics-native.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/msi.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xics.h>
#include <asm/rtas.h>
/* RTAS service tokens */
static int ibm_get_xive;
static int ibm_set_xive;
static int ibm_int_on;
static int ibm_int_off;
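/*
* As used below, ibm,get-xive returns the (server, priority) pair
* currently programmed for a source, ibm,set-xive reprograms it, and
* ibm,int-on / ibm,int-off enable and disable a source.
*/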
static void ics_rtas_unmask_irq(struct irq_data *d)
{
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
int call_status;
int server;
pr_devel("xics: unmask virq %d [hw 0x%x]\n", d->irq, hw_irq);
if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
return;
server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0);
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server,
DEFAULT_PRIORITY);
if (call_status != 0) {
printk(KERN_ERR
"%s: ibm_set_xive irq %u server %x returned %d\n",
__func__, hw_irq, server, call_status);
return;
}
/* Now unmask the interrupt (often a no-op) */
call_status = rtas_call(ibm_int_on, 1, 1, NULL, hw_irq);
if (call_status != 0) {
printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n",
__func__, hw_irq, call_status);
return;
}
}
static unsigned int ics_rtas_startup(struct irq_data *d)
{
/* unmask it */
ics_rtas_unmask_irq(d);
return 0;
}
static void ics_rtas_mask_real_irq(unsigned int hw_irq)
{
int call_status;
if (hw_irq == XICS_IPI)
return;
call_status = rtas_call(ibm_int_off, 1, 1, NULL, hw_irq);
if (call_status != 0) {
printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n",
__func__, hw_irq, call_status);
return;
}
/* Have to set XIVE to 0xff to be able to remove a slot */
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq,
xics_default_server, 0xff);
if (call_status != 0) {
printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n",
__func__, hw_irq, call_status);
return;
}
}
static void ics_rtas_mask_irq(struct irq_data *d)
{
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
pr_devel("xics: mask virq %d [hw 0x%x]\n", d->irq, hw_irq);
if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
return;
ics_rtas_mask_real_irq(hw_irq);
}
static int ics_rtas_set_affinity(struct irq_data *d,
const struct cpumask *cpumask,
bool force)
{
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
int status;
int xics_status[2];
int irq_server;
if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
return -1;
status = rtas_call(ibm_get_xive, 1, 3, xics_status, hw_irq);
if (status) {
printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
__func__, hw_irq, status);
return -1;
}
irq_server = xics_get_irq_server(d->irq, cpumask, 1);
if (irq_server == -1) {
pr_warn("%s: No online cpus in the mask %*pb for irq %d\n",
__func__, cpumask_pr_args(cpumask), d->irq);
return -1;
}
pr_debug("%s: irq %d [hw 0x%x] server: 0x%x\n", __func__, d->irq,
hw_irq, irq_server);
status = rtas_call(ibm_set_xive, 3, 1, NULL,
hw_irq, irq_server, xics_status[1]);
if (status) {
printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n",
__func__, hw_irq, status);
return -1;
}
return IRQ_SET_MASK_OK;
}
static struct irq_chip ics_rtas_irq_chip = {
.name = "XICS",
.irq_startup = ics_rtas_startup,
.irq_mask = ics_rtas_mask_irq,
.irq_unmask = ics_rtas_unmask_irq,
.irq_eoi = NULL, /* Patched at init time */
.irq_set_affinity = ics_rtas_set_affinity,
.irq_set_type = xics_set_irq_type,
.irq_retrigger = xics_retrigger,
};
static int ics_rtas_check(struct ics *ics, unsigned int hw_irq)
{
int status[2];
int rc;
if (WARN_ON(hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS))
return -EINVAL;
/* Check if RTAS knows about this interrupt */
rc = rtas_call(ibm_get_xive, 1, 3, status, hw_irq);
if (rc)
return -ENXIO;
return 0;
}
static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec)
{
ics_rtas_mask_real_irq(vec);
}
static long ics_rtas_get_server(struct ics *ics, unsigned long vec)
{
int rc, status[2];
rc = rtas_call(ibm_get_xive, 1, 3, status, vec);
if (rc)
return -1;
return status[0];
}
static int ics_rtas_host_match(struct ics *ics, struct device_node *node)
{
/* IBM machines have interrupt parents of various funky types for things
* like vdevices, events, etc... The trick we use here is to match
* everything except the legacy 8259, which is compatible with "chrp,iic".
*/
return !of_device_is_compatible(node, "chrp,iic");
}
/* Only one global & static struct ics */
static struct ics ics_rtas = {
.check = ics_rtas_check,
.mask_unknown = ics_rtas_mask_unknown,
.get_server = ics_rtas_get_server,
.host_match = ics_rtas_host_match,
.chip = &ics_rtas_irq_chip,
};
__init int ics_rtas_init(void)
{
ibm_get_xive = rtas_function_token(RTAS_FN_IBM_GET_XIVE);
ibm_set_xive = rtas_function_token(RTAS_FN_IBM_SET_XIVE);
ibm_int_on = rtas_function_token(RTAS_FN_IBM_INT_ON);
ibm_int_off = rtas_function_token(RTAS_FN_IBM_INT_OFF);
/* We enable the RTAS "ICS" if RTAS is present with the
* appropriate tokens
*/
if (ibm_get_xive == RTAS_UNKNOWN_SERVICE ||
ibm_set_xive == RTAS_UNKNOWN_SERVICE)
return -ENODEV;
/* We need to patch our irq chip's EOI to point to the
* right ICP
*/
ics_rtas_irq_chip.irq_eoi = icp_ops->eoi;
/* Register ourselves */
xics_register_ics(&ics_rtas);
return 0;
}
| linux-master | arch/powerpc/sysdev/xics/ics-rtas.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2011 IBM Corporation.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xics.h>
#include <asm/io.h>
#include <asm/hvcall.h>
static inline unsigned int icp_hv_get_xirr(unsigned char cppr)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
long rc;
unsigned int ret = XICS_IRQ_SPURIOUS;
rc = plpar_hcall(H_XIRR, retbuf, cppr);
if (rc == H_SUCCESS) {
ret = (unsigned int)retbuf[0];
} else {
pr_err("%s: bad return code xirr cppr=0x%x returned %ld\n",
__func__, cppr, rc);
WARN_ON_ONCE(1);
}
return ret;
}
static inline void icp_hv_set_cppr(u8 value)
{
long rc = plpar_hcall_norets(H_CPPR, value);
if (rc != H_SUCCESS) {
pr_err("%s: bad return code cppr cppr=0x%x returned %ld\n",
__func__, value, rc);
WARN_ON_ONCE(1);
}
}
static inline void icp_hv_set_xirr(unsigned int value)
{
long rc = plpar_hcall_norets(H_EOI, value);
if (rc != H_SUCCESS) {
pr_err("%s: bad return code eoi xirr=0x%x returned %ld\n",
__func__, value, rc);
WARN_ON_ONCE(1);
icp_hv_set_cppr(value >> 24);
}
}
static inline void icp_hv_set_qirr(int n_cpu , u8 value)
{
int hw_cpu = get_hard_smp_processor_id(n_cpu);
long rc;
/* Make sure all previous accesses are ordered before IPI sending */
mb();
rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
if (rc != H_SUCCESS) {
pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x "
"returned %ld\n", __func__, n_cpu, hw_cpu, value, rc);
WARN_ON_ONCE(1);
}
}
static void icp_hv_eoi(struct irq_data *d)
{
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
iosync();
icp_hv_set_xirr((xics_pop_cppr() << 24) | hw_irq);
}
static void icp_hv_teardown_cpu(void)
{
int cpu = smp_processor_id();
/* Clear any pending IPI */
icp_hv_set_qirr(cpu, 0xff);
}
static void icp_hv_flush_ipi(void)
{
/* We take the IPI irq and never return, so we
* need to EOI the IPI, but want to leave our priority 0.
*
* Should we check all the other interrupts too?
* Should we be flagging the idle loop instead?
* Or creating some task to be scheduled?
*/
icp_hv_set_xirr((0x00 << 24) | XICS_IPI);
}
static unsigned int icp_hv_get_irq(void)
{
unsigned int xirr = icp_hv_get_xirr(xics_cppr_top());
unsigned int vec = xirr & 0x00ffffff;
unsigned int irq;
if (vec == XICS_IRQ_SPURIOUS)
return 0;
irq = irq_find_mapping(xics_host, vec);
if (likely(irq)) {
xics_push_cppr(vec);
return irq;
}
/* We don't have a linux mapping, so have rtas mask it. */
xics_mask_unknown_vec(vec);
/* We might learn about it later, so EOI it */
icp_hv_set_xirr(xirr);
return 0;
}
static void icp_hv_set_cpu_priority(unsigned char cppr)
{
xics_set_base_cppr(cppr);
icp_hv_set_cppr(cppr);
iosync();
}
#ifdef CONFIG_SMP
static void icp_hv_cause_ipi(int cpu)
{
icp_hv_set_qirr(cpu, IPI_PRIORITY);
}
static irqreturn_t icp_hv_ipi_action(int irq, void *dev_id)
{
int cpu = smp_processor_id();
icp_hv_set_qirr(cpu, 0xff);
return smp_ipi_demux();
}
#endif /* CONFIG_SMP */
static const struct icp_ops icp_hv_ops = {
.get_irq = icp_hv_get_irq,
.eoi = icp_hv_eoi,
.set_priority = icp_hv_set_cpu_priority,
.teardown_cpu = icp_hv_teardown_cpu,
.flush_ipi = icp_hv_flush_ipi,
#ifdef CONFIG_SMP
.ipi_action = icp_hv_ipi_action,
.cause_ipi = icp_hv_cause_ipi,
#endif
};
int __init icp_hv_init(void)
{
struct device_node *np;
np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xicp");
if (!np)
np = of_find_node_by_type(NULL,
"PowerPC-External-Interrupt-Presentation");
if (!np)
return -ENODEV;
icp_ops = &icp_hv_ops;
of_node_put(np);
return 0;
}
| linux-master | arch/powerpc/sysdev/xics/icp-hv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* bpf_jit_comp64.c: eBPF JIT compiler
*
* Copyright 2016 Naveen N. Rao <[email protected]>
* IBM Corporation
*
* Based on the powerpc classic BPF JIT compiler by Matt Evans
*/
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
#include <asm/security_features.h>
#include "bpf_jit.h"
/*
* Stack layout:
* Ensure the top half (up to local_tmp_var) stays consistent
* with our redzone usage.
*
* [ prev sp ] <-------------
* [ nv gpr save area ] 5*8 |
* [ tail_call_cnt ] 8 |
* [ local_tmp_var ] 16 |
* fp (r31) --> [ ebpf stack space ] upto 512 |
* [ frame header ] 32/112 |
* sp (r1) ---> [ stack pointer ] --------------
*/
/* for the non-volatile GPRs backing BPF_REG_6 to BPF_REG_10 */
#define BPF_PPC_STACK_SAVE (5*8)
/* for bpf JIT code internal usage */
#define BPF_PPC_STACK_LOCALS 24
/* stack frame excluding BPF stack, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \
BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
/* BPF register usage */
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
/* BPF to ppc register mappings */
void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
{
/* function return value */
ctx->b2p[BPF_REG_0] = _R8;
/* function arguments */
ctx->b2p[BPF_REG_1] = _R3;
ctx->b2p[BPF_REG_2] = _R4;
ctx->b2p[BPF_REG_3] = _R5;
ctx->b2p[BPF_REG_4] = _R6;
ctx->b2p[BPF_REG_5] = _R7;
/* non volatile registers */
ctx->b2p[BPF_REG_6] = _R27;
ctx->b2p[BPF_REG_7] = _R28;
ctx->b2p[BPF_REG_8] = _R29;
ctx->b2p[BPF_REG_9] = _R30;
/* frame pointer aka BPF_REG_10 */
ctx->b2p[BPF_REG_FP] = _R31;
/* eBPF jit internal registers */
ctx->b2p[BPF_REG_AX] = _R12;
ctx->b2p[TMP_REG_1] = _R9;
ctx->b2p[TMP_REG_2] = _R10;
}
/* PPC NVR range -- update this if we ever use NVRs below r27 */
#define BPF_PPC_NVR_MIN _R27
static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
/*
* We only need a stack frame if:
* - we call other functions (kernel helpers), or
* - the bpf program uses its stack area
* The latter condition is deduced from the usage of BPF_REG_FP
*/
return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
}
/*
* When not setting up our own stackframe, the redzone usage is:
*
* [ prev sp ] <-------------
* [ ... ] |
* sp (r1) ---> [ stack pointer ] --------------
* [ nv gpr save area ] 5*8
* [ tail_call_cnt ] 8
* [ local_tmp_var ] 16
* [ unused red zone ] 208 bytes protected
*/
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
if (bpf_has_stack_frame(ctx))
return STACK_FRAME_MIN_SIZE + ctx->stack_size;
else
return -(BPF_PPC_STACK_SAVE + 24);
}
static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
return bpf_jit_stack_local(ctx) + 16;
}
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
if (reg >= BPF_PPC_NVR_MIN && reg < 32)
return (bpf_has_stack_frame(ctx) ?
(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
- (8 * (32 - reg));
pr_err("BPF JIT is asking about unknown registers");
BUG();
}
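/*
* Worked example, assuming an ELFv2 kernel where STACK_FRAME_MIN_SIZE is
* 32: with a 512 byte BPF stack, BPF_PPC_STACKFRAME is 96 and the
* prologue allocates 608 bytes. bpf_jit_stack_local() then returns 544,
* the tail call count sits at offset 560 and r27's save slot at 568
* (r31's at 600), all relative to the new stack pointer.
*/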
void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
}
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
int i;
#ifndef CONFIG_PPC_KERNEL_PCREL
if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
#endif
/*
* Initialize tail_call_cnt if we do tail calls.
* Otherwise, put in NOPs so that it can be skipped when we are
* invoked through a tail call.
*/
if (ctx->seen & SEEN_TAILCALL) {
EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
/* this goes in the redzone */
EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
} else {
EMIT(PPC_RAW_NOP());
EMIT(PPC_RAW_NOP());
}
if (bpf_has_stack_frame(ctx)) {
/*
* We need a stack frame, but we don't necessarily need to
* save/restore LR unless we call other functions
*/
if (ctx->seen & SEEN_FUNC) {
EMIT(PPC_RAW_MFLR(_R0));
EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
}
EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
}
/*
* Back up non-volatile regs -- BPF registers 6-10
* If we haven't created our own stack frame, we save these
* in the protected zone below the previous stack frame
*/
for (i = BPF_REG_6; i <= BPF_REG_10; i++)
if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
/* Setup frame pointer to point to the bpf stack area */
if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
STACK_FRAME_MIN_SIZE + ctx->stack_size));
}
static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
int i;
/* Restore NVRs */
for (i = BPF_REG_6; i <= BPF_REG_10; i++)
if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
/* Tear down our stack frame */
if (bpf_has_stack_frame(ctx)) {
EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
if (ctx->seen & SEEN_FUNC) {
EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
EMIT(PPC_RAW_MTLR(_R0));
}
}
}
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
bpf_jit_emit_common_epilogue(image, ctx);
/* Move result to r3 */
EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));
EMIT(PPC_RAW_BLR());
}
static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u64 func)
{
unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
long reladdr;
if (WARN_ON_ONCE(!core_kernel_text(func_addr)))
return -EINVAL;
if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
reladdr = func_addr - CTX_NIA(ctx);
if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) {
pr_err("eBPF: address of %ps out of range of pcrel address.\n",
(void *)func);
return -ERANGE;
}
/* pla r12,addr */
EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
EMIT(PPC_RAW_MTCTR(_R12));
EMIT(PPC_RAW_BCTR());
} else {
reladdr = func_addr - kernel_toc_addr();
if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
return -ERANGE;
}
EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
EMIT(PPC_RAW_MTCTR(_R12));
EMIT(PPC_RAW_BCTRL());
}
return 0;
}
int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{
unsigned int i, ctx_idx = ctx->idx;
if (WARN_ON_ONCE(func && is_module_text_address(func)))
return -EINVAL;
/* skip past descriptor if elf v1 */
func += FUNCTION_DESCR_SIZE;
/* Load function address into r12 */
PPC_LI64(_R12, func);
/* For bpf-to-bpf function calls, the callee's address is unknown
* until the last extra pass. As seen above, we use PPC_LI64() to
* load the callee's address, but this may optimize the number of
* instructions required based on the nature of the address.
*
* Since we don't want the number of instructions emitted to increase,
* we pad the optimized PPC_LI64() call with NOPs to guarantee that
* we always have a five-instruction sequence, which is the maximum
* that PPC_LI64() can emit.
*/
if (!image)
for (i = ctx->idx - ctx_idx; i < 5; i++)
EMIT(PPC_RAW_NOP());
EMIT(PPC_RAW_MTCTR(_R12));
EMIT(PPC_RAW_BCTRL());
return 0;
}
static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
/*
* By now, the eBPF program has already setup parameters in r3, r4 and r5
* r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
* r4/BPF_REG_2 - pointer to bpf_array
* r5/BPF_REG_3 - index in bpf_array
*/
int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
int b2p_index = bpf_to_ppc(BPF_REG_3);
int bpf_tailcall_prologue_size = 8;
if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
bpf_tailcall_prologue_size += 4; /* skip past the toc load */
/*
* if (index >= array->map.max_entries)
* goto out;
*/
EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
PPC_BCC_SHORT(COND_GE, out);
/*
* if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
* goto out;
*/
EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
PPC_BCC_SHORT(COND_GE, out);
/*
* tail_call_cnt++;
*/
EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
/* prog = array->ptrs[index]; */
EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));
/*
* if (prog == NULL)
* goto out;
*/
EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
PPC_BCC_SHORT(COND_EQ, out);
/* goto *(prog->bpf_func + prologue_size); */
EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
/* tear down stack, restore NVRs, ... */
bpf_jit_emit_common_epilogue(image, ctx);
EMIT(PPC_RAW_BCTR());
/* out: */
return 0;
}
/*
* We spill into the redzone always, even if the bpf program has its own stackframe.
* Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
*/
void bpf_stf_barrier(void);
asm (
" .global bpf_stf_barrier ;"
" bpf_stf_barrier: ;"
" std 21,-64(1) ;"
" std 22,-56(1) ;"
" sync ;"
" ld 21,-64(1) ;"
" ld 22,-56(1) ;"
" ori 31,31,0 ;"
" .rept 14 ;"
" b 1f ;"
" 1: ;"
" .endr ;"
" blr ;"
);
/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
u32 *addrs, int pass, bool extra_pass)
{
enum stf_barrier_type stf_barrier = stf_barrier_type_get();
const struct bpf_insn *insn = fp->insnsi;
int flen = fp->len;
int i, ret;
/* Start of epilogue code - will only be valid 2nd pass onwards */
u32 exit_addr = addrs[flen];
for (i = 0; i < flen; i++) {
u32 code = insn[i].code;
u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
u32 src_reg = bpf_to_ppc(insn[i].src_reg);
u32 size = BPF_SIZE(code);
u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
u32 save_reg, ret_reg;
s16 off = insn[i].off;
s32 imm = insn[i].imm;
bool func_addr_fixed;
u64 func_addr;
u64 imm64;
u32 true_cond;
u32 tmp_idx;
int j;
/*
* addrs[] maps a BPF bytecode address into a real offset from
* the start of the body code.
*/
addrs[i] = ctx->idx * 4;
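/*
 * Illustrative example (hypothetical program): if BPF insn 0 expands
 * to two PPC instructions and insn 1 to three, then addrs[0] = 0,
 * addrs[1] = 8 and addrs[2] = 20, so branch targets given as BPF
 * instruction numbers resolve directly to byte offsets.
 */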
/*
* As an optimization, we note down which non-volatile registers
* are used so that we can only save/restore those in our
* prologue and epilogue. We do this here regardless of whether
* the actual BPF instruction uses src/dst registers or not
* (for instance, BPF_CALL does not use them). The expectation
* is that those instructions will have src_reg/dst_reg set to
* 0. Even otherwise, we just lose some prologue/epilogue
* optimization but everything else should work without
* any issues.
*/
if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
bpf_set_seen_register(ctx, dst_reg);
if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
bpf_set_seen_register(ctx, src_reg);
switch (code) {
/*
* Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
*/
case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
if (!imm) {
goto bpf_alu32_trunc;
} else if (imm >= -32768 && imm < 32768) {
EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
} else {
PPC_LI32(tmp1_reg, imm);
EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
}
goto bpf_alu32_trunc;
case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
if (!imm) {
goto bpf_alu32_trunc;
} else if (imm > -32768 && imm <= 32768) {
EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
} else {
PPC_LI32(tmp1_reg, imm);
EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
}
goto bpf_alu32_trunc;
case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
if (BPF_CLASS(code) == BPF_ALU)
EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
else
EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
if (imm >= -32768 && imm < 32768)
EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
else {
PPC_LI32(tmp1_reg, imm);
if (BPF_CLASS(code) == BPF_ALU)
EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
else
EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
}
goto bpf_alu32_trunc;
case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
if (BPF_OP(code) == BPF_MOD) {
EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));
EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
} else
EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
if (BPF_OP(code) == BPF_MOD) {
EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
} else
EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
break;
case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
if (imm == 0)
return -EINVAL;
if (imm == 1) {
if (BPF_OP(code) == BPF_DIV) {
goto bpf_alu32_trunc;
} else {
EMIT(PPC_RAW_LI(dst_reg, 0));
break;
}
}
PPC_LI32(tmp1_reg, imm);
switch (BPF_CLASS(code)) {
case BPF_ALU:
if (BPF_OP(code) == BPF_MOD) {
EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
} else
EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
break;
case BPF_ALU64:
if (BPF_OP(code) == BPF_MOD) {
EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
} else
EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
break;
}
goto bpf_alu32_trunc;
case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
case BPF_ALU64 | BPF_NEG: /* dst = -dst */
EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
goto bpf_alu32_trunc;
/*
* Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
*/
case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
if (!IMM_H(imm))
EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
else {
/* Sign-extended */
PPC_LI32(tmp1_reg, imm);
EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
}
goto bpf_alu32_trunc;
case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
/* Sign-extended */
PPC_LI32(tmp1_reg, imm);
EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
} else {
if (IMM_L(imm))
EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
if (IMM_H(imm))
EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
}
goto bpf_alu32_trunc;
case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
/* Sign-extended */
PPC_LI32(tmp1_reg, imm);
EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
} else {
if (IMM_L(imm))
EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
if (IMM_H(imm))
EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
}
goto bpf_alu32_trunc;
case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
/* slw clears top 32 bits */
EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
/* skip zero extension move, but set address map. */
if (insn_is_zext(&insn[i + 1]))
addrs[++i] = ctx->idx * 4;
break;
case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
break;
case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
/* with imm 0, we still need to clear top 32 bits */
EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
if (insn_is_zext(&insn[i + 1]))
addrs[++i] = ctx->idx * 4;
break;
case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
if (imm != 0)
EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
break;
case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
if (insn_is_zext(&insn[i + 1]))
addrs[++i] = ctx->idx * 4;
break;
case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
break;
case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
if (insn_is_zext(&insn[i + 1]))
addrs[++i] = ctx->idx * 4;
break;
case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
if (imm != 0)
EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
break;
case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
break;
case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
goto bpf_alu32_trunc;
case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
if (imm != 0)
EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
break;
/*
* MOV
*/
case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
if (imm == 1) {
/* special mov32 for zext */
EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
break;
}
EMIT(PPC_RAW_MR(dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
PPC_LI32(dst_reg, imm);
if (imm < 0)
goto bpf_alu32_trunc;
else if (insn_is_zext(&insn[i + 1]))
addrs[++i] = ctx->idx * 4;
break;
bpf_alu32_trunc:
/* Truncate to 32-bits */
if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
break;
/*
* BPF_FROM_BE/LE
*/
case BPF_ALU | BPF_END | BPF_FROM_LE:
case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
if (BPF_SRC(code) == BPF_FROM_BE)
goto emit_clear;
#else /* !__BIG_ENDIAN__ */
if (BPF_SRC(code) == BPF_FROM_LE)
goto emit_clear;
#endif
switch (imm) {
case 16:
/* Rotate 8 bits left & mask with 0x0000ff00 */
EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
/* Rotate 8 bits right & insert LSB to reg */
EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
/* Move result back to dst_reg */
EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
break;
case 32:
/*
* Rotate word left by 8 bits:
* 2 bytes are already in their final position
* -- bytes 2 and 4 (of bytes 1, 2, 3 and 4)
*/
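/*
 * Worked example (illustrative): with dst_reg = 0xAABBCCDD the three
 * instructions below produce:
 *	rlwinm:	0xBBCCDDAA	(bytes 2 and 4 already final)
 *	rlwimi:	0xDDCCDDAA	(byte 1 inserted)
 *	rlwimi:	0xDDCCBBAA	(byte 3 inserted)
 */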
EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
/* Rotate 24 bits and insert byte 1 */
EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
/* Rotate 24 bits and insert byte 3 */
EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
break;
case 64:
/* Store the value to stack and then use byte-reverse loads */
EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
if (cpu_has_feature(CPU_FTR_ARCH_206)) {
EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
} else {
EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
EMIT(PPC_RAW_LI(tmp2_reg, 4));
EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
}
break;
}
break;
emit_clear:
switch (imm) {
case 16:
/* zero-extend 16 bits into 64 bits */
EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
if (insn_is_zext(&insn[i + 1]))
addrs[++i] = ctx->idx * 4;
break;
case 32:
if (!fp->aux->verifier_zext)
/* zero-extend 32 bits into 64 bits */
EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
break;
case 64:
/* nop */
break;
}
break;
/*
* BPF_ST NOSPEC (speculation barrier)
*/
case BPF_ST | BPF_NOSPEC:
if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
!security_ftr_enabled(SEC_FTR_STF_BARRIER))
break;
switch (stf_barrier) {
case STF_BARRIER_EIEIO:
EMIT(PPC_RAW_EIEIO() | 0x02000000);
break;
case STF_BARRIER_SYNC_ORI:
EMIT(PPC_RAW_SYNC());
EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
EMIT(PPC_RAW_ORI(_R31, _R31, 0));
break;
case STF_BARRIER_FALLBACK:
ctx->seen |= SEEN_FUNC;
PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
EMIT(PPC_RAW_MTCTR(_R12));
EMIT(PPC_RAW_BCTRL());
break;
case STF_BARRIER_NONE:
break;
}
break;
/*
* BPF_ST(X)
*/
case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
if (BPF_CLASS(code) == BPF_ST) {
EMIT(PPC_RAW_LI(tmp1_reg, imm));
src_reg = tmp1_reg;
}
EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
break;
case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
if (BPF_CLASS(code) == BPF_ST) {
EMIT(PPC_RAW_LI(tmp1_reg, imm));
src_reg = tmp1_reg;
}
EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
break;
case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
if (BPF_CLASS(code) == BPF_ST) {
PPC_LI32(tmp1_reg, imm);
src_reg = tmp1_reg;
}
EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
break;
case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
if (BPF_CLASS(code) == BPF_ST) {
PPC_LI32(tmp1_reg, imm);
src_reg = tmp1_reg;
}
if (off % 4) {
EMIT(PPC_RAW_LI(tmp2_reg, off));
EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
} else {
EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
}
break;
/*
* BPF_STX ATOMIC (atomic ops)
*/
case BPF_STX | BPF_ATOMIC | BPF_W:
case BPF_STX | BPF_ATOMIC | BPF_DW:
save_reg = tmp2_reg;
ret_reg = src_reg;
/* Get offset into TMP_REG_1 */
EMIT(PPC_RAW_LI(tmp1_reg, off));
tmp_idx = ctx->idx * 4;
/* load value from memory into TMP_REG_2 */
if (size == BPF_DW)
EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
else
EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));
/* Save old value in _R0 */
if (imm & BPF_FETCH)
EMIT(PPC_RAW_MR(_R0, tmp2_reg));
switch (imm) {
case BPF_ADD:
case BPF_ADD | BPF_FETCH:
EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
break;
case BPF_AND:
case BPF_AND | BPF_FETCH:
EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
break;
case BPF_OR:
case BPF_OR | BPF_FETCH:
EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
break;
case BPF_XOR:
case BPF_XOR | BPF_FETCH:
EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
break;
case BPF_CMPXCHG:
/*
* Return old value in BPF_REG_0 for BPF_CMPXCHG &
* in src_reg for other cases.
*/
ret_reg = bpf_to_ppc(BPF_REG_0);
/* Compare with old value in BPF_R0 */
if (size == BPF_DW)
EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
else
EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
/* Don't set if different from old value */
PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
fallthrough;
case BPF_XCHG:
save_reg = src_reg;
break;
default:
pr_err_ratelimited(
"eBPF filter atomic op code %02x (@%d) unsupported\n",
code, i);
return -EOPNOTSUPP;
}
/* store new value */
if (size == BPF_DW)
EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
else
EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
/* we're done if this succeeded */
PPC_BCC_SHORT(COND_NE, tmp_idx);
if (imm & BPF_FETCH) {
EMIT(PPC_RAW_MR(ret_reg, _R0));
/*
* Skip unnecessary zero-extension for 32-bit cmpxchg.
* For context, see commit 39491867ace5.
*/
if (size != BPF_DW && imm == BPF_CMPXCHG &&
insn_is_zext(&insn[i + 1]))
addrs[++i] = ctx->idx * 4;
}
break;
/*
* BPF_LDX
*/
/* dst = *(u8 *)(ul) (src + off) */
case BPF_LDX | BPF_MEM | BPF_B:
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
/* dst = *(u16 *)(ul) (src + off) */
case BPF_LDX | BPF_MEM | BPF_H:
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
/* dst = *(u32 *)(ul) (src + off) */
case BPF_LDX | BPF_MEM | BPF_W:
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
/* dst = *(u64 *)(ul) (src + off) */
case BPF_LDX | BPF_MEM | BPF_DW:
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
/*
* As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
* kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
* load only if addr is kernel address (see is_kernel_addr()), otherwise
* set dst_reg=0 and move on.
*/
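/*
 * A rough C sketch of the guard emitted below (illustrative only, not
 * the exact generated code); the bound is PAGE_OFFSET, or
 * 0x8000000000000000 on Book3E:
 *
 *	if ((unsigned long)(src + off) > bound)
 *		dst = *(uNN *)(src + off);	(NN = access size)
 *	else
 *		dst = 0;
 */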
if (BPF_MODE(code) == BPF_PROBE_MEM) {
EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
PPC_LI64(tmp2_reg, 0x8000000000000000ul);
else /* BOOK3S_64 */
PPC_LI64(tmp2_reg, PAGE_OFFSET);
EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
EMIT(PPC_RAW_LI(dst_reg, 0));
/*
* Check if 'off' is word aligned for BPF_DW, because
* we might generate two instructions.
*/
if (BPF_SIZE(code) == BPF_DW && (off & 3))
PPC_JMP((ctx->idx + 3) * 4);
else
PPC_JMP((ctx->idx + 2) * 4);
}
switch (size) {
case BPF_B:
EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
break;
case BPF_H:
EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
break;
case BPF_W:
EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
break;
case BPF_DW:
if (off % 4) {
EMIT(PPC_RAW_LI(tmp1_reg, off));
EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
} else {
EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
}
break;
}
if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
addrs[++i] = ctx->idx * 4;
if (BPF_MODE(code) == BPF_PROBE_MEM) {
ret = bpf_add_extable_entry(fp, image, pass, ctx, ctx->idx - 1,
4, dst_reg);
if (ret)
return ret;
}
break;
/*
* Doubleword load
* 16 byte instruction that uses two 'struct bpf_insn'
*/
case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
imm64 = ((u64)(u32) insn[i].imm) |
(((u64)(u32) insn[i+1].imm) << 32);
tmp_idx = ctx->idx;
PPC_LI64(dst_reg, imm64);
/* padding to allow full 5 instructions for later patching */
if (!image)
for (j = ctx->idx - tmp_idx; j < 5; j++)
EMIT(PPC_RAW_NOP());
/* Adjust for two bpf instructions */
addrs[++i] = ctx->idx * 4;
break;
/*
* Return/Exit
*/
case BPF_JMP | BPF_EXIT:
/*
* If this isn't the very last instruction, branch to
* the epilogue. If we _are_ the last instruction,
* we'll just fall through to the epilogue.
*/
if (i != flen - 1) {
ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
if (ret)
return ret;
}
/* else fall through to the epilogue */
break;
/*
* Call kernel helper or bpf function
*/
case BPF_JMP | BPF_CALL:
ctx->seen |= SEEN_FUNC;
ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
&func_addr, &func_addr_fixed);
if (ret < 0)
return ret;
if (func_addr_fixed)
ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
else
ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
if (ret)
return ret;
/* move return value from r3 to BPF_REG_0 */
EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
break;
/*
* Jumps and branches
*/
case BPF_JMP | BPF_JA:
PPC_JMP(addrs[i + 1 + off]);
break;
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP32 | BPF_JGT | BPF_K:
case BPF_JMP32 | BPF_JGT | BPF_X:
case BPF_JMP32 | BPF_JSGT | BPF_K:
case BPF_JMP32 | BPF_JSGT | BPF_X:
true_cond = COND_GT;
goto cond_branch;
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP32 | BPF_JLT | BPF_K:
case BPF_JMP32 | BPF_JLT | BPF_X:
case BPF_JMP32 | BPF_JSLT | BPF_K:
case BPF_JMP32 | BPF_JSLT | BPF_X:
true_cond = COND_LT;
goto cond_branch;
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP32 | BPF_JGE | BPF_K:
case BPF_JMP32 | BPF_JGE | BPF_X:
case BPF_JMP32 | BPF_JSGE | BPF_K:
case BPF_JMP32 | BPF_JSGE | BPF_X:
true_cond = COND_GE;
goto cond_branch;
case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_X:
case BPF_JMP32 | BPF_JLE | BPF_K:
case BPF_JMP32 | BPF_JLE | BPF_X:
case BPF_JMP32 | BPF_JSLE | BPF_K:
case BPF_JMP32 | BPF_JSLE | BPF_X:
true_cond = COND_LE;
goto cond_branch;
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP32 | BPF_JEQ | BPF_K:
case BPF_JMP32 | BPF_JEQ | BPF_X:
true_cond = COND_EQ;
goto cond_branch;
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JNE | BPF_X:
case BPF_JMP32 | BPF_JNE | BPF_K:
case BPF_JMP32 | BPF_JNE | BPF_X:
true_cond = COND_NE;
goto cond_branch;
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP | BPF_JSET | BPF_X:
case BPF_JMP32 | BPF_JSET | BPF_K:
case BPF_JMP32 | BPF_JSET | BPF_X:
true_cond = COND_NE;
/* Fall through */
cond_branch:
switch (code) {
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JNE | BPF_X:
case BPF_JMP32 | BPF_JGT | BPF_X:
case BPF_JMP32 | BPF_JLT | BPF_X:
case BPF_JMP32 | BPF_JGE | BPF_X:
case BPF_JMP32 | BPF_JLE | BPF_X:
case BPF_JMP32 | BPF_JEQ | BPF_X:
case BPF_JMP32 | BPF_JNE | BPF_X:
/* unsigned comparison */
if (BPF_CLASS(code) == BPF_JMP32)
EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
else
EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
break;
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
case BPF_JMP32 | BPF_JSGT | BPF_X:
case BPF_JMP32 | BPF_JSLT | BPF_X:
case BPF_JMP32 | BPF_JSGE | BPF_X:
case BPF_JMP32 | BPF_JSLE | BPF_X:
/* signed comparison */
if (BPF_CLASS(code) == BPF_JMP32)
EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
else
EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
break;
case BPF_JMP | BPF_JSET | BPF_X:
case BPF_JMP32 | BPF_JSET | BPF_X:
if (BPF_CLASS(code) == BPF_JMP) {
EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
} else {
EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
}
break;
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP32 | BPF_JNE | BPF_K:
case BPF_JMP32 | BPF_JEQ | BPF_K:
case BPF_JMP32 | BPF_JGT | BPF_K:
case BPF_JMP32 | BPF_JLT | BPF_K:
case BPF_JMP32 | BPF_JGE | BPF_K:
case BPF_JMP32 | BPF_JLE | BPF_K:
{
bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
/*
* Need sign-extended load, so only positive
* values can be used as imm in cmpldi
*/
if (imm >= 0 && imm < 32768) {
if (is_jmp32)
EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
else
EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
} else {
/* sign-extending load */
PPC_LI32(tmp1_reg, imm);
/* ... but unsigned comparison */
if (is_jmp32)
EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
else
EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
}
break;
}
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP32 | BPF_JSGT | BPF_K:
case BPF_JMP32 | BPF_JSLT | BPF_K:
case BPF_JMP32 | BPF_JSGE | BPF_K:
case BPF_JMP32 | BPF_JSLE | BPF_K:
{
bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
/*
* signed comparison, so any 16-bit value
* can be used in cmpdi
*/
if (imm >= -32768 && imm < 32768) {
if (is_jmp32)
EMIT(PPC_RAW_CMPWI(dst_reg, imm));
else
EMIT(PPC_RAW_CMPDI(dst_reg, imm));
} else {
PPC_LI32(tmp1_reg, imm);
if (is_jmp32)
EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
else
EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
}
break;
}
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP32 | BPF_JSET | BPF_K:
/* andi does not sign-extend the immediate */
if (imm >= 0 && imm < 32768)
/* PPC_ANDI is _only/always_ dot-form */
EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
else {
PPC_LI32(tmp1_reg, imm);
if (BPF_CLASS(code) == BPF_JMP) {
EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
tmp1_reg));
} else {
EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
0, 0, 31));
}
}
break;
}
PPC_BCC(true_cond, addrs[i + 1 + off]);
break;
/*
* Tail call
*/
case BPF_JMP | BPF_TAIL_CALL:
ctx->seen |= SEEN_TAILCALL;
ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
if (ret < 0)
return ret;
break;
default:
/*
* The filter contains something cruel & unusual.
* We don't handle it, but also there shouldn't be
* anything missing from our list.
*/
pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
code, i);
return -ENOTSUPP;
}
}
/* Set end-of-body-code address for exit. */
addrs[i] = ctx->idx * 4;
return 0;
}
| linux-master | arch/powerpc/net/bpf_jit_comp64.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* eBPF JIT compiler
*
* Copyright 2016 Naveen N. Rao <[email protected]>
* IBM Corporation
*
* Based on the powerpc classic BPF JIT compiler by Matt Evans
*/
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
#include "bpf_jit.h"
static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
}
int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr)
{
if (!exit_addr || is_offset_in_branch_range(exit_addr - (ctx->idx * 4))) {
PPC_JMP(exit_addr);
} else if (ctx->alt_exit_addr) {
if (WARN_ON(!is_offset_in_branch_range((long)ctx->alt_exit_addr - (ctx->idx * 4))))
return -1;
PPC_JMP(ctx->alt_exit_addr);
} else {
ctx->alt_exit_addr = ctx->idx * 4;
bpf_jit_build_epilogue(image, ctx);
}
return 0;
}
struct powerpc64_jit_data {
struct bpf_binary_header *header;
u32 *addrs;
u8 *image;
u32 proglen;
struct codegen_context ctx;
};
bool bpf_jit_needs_zext(void)
{
return true;
}
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
u32 proglen;
u32 alloclen;
u8 *image = NULL;
u32 *code_base;
u32 *addrs;
struct powerpc64_jit_data *jit_data;
struct codegen_context cgctx;
int pass;
int flen;
struct bpf_binary_header *bpf_hdr;
struct bpf_prog *org_fp = fp;
struct bpf_prog *tmp_fp;
bool bpf_blinded = false;
bool extra_pass = false;
u32 extable_len;
u32 fixup_len;
if (!fp->jit_requested)
return org_fp;
tmp_fp = bpf_jit_blind_constants(org_fp);
if (IS_ERR(tmp_fp))
return org_fp;
if (tmp_fp != org_fp) {
bpf_blinded = true;
fp = tmp_fp;
}
jit_data = fp->aux->jit_data;
if (!jit_data) {
jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
if (!jit_data) {
fp = org_fp;
goto out;
}
fp->aux->jit_data = jit_data;
}
flen = fp->len;
addrs = jit_data->addrs;
if (addrs) {
cgctx = jit_data->ctx;
image = jit_data->image;
bpf_hdr = jit_data->header;
proglen = jit_data->proglen;
extra_pass = true;
/* During extra pass, ensure index is reset before repopulating extable entries */
cgctx.exentry_idx = 0;
goto skip_init_ctx;
}
addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
if (addrs == NULL) {
fp = org_fp;
goto out_addrs;
}
memset(&cgctx, 0, sizeof(struct codegen_context));
bpf_jit_init_reg_mapping(&cgctx);
/* Make sure that the stack is quadword aligned. */
cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
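/* e.g. a (hypothetical) program with stack_depth = 40 gets stack_size = 48 */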
/* Scouting faux-generate pass 0 */
if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0, false)) {
/* We hit something illegal or unsupported. */
fp = org_fp;
goto out_addrs;
}
/*
* If we have seen a tail call, we need a second pass.
* This is because bpf_jit_emit_common_epilogue() is called
* from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
* We also need a second pass if we ended up with too large
* a program so as to ensure BPF_EXIT branches are in range.
*/
if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) {
cgctx.idx = 0;
if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0, false)) {
fp = org_fp;
goto out_addrs;
}
}
bpf_jit_realloc_regs(&cgctx);
/*
* Pretend to build prologue, given the features we've seen. This will
* update cgctx.idx as it pretends to output instructions, then we can
* calculate total size from idx.
*/
bpf_jit_build_prologue(0, &cgctx);
addrs[fp->len] = cgctx.idx * 4;
bpf_jit_build_epilogue(0, &cgctx);
fixup_len = fp->aux->num_exentries * BPF_FIXUP_LEN * 4;
extable_len = fp->aux->num_exentries * sizeof(struct exception_table_entry);
proglen = cgctx.idx * 4;
alloclen = proglen + FUNCTION_DESCR_SIZE + fixup_len + extable_len;
bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4, bpf_jit_fill_ill_insns);
if (!bpf_hdr) {
fp = org_fp;
goto out_addrs;
}
if (extable_len)
fp->aux->extable = (void *)image + FUNCTION_DESCR_SIZE + proglen + fixup_len;
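/*
 * Resulting image layout, sketched from the sizes computed above:
 *
 *	[ function descriptor     ]	FUNCTION_DESCR_SIZE (ELFv1 only)
 *	[ prologue/body/epilogue  ]	proglen
 *	[ exception fixup stubs   ]	fixup_len
 *	[ exception table         ]	extable_len
 */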
skip_init_ctx:
code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
/* Code generation passes 1-2 */
for (pass = 1; pass < 3; pass++) {
/* Now build the prologue, body code & epilogue for real. */
cgctx.idx = 0;
cgctx.alt_exit_addr = 0;
bpf_jit_build_prologue(code_base, &cgctx);
if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, pass, extra_pass)) {
bpf_jit_binary_free(bpf_hdr);
fp = org_fp;
goto out_addrs;
}
bpf_jit_build_epilogue(code_base, &cgctx);
if (bpf_jit_enable > 1)
pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
proglen - (cgctx.idx * 4), cgctx.seen);
}
if (bpf_jit_enable > 1)
/*
* Note that we output the base address of the code_base
* rather than image, since opcodes are in code_base.
*/
bpf_jit_dump(flen, proglen, pass, code_base);
#ifdef CONFIG_PPC64_ELF_ABI_V1
/* Function descriptor nastiness: Address + TOC */
((u64 *)image)[0] = (u64)code_base;
((u64 *)image)[1] = local_paca->kernel_toc;
#endif
fp->bpf_func = (void *)image;
fp->jited = 1;
fp->jited_len = proglen + FUNCTION_DESCR_SIZE;
bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + bpf_hdr->size);
if (!fp->is_func || extra_pass) {
bpf_jit_binary_lock_ro(bpf_hdr);
bpf_prog_fill_jited_linfo(fp, addrs);
out_addrs:
kfree(addrs);
kfree(jit_data);
fp->aux->jit_data = NULL;
} else {
jit_data->addrs = addrs;
jit_data->ctx = cgctx;
jit_data->proglen = proglen;
jit_data->image = image;
jit_data->header = bpf_hdr;
}
out:
if (bpf_blinded)
bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);
return fp;
}
/*
* The caller should check for (BPF_MODE(code) == BPF_PROBE_MEM) before calling
* this function, as this only applies to BPF_PROBE_MEM, for now.
*/
int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx,
int insn_idx, int jmp_off, int dst_reg)
{
off_t offset;
unsigned long pc;
struct exception_table_entry *ex;
u32 *fixup;
/* Populate extable entries only in the last pass */
if (pass != 2)
return 0;
if (!fp->aux->extable ||
WARN_ON_ONCE(ctx->exentry_idx >= fp->aux->num_exentries))
return -EINVAL;
pc = (unsigned long)&image[insn_idx];
fixup = (void *)fp->aux->extable -
(fp->aux->num_exentries * BPF_FIXUP_LEN * 4) +
(ctx->exentry_idx * BPF_FIXUP_LEN * 4);
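/*
 * Each fixup stub occupies BPF_FIXUP_LEN instruction slots. A sketch of
 * the stub built below for a faulting load into, say, r8 (register
 * names are illustrative):
 *
 *	li	r8, 0		zero the destination
 *	b	<pc + jmp_off>	resume past the faulting access
 *
 * On PPC32 an extra "li r7, 0" clears the high word of the pair.
 */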
fixup[0] = PPC_RAW_LI(dst_reg, 0);
if (IS_ENABLED(CONFIG_PPC32))
fixup[1] = PPC_RAW_LI(dst_reg - 1, 0); /* clear higher 32-bit register too */
fixup[BPF_FIXUP_LEN - 1] =
PPC_RAW_BRANCH((long)(pc + jmp_off) - (long)&fixup[BPF_FIXUP_LEN - 1]);
ex = &fp->aux->extable[ctx->exentry_idx];
offset = pc - (long)&ex->insn;
if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
return -ERANGE;
ex->insn = offset;
offset = (long)fixup - (long)&ex->fixup;
if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
return -ERANGE;
ex->fixup = offset;
ctx->exentry_idx++;
return 0;
}
| linux-master | arch/powerpc/net/bpf_jit_comp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* eBPF JIT compiler for PPC32
*
* Copyright 2020 Christophe Leroy <[email protected]>
* CS GROUP France
*
* Based on PPC64 eBPF JIT compiler by Naveen N. Rao
*/
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
#include "bpf_jit.h"
/*
* Stack layout:
*
* [ prev sp ] <-------------
* [ nv gpr save area ] 16 * 4 |
* fp (r31) --> [ ebpf stack space ] up to 512 |
* [ frame header ] 16 |
* sp (r1) ---> [ stack pointer ] --------------
*/
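/*
 * Worked example (illustrative): with the maximum 512 bytes of eBPF
 * stack, BPF_PPC_STACKFRAME() is 16 + 512 + 64 = 592 bytes (frame
 * header + ebpf stack space + nv gpr/tail-call save area), which stays
 * quadword aligned as required.
 */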
/* for gpr non volatile registers r17 to r31 (14) + tail call */
#define BPF_PPC_STACK_SAVE (15 * 4 + 4)
/* stack frame, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME(ctx) (STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_SAVE + (ctx)->stack_size)
#define PPC_EX32(r, i) EMIT(PPC_RAW_LI((r), (i) < 0 ? -1 : 0))
/* PPC NVR range -- update this if we ever use NVRs below r17 */
#define BPF_PPC_NVR_MIN _R17
#define BPF_PPC_TC _R16
/* BPF register usage */
#define TMP_REG (MAX_BPF_JIT_REG + 0)
/* BPF to ppc register mappings */
void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
{
/* function return value */
ctx->b2p[BPF_REG_0] = _R12;
/* function arguments */
ctx->b2p[BPF_REG_1] = _R4;
ctx->b2p[BPF_REG_2] = _R6;
ctx->b2p[BPF_REG_3] = _R8;
ctx->b2p[BPF_REG_4] = _R10;
ctx->b2p[BPF_REG_5] = _R22;
/* non volatile registers */
ctx->b2p[BPF_REG_6] = _R24;
ctx->b2p[BPF_REG_7] = _R26;
ctx->b2p[BPF_REG_8] = _R28;
ctx->b2p[BPF_REG_9] = _R30;
/* frame pointer aka BPF_REG_10 */
ctx->b2p[BPF_REG_FP] = _R18;
/* eBPF jit internal registers */
ctx->b2p[BPF_REG_AX] = _R20;
ctx->b2p[TMP_REG] = _R31; /* 32 bits */
}
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
if ((reg >= BPF_PPC_NVR_MIN && reg < 32) || reg == BPF_PPC_TC)
return BPF_PPC_STACKFRAME(ctx) - 4 * (32 - reg);
WARN(true, "BPF JIT is asking about unknown registers, will crash the stack");
/* Use the hole we have left for alignment */
return BPF_PPC_STACKFRAME(ctx) - 4;
}
#define SEEN_VREG_MASK 0x1ff80000 /* Volatile registers r3-r12 */
#define SEEN_NVREG_FULL_MASK 0x0003ffff /* Non volatile registers r14-r31 */
#define SEEN_NVREG_TEMP_MASK 0x00001e01 /* BPF_REG_5, BPF_REG_AX, TMP_REG */
static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
/*
* We only need a stack frame if:
* - we call other functions (kernel helpers), or
* - we use non volatile registers, or
* - we use tail call counter
* - the bpf program uses its stack area
* The latter condition is deduced from the usage of BPF_REG_FP
*/
return ctx->seen & (SEEN_FUNC | SEEN_TAILCALL | SEEN_NVREG_FULL_MASK) ||
bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
}
void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
unsigned int nvreg_mask;
if (ctx->seen & SEEN_FUNC)
nvreg_mask = SEEN_NVREG_TEMP_MASK;
else
nvreg_mask = SEEN_NVREG_FULL_MASK;
while (ctx->seen & nvreg_mask &&
(ctx->seen & SEEN_VREG_MASK) != SEEN_VREG_MASK) {
int old = 32 - fls(ctx->seen & (nvreg_mask & 0xaaaaaaab));
int new = 32 - fls(~ctx->seen & (SEEN_VREG_MASK & 0xaaaaaaaa));
int i;
for (i = BPF_REG_0; i <= TMP_REG; i++) {
if (ctx->b2p[i] != old)
continue;
ctx->b2p[i] = new;
bpf_set_seen_register(ctx, new);
bpf_clear_seen_register(ctx, old);
if (i != TMP_REG) {
bpf_set_seen_register(ctx, new - 1);
bpf_clear_seen_register(ctx, old - 1);
}
break;
}
}
}
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
int i;
/* Initialize tail_call_cnt, to be skipped if we do tail calls. */
if (ctx->seen & SEEN_TAILCALL)
EMIT(PPC_RAW_LI(_R4, 0));
else
EMIT(PPC_RAW_NOP());
#define BPF_TAILCALL_PROLOGUE_SIZE 4
if (bpf_has_stack_frame(ctx))
EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));
if (ctx->seen & SEEN_TAILCALL)
EMIT(PPC_RAW_STW(_R4, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
/* First arg comes in as a 32 bits pointer. */
EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_1), _R3));
EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_1) - 1, 0));
/*
* We need a stack frame, but we don't necessarily need to
* save/restore LR unless we call other functions
*/
if (ctx->seen & SEEN_FUNC)
EMIT(PPC_RAW_MFLR(_R0));
/*
* Back up non-volatile regs -- registers r17-r31
*/
for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
if (bpf_is_seen_register(ctx, i))
EMIT(PPC_RAW_STW(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
/* Setup frame pointer to point to the bpf stack area */
if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP))) {
EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_FP) - 1, 0));
EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
STACK_FRAME_MIN_SIZE + ctx->stack_size));
}
if (ctx->seen & SEEN_FUNC)
EMIT(PPC_RAW_STW(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
}
static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
int i;
/* Restore NVRs */
for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
if (bpf_is_seen_register(ctx, i))
EMIT(PPC_RAW_LWZ(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
if (ctx->seen & SEEN_FUNC)
EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
/* Tear down our stack frame */
if (bpf_has_stack_frame(ctx))
EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));
if (ctx->seen & SEEN_FUNC)
EMIT(PPC_RAW_MTLR(_R0));
}
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));
bpf_jit_emit_common_epilogue(image, ctx);
EMIT(PPC_RAW_BLR());
}
int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{
s32 rel = (s32)func - (s32)(image + ctx->idx);
if (image && rel < 0x2000000 && rel >= -0x2000000) {
PPC_BL(func);
} else {
/* Load function address into r0 */
EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
EMIT(PPC_RAW_ORI(_R0, _R0, IMM_L(func)));
EMIT(PPC_RAW_MTCTR(_R0));
EMIT(PPC_RAW_BCTRL());
}
return 0;
}
static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
/*
* By now, the eBPF program has already set up parameters in r3-r6
* r3-r4/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
* r5-r6/BPF_REG_2 - pointer to bpf_array
* r7-r8/BPF_REG_3 - index in bpf_array
*/
int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
int b2p_index = bpf_to_ppc(BPF_REG_3);
/*
* if (index >= array->map.max_entries)
* goto out;
*/
EMIT(PPC_RAW_LWZ(_R0, b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
EMIT(PPC_RAW_CMPLW(b2p_index, _R0));
EMIT(PPC_RAW_LWZ(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
PPC_BCC_SHORT(COND_GE, out);
/*
* if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
* goto out;
*/
EMIT(PPC_RAW_CMPLWI(_R0, MAX_TAIL_CALL_CNT));
/* tail_call_cnt++; */
EMIT(PPC_RAW_ADDIC(_R0, _R0, 1));
PPC_BCC_SHORT(COND_GE, out);
/* prog = array->ptrs[index]; */
EMIT(PPC_RAW_RLWINM(_R3, b2p_index, 2, 0, 29));
EMIT(PPC_RAW_ADD(_R3, _R3, b2p_bpf_array));
EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_array, ptrs)));
/*
* if (prog == NULL)
* goto out;
*/
EMIT(PPC_RAW_CMPLWI(_R3, 0));
PPC_BCC_SHORT(COND_EQ, out);
/* goto *(prog->bpf_func + prologue_size); */
EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_prog, bpf_func)));
EMIT(PPC_RAW_ADDIC(_R3, _R3, BPF_TAILCALL_PROLOGUE_SIZE));
EMIT(PPC_RAW_MTCTR(_R3));
EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_1)));
/* Put tail_call_cnt in r4 */
EMIT(PPC_RAW_MR(_R4, _R0));
/* tear down stack, restore NVRs, ... */
bpf_jit_emit_common_epilogue(image, ctx);
EMIT(PPC_RAW_BCTR());
/* out: */
return 0;
}
/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
u32 *addrs, int pass, bool extra_pass)
{
const struct bpf_insn *insn = fp->insnsi;
int flen = fp->len;
int i, ret;
/* Start of epilogue code - will only be valid 2nd pass onwards */
u32 exit_addr = addrs[flen];
for (i = 0; i < flen; i++) {
u32 code = insn[i].code;
u32 prevcode = i ? insn[i - 1].code : 0;
u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
u32 dst_reg_h = dst_reg - 1;
u32 src_reg = bpf_to_ppc(insn[i].src_reg);
u32 src_reg_h = src_reg - 1;
u32 src2_reg = dst_reg;
u32 src2_reg_h = dst_reg_h;
u32 ax_reg = bpf_to_ppc(BPF_REG_AX);
u32 tmp_reg = bpf_to_ppc(TMP_REG);
u32 size = BPF_SIZE(code);
u32 save_reg, ret_reg;
s16 off = insn[i].off;
s32 imm = insn[i].imm;
bool func_addr_fixed;
u64 func_addr;
u32 true_cond;
u32 tmp_idx;
int j;
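/*
 * Fused mov+alu optimization: when the previous instruction was a
 * register-to-register MOV into this dst_reg (and not the special
 * zero-extension mov32), rewind the emit index over it and use the
 * MOV's source directly as the second operand (src2_reg), so
 * "mov dst, a; alu dst, b" collapses into a single three-operand
 * PPC instruction where the ALU handlers allow it.
 */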
if (i && (BPF_CLASS(code) == BPF_ALU64 || BPF_CLASS(code) == BPF_ALU) &&
(BPF_CLASS(prevcode) == BPF_ALU64 || BPF_CLASS(prevcode) == BPF_ALU) &&
BPF_OP(prevcode) == BPF_MOV && BPF_SRC(prevcode) == BPF_X &&
insn[i - 1].dst_reg == insn[i].dst_reg && insn[i - 1].imm != 1) {
src2_reg = bpf_to_ppc(insn[i - 1].src_reg);
src2_reg_h = src2_reg - 1;
ctx->idx = addrs[i - 1] / 4;
}
/*
* addrs[] maps a BPF bytecode address into a real offset from
* the start of the body code.
*/
addrs[i] = ctx->idx * 4;
/*
* As an optimization, we note down which registers
* are used so that we can only save/restore those in our
* prologue and epilogue. We do this here regardless of whether
* the actual BPF instruction uses src/dst registers or not
* (for instance, BPF_CALL does not use them). The expectation
* is that those instructions will have src_reg/dst_reg set to
* 0. Even otherwise, we just lose some prologue/epilogue
* optimization but everything else should work without
* any issues.
*/
if (dst_reg >= 3 && dst_reg < 32) {
bpf_set_seen_register(ctx, dst_reg);
bpf_set_seen_register(ctx, dst_reg_h);
}
if (src_reg >= 3 && src_reg < 32) {
bpf_set_seen_register(ctx, src_reg);
bpf_set_seen_register(ctx, src_reg_h);
}
switch (code) {
/*
* Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
*/
case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
EMIT(PPC_RAW_ADD(dst_reg, src2_reg, src_reg));
break;
case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
EMIT(PPC_RAW_ADDC(dst_reg, src2_reg, src_reg));
EMIT(PPC_RAW_ADDE(dst_reg_h, src2_reg_h, src_reg_h));
break;
case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
EMIT(PPC_RAW_SUB(dst_reg, src2_reg, src_reg));
break;
case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
EMIT(PPC_RAW_SUBFC(dst_reg, src_reg, src2_reg));
EMIT(PPC_RAW_SUBFE(dst_reg_h, src_reg_h, src2_reg_h));
break;
case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
imm = -imm;
fallthrough;
case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
if (!imm) {
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
} else if (IMM_HA(imm) & 0xffff) {
EMIT(PPC_RAW_ADDIS(dst_reg, src2_reg, IMM_HA(imm)));
src2_reg = dst_reg;
}
if (IMM_L(imm))
EMIT(PPC_RAW_ADDI(dst_reg, src2_reg, IMM_L(imm)));
break;
case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
imm = -imm;
fallthrough;
case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
if (!imm) {
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
break;
}
if (imm >= -32768 && imm < 32768) {
EMIT(PPC_RAW_ADDIC(dst_reg, src2_reg, imm));
} else {
PPC_LI32(_R0, imm);
EMIT(PPC_RAW_ADDC(dst_reg, src2_reg, _R0));
}
if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000))
EMIT(PPC_RAW_ADDZE(dst_reg_h, src2_reg_h));
else
EMIT(PPC_RAW_ADDME(dst_reg_h, src2_reg_h));
break;
case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
bpf_set_seen_register(ctx, tmp_reg);
EMIT(PPC_RAW_MULW(_R0, src2_reg, src_reg_h));
EMIT(PPC_RAW_MULW(dst_reg_h, src2_reg_h, src_reg));
EMIT(PPC_RAW_MULHWU(tmp_reg, src2_reg, src_reg));
EMIT(PPC_RAW_MULW(dst_reg, src2_reg, src_reg));
EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, tmp_reg));
break;
case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
EMIT(PPC_RAW_MULW(dst_reg, src2_reg, src_reg));
break;
case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
if (imm == 1) {
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
} else if (imm == -1) {
EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0));
} else if (is_power_of_2((u32)imm)) {
EMIT(PPC_RAW_SLWI(dst_reg, src2_reg, ilog2(imm)));
} else if (imm >= -32768 && imm < 32768) {
EMIT(PPC_RAW_MULI(dst_reg, src2_reg, imm));
} else {
PPC_LI32(_R0, imm);
EMIT(PPC_RAW_MULW(dst_reg, src2_reg, _R0));
}
break;
case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
if (!imm) {
PPC_LI32(dst_reg, 0);
PPC_LI32(dst_reg_h, 0);
} else if (imm == 1) {
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
} else if (imm == -1) {
EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0));
EMIT(PPC_RAW_SUBFZE(dst_reg_h, src2_reg_h));
} else if (imm > 0 && is_power_of_2(imm)) {
imm = ilog2(imm);
EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg_h, imm, 0, 31 - imm));
EMIT(PPC_RAW_RLWIMI(dst_reg_h, dst_reg, imm, 32 - imm, 31));
EMIT(PPC_RAW_SLWI(dst_reg, src2_reg, imm));
} else {
bpf_set_seen_register(ctx, tmp_reg);
PPC_LI32(tmp_reg, imm);
EMIT(PPC_RAW_MULW(dst_reg_h, src2_reg_h, tmp_reg));
if (imm < 0)
EMIT(PPC_RAW_SUB(dst_reg_h, dst_reg_h, src2_reg));
EMIT(PPC_RAW_MULHWU(_R0, src2_reg, tmp_reg));
EMIT(PPC_RAW_MULW(dst_reg, src2_reg, tmp_reg));
EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
}
break;
case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, src_reg));
break;
case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
EMIT(PPC_RAW_DIVWU(_R0, src2_reg, src_reg));
EMIT(PPC_RAW_MULW(_R0, src_reg, _R0));
EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
break;
case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
return -EOPNOTSUPP;
case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
return -EOPNOTSUPP;
case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
if (!imm)
return -EINVAL;
if (imm == 1) {
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
} else if (is_power_of_2((u32)imm)) {
EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, ilog2(imm)));
} else {
PPC_LI32(_R0, imm);
EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, _R0));
}
break;
case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
if (!imm)
return -EINVAL;
if (!is_power_of_2((u32)imm)) {
bpf_set_seen_register(ctx, tmp_reg);
PPC_LI32(tmp_reg, imm);
EMIT(PPC_RAW_DIVWU(_R0, src2_reg, tmp_reg));
EMIT(PPC_RAW_MULW(_R0, tmp_reg, _R0));
EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
} else if (imm == 1) {
EMIT(PPC_RAW_LI(dst_reg, 0));
} else {
imm = ilog2((u32)imm);
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - imm, 31));
}
break;
case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
if (!imm)
return -EINVAL;
if (imm < 0)
imm = -imm;
if (!is_power_of_2(imm))
return -EOPNOTSUPP;
if (imm == 1)
EMIT(PPC_RAW_LI(dst_reg, 0));
else
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - ilog2(imm), 31));
EMIT(PPC_RAW_LI(dst_reg_h, 0));
break;
case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
if (!imm)
return -EINVAL;
if (!is_power_of_2(abs(imm)))
return -EOPNOTSUPP;
if (imm < 0) {
EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0));
EMIT(PPC_RAW_SUBFZE(dst_reg_h, src2_reg_h));
imm = -imm;
src2_reg = dst_reg;
}
if (imm == 1) {
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
} else {
imm = ilog2(imm);
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 32 - imm, imm, 31));
EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg_h, 32 - imm, 0, imm - 1));
EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, imm));
}
break;
case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
EMIT(PPC_RAW_NEG(dst_reg, src2_reg));
break;
case BPF_ALU64 | BPF_NEG: /* dst = -dst */
EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0));
EMIT(PPC_RAW_SUBFZE(dst_reg_h, src2_reg_h));
break;
/*
* Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
*/
case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
EMIT(PPC_RAW_AND(dst_reg, src2_reg, src_reg));
EMIT(PPC_RAW_AND(dst_reg_h, src2_reg_h, src_reg_h));
break;
case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
EMIT(PPC_RAW_AND(dst_reg, src2_reg, src_reg));
break;
case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
if (imm >= 0)
EMIT(PPC_RAW_LI(dst_reg_h, 0));
fallthrough;
case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
if (!IMM_H(imm)) {
EMIT(PPC_RAW_ANDI(dst_reg, src2_reg, IMM_L(imm)));
} else if (!IMM_L(imm)) {
EMIT(PPC_RAW_ANDIS(dst_reg, src2_reg, IMM_H(imm)));
} else if (imm == (((1 << fls(imm)) - 1) ^ ((1 << (ffs(imm) - 1)) - 1))) {
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0,
32 - fls(imm), 32 - ffs(imm)));
} else {
PPC_LI32(_R0, imm);
EMIT(PPC_RAW_AND(dst_reg, src2_reg, _R0));
}
break;
case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
EMIT(PPC_RAW_OR(dst_reg, src2_reg, src_reg));
EMIT(PPC_RAW_OR(dst_reg_h, src2_reg_h, src_reg_h));
break;
case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
EMIT(PPC_RAW_OR(dst_reg, src2_reg, src_reg));
break;
case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
/* Sign-extended */
if (imm < 0)
EMIT(PPC_RAW_LI(dst_reg_h, -1));
fallthrough;
case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
if (IMM_L(imm)) {
EMIT(PPC_RAW_ORI(dst_reg, src2_reg, IMM_L(imm)));
src2_reg = dst_reg;
}
if (IMM_H(imm))
EMIT(PPC_RAW_ORIS(dst_reg, src2_reg, IMM_H(imm)));
break;
case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
if (dst_reg == src_reg) {
EMIT(PPC_RAW_LI(dst_reg, 0));
EMIT(PPC_RAW_LI(dst_reg_h, 0));
} else {
EMIT(PPC_RAW_XOR(dst_reg, src2_reg, src_reg));
EMIT(PPC_RAW_XOR(dst_reg_h, src2_reg_h, src_reg_h));
}
break;
case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
if (dst_reg == src_reg)
EMIT(PPC_RAW_LI(dst_reg, 0));
else
EMIT(PPC_RAW_XOR(dst_reg, src2_reg, src_reg));
break;
case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
if (imm < 0)
EMIT(PPC_RAW_NOR(dst_reg_h, src2_reg_h, src2_reg_h));
fallthrough;
case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
if (IMM_L(imm)) {
EMIT(PPC_RAW_XORI(dst_reg, src2_reg, IMM_L(imm)));
src2_reg = dst_reg;
}
if (IMM_H(imm))
EMIT(PPC_RAW_XORIS(dst_reg, src2_reg, IMM_H(imm)));
break;
case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
EMIT(PPC_RAW_SLW(dst_reg, src2_reg, src_reg));
break;
case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
bpf_set_seen_register(ctx, tmp_reg);
EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
EMIT(PPC_RAW_SLW(dst_reg_h, src2_reg_h, src_reg));
EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
EMIT(PPC_RAW_SRW(_R0, src2_reg, _R0));
EMIT(PPC_RAW_SLW(tmp_reg, src2_reg, tmp_reg));
EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, _R0));
EMIT(PPC_RAW_SLW(dst_reg, src2_reg, src_reg));
EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, tmp_reg));
break;
case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
if (imm)
EMIT(PPC_RAW_SLWI(dst_reg, src2_reg, imm));
else
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
break;
case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
if (imm < 0)
return -EINVAL;
if (!imm) {
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
} else if (imm < 32) {
EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg_h, imm, 0, 31 - imm));
EMIT(PPC_RAW_RLWIMI(dst_reg_h, src2_reg, imm, 32 - imm, 31));
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, imm, 0, 31 - imm));
} else if (imm < 64) {
EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg, imm, 0, 31 - imm));
EMIT(PPC_RAW_LI(dst_reg, 0));
} else {
EMIT(PPC_RAW_LI(dst_reg_h, 0));
EMIT(PPC_RAW_LI(dst_reg, 0));
}
break;
case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
EMIT(PPC_RAW_SRW(dst_reg, src2_reg, src_reg));
break;
case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
bpf_set_seen_register(ctx, tmp_reg);
EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
EMIT(PPC_RAW_SRW(dst_reg, src2_reg, src_reg));
EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
EMIT(PPC_RAW_SLW(_R0, src2_reg_h, _R0));
EMIT(PPC_RAW_SRW(tmp_reg, dst_reg_h, tmp_reg));
EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
EMIT(PPC_RAW_SRW(dst_reg_h, src2_reg_h, src_reg));
EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
break;
case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
if (imm)
EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, imm));
else
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
break;
case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
if (imm < 0)
return -EINVAL;
if (!imm) {
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
} else if (imm < 32) {
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 32 - imm, imm, 31));
EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg_h, 32 - imm, 0, imm - 1));
EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg_h, 32 - imm, imm, 31));
} else if (imm < 64) {
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg_h, 64 - imm, imm - 32, 31));
EMIT(PPC_RAW_LI(dst_reg_h, 0));
} else {
EMIT(PPC_RAW_LI(dst_reg, 0));
EMIT(PPC_RAW_LI(dst_reg_h, 0));
}
break;
case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
EMIT(PPC_RAW_SRAW(dst_reg, src2_reg, src_reg));
break;
case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
bpf_set_seen_register(ctx, tmp_reg);
EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
EMIT(PPC_RAW_SRW(dst_reg, src2_reg, src_reg));
EMIT(PPC_RAW_SLW(_R0, src2_reg_h, _R0));
EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
EMIT(PPC_RAW_RLWINM(_R0, tmp_reg, 0, 26, 26));
EMIT(PPC_RAW_SRAW(tmp_reg, src2_reg_h, tmp_reg));
EMIT(PPC_RAW_SRAW(dst_reg_h, src2_reg_h, src_reg));
EMIT(PPC_RAW_SLW(tmp_reg, tmp_reg, _R0));
EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
break;
case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
if (imm)
EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg, imm));
else
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
break;
case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
if (imm < 0)
return -EINVAL;
if (!imm) {
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
} else if (imm < 32) {
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 32 - imm, imm, 31));
EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg_h, 32 - imm, 0, imm - 1));
EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, imm));
} else if (imm < 64) {
EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg_h, imm - 32));
EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31));
} else {
EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg_h, 31));
EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31));
}
break;
/*
* MOV
*/
case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
if (dst_reg == src_reg)
break;
EMIT(PPC_RAW_MR(dst_reg, src_reg));
EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h));
break;
case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
/* special mov32 for zext */
if (imm == 1)
EMIT(PPC_RAW_LI(dst_reg_h, 0));
else if (dst_reg != src_reg)
EMIT(PPC_RAW_MR(dst_reg, src_reg));
break;
case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
PPC_LI32(dst_reg, imm);
PPC_EX32(dst_reg_h, imm);
break;
case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
PPC_LI32(dst_reg, imm);
break;
/*
* BPF_FROM_BE/LE
*/
case BPF_ALU | BPF_END | BPF_FROM_LE:
switch (imm) {
case 16:
/* Copy 16 bits to upper part */
EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg, 16, 0, 15));
/* Rotate 8 bits right & mask */
EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 24, 16, 31));
break;
case 32:
/*
* Rotate word left by 8 bits:
* 2 bytes are already in their final position
* -- bytes 2 and 4 (of bytes 1, 2, 3 and 4)
*/
EMIT(PPC_RAW_RLWINM(_R0, src2_reg, 8, 0, 31));
/* Rotate 24 bits and insert byte 1 */
EMIT(PPC_RAW_RLWIMI(_R0, src2_reg, 24, 0, 7));
/* Rotate 24 bits and insert byte 3 */
EMIT(PPC_RAW_RLWIMI(_R0, src2_reg, 24, 16, 23));
EMIT(PPC_RAW_MR(dst_reg, _R0));
break;
case 64:
bpf_set_seen_register(ctx, tmp_reg);
EMIT(PPC_RAW_RLWINM(tmp_reg, src2_reg, 8, 0, 31));
EMIT(PPC_RAW_RLWINM(_R0, src2_reg_h, 8, 0, 31));
/* Rotate 24 bits and insert byte 1 */
EMIT(PPC_RAW_RLWIMI(tmp_reg, src2_reg, 24, 0, 7));
EMIT(PPC_RAW_RLWIMI(_R0, src2_reg_h, 24, 0, 7));
/* Rotate 24 bits and insert byte 3 */
EMIT(PPC_RAW_RLWIMI(tmp_reg, src2_reg, 24, 16, 23));
EMIT(PPC_RAW_RLWIMI(_R0, src2_reg_h, 24, 16, 23));
EMIT(PPC_RAW_MR(dst_reg, _R0));
EMIT(PPC_RAW_MR(dst_reg_h, tmp_reg));
break;
}
break;
case BPF_ALU | BPF_END | BPF_FROM_BE:
switch (imm) {
case 16:
/* zero-extend 16 bits into 32 bits */
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 16, 31));
break;
case 32:
case 64:
/* nop */
break;
}
break;
/*
* BPF_ST NOSPEC (speculation barrier)
*/
case BPF_ST | BPF_NOSPEC:
break;
/*
* BPF_ST(X)
*/
case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
break;
case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
PPC_LI32(_R0, imm);
EMIT(PPC_RAW_STB(_R0, dst_reg, off));
break;
case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
break;
case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
PPC_LI32(_R0, imm);
EMIT(PPC_RAW_STH(_R0, dst_reg, off));
break;
case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
break;
case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
PPC_LI32(_R0, imm);
EMIT(PPC_RAW_STW(_R0, dst_reg, off));
break;
case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
EMIT(PPC_RAW_STW(src_reg_h, dst_reg, off));
EMIT(PPC_RAW_STW(src_reg, dst_reg, off + 4));
break;
case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
PPC_LI32(_R0, imm);
EMIT(PPC_RAW_STW(_R0, dst_reg, off + 4));
PPC_EX32(_R0, imm);
EMIT(PPC_RAW_STW(_R0, dst_reg, off));
break;
/*
* BPF_STX ATOMIC (atomic ops)
*/
case BPF_STX | BPF_ATOMIC | BPF_W:
save_reg = _R0;
ret_reg = src_reg;
bpf_set_seen_register(ctx, tmp_reg);
bpf_set_seen_register(ctx, ax_reg);
/* Get offset into TMP_REG */
EMIT(PPC_RAW_LI(tmp_reg, off));
tmp_idx = ctx->idx * 4;
/* load value from memory into r0 */
EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
/* Save old value in BPF_REG_AX */
if (imm & BPF_FETCH)
EMIT(PPC_RAW_MR(ax_reg, _R0));
switch (imm) {
case BPF_ADD:
case BPF_ADD | BPF_FETCH:
EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
break;
case BPF_AND:
case BPF_AND | BPF_FETCH:
EMIT(PPC_RAW_AND(_R0, _R0, src_reg));
break;
case BPF_OR:
case BPF_OR | BPF_FETCH:
EMIT(PPC_RAW_OR(_R0, _R0, src_reg));
break;
case BPF_XOR:
case BPF_XOR | BPF_FETCH:
EMIT(PPC_RAW_XOR(_R0, _R0, src_reg));
break;
case BPF_CMPXCHG:
/*
* Return old value in BPF_REG_0 for BPF_CMPXCHG &
* in src_reg for other cases.
*/
ret_reg = bpf_to_ppc(BPF_REG_0);
/* Compare with old value in BPF_REG_0 */
EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), _R0));
/* Don't set if different from old value */
PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
fallthrough;
case BPF_XCHG:
save_reg = src_reg;
break;
default:
pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
code, i);
return -EOPNOTSUPP;
}
/* store new value */
EMIT(PPC_RAW_STWCX(save_reg, tmp_reg, dst_reg));
/* we're done if this succeeded */
PPC_BCC_SHORT(COND_NE, tmp_idx);
/* For the BPF_FETCH variant, get old data into src_reg */
if (imm & BPF_FETCH) {
EMIT(PPC_RAW_MR(ret_reg, ax_reg));
if (!fp->aux->verifier_zext)
EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
}
break;
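/*
 * Editor's note (illustrative only): for imm == BPF_ADD the code above
 * emits roughly the following sequence, with "1:" marking tmp_idx:
 *   li     tmp, off
 * 1: lwarx  r0, tmp, dst
 *   add    r0, r0, src
 *   stwcx. r0, tmp, dst
 *   bne-   1b
 * i.e. a standard load-reserve/store-conditional retry loop; the FETCH
 * variants additionally copy the old value out of BPF_REG_AX afterwards.
 */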
case BPF_STX | BPF_ATOMIC | BPF_DW: /* *(u64 *)(dst + off) += src */
return -EOPNOTSUPP;
/*
* BPF_LDX
*/
case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
/*
* As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
* kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
* load only if addr is kernel address (see is_kernel_addr()), otherwise
* set dst_reg=0 and move on.
*/
if (BPF_MODE(code) == BPF_PROBE_MEM) {
PPC_LI32(_R0, TASK_SIZE - off);
EMIT(PPC_RAW_CMPLW(src_reg, _R0));
PPC_BCC_SHORT(COND_GT, (ctx->idx + 4) * 4);
EMIT(PPC_RAW_LI(dst_reg, 0));
/*
* For BPF_DW case, "li reg_h,0" would be needed when
* !fp->aux->verifier_zext. Emit NOP otherwise.
*
* Note that "li reg_h,0" is emitted for BPF_B/H/W case,
* if necessary. So, jump there instead of emitting an
* additional "li reg_h,0" instruction.
*/
if (size == BPF_DW && !fp->aux->verifier_zext)
EMIT(PPC_RAW_LI(dst_reg_h, 0));
else
EMIT(PPC_RAW_NOP());
/*
* Need to jump two instructions instead of one for BPF_DW case
* as there are two load instructions for dst_reg_h & dst_reg
* respectively.
*/
if (size == BPF_DW)
PPC_JMP((ctx->idx + 3) * 4);
else
PPC_JMP((ctx->idx + 2) * 4);
}
switch (size) {
case BPF_B:
EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
break;
case BPF_H:
EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
break;
case BPF_W:
EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
break;
case BPF_DW:
EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4));
break;
}
if (size != BPF_DW && !fp->aux->verifier_zext)
EMIT(PPC_RAW_LI(dst_reg_h, 0));
if (BPF_MODE(code) == BPF_PROBE_MEM) {
int insn_idx = ctx->idx - 1;
int jmp_off = 4;
/*
* In case of BPF_DW, two lwz instructions are emitted, one
* for higher 32-bit and another for lower 32-bit. So, set
* ex->insn to the first of the two and jump over both
* instructions in fixup.
*
* Similarly, with !verifier_zext, two instructions are
* emitted for BPF_B/H/W case. So, set ex->insn to the
* instruction that could fault and skip over both
* instructions.
*/
if (size == BPF_DW || !fp->aux->verifier_zext) {
insn_idx -= 1;
jmp_off += 4;
}
ret = bpf_add_extable_entry(fp, image, pass, ctx, insn_idx,
jmp_off, dst_reg);
if (ret)
return ret;
}
break;
/*
* Doubleword load
* 16 byte instruction that uses two 'struct bpf_insn'
*/
case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
tmp_idx = ctx->idx;
PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm);
PPC_LI32(dst_reg, (u32)insn[i].imm);
/* padding to allow full 4 instructions for later patching */
if (!image)
for (j = ctx->idx - tmp_idx; j < 4; j++)
EMIT(PPC_RAW_NOP());
/* Adjust for two bpf instructions */
addrs[++i] = ctx->idx * 4;
break;
/*
* Return/Exit
*/
case BPF_JMP | BPF_EXIT:
/*
* If this isn't the very last instruction, branch to
* the epilogue. If we _are_ the last instruction,
* we'll just fall through to the epilogue.
*/
if (i != flen - 1) {
ret = bpf_jit_emit_exit_insn(image, ctx, _R0, exit_addr);
if (ret)
return ret;
}
/* else fall through to the epilogue */
break;
/*
* Call kernel helper or bpf function
*/
case BPF_JMP | BPF_CALL:
ctx->seen |= SEEN_FUNC;
ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
&func_addr, &func_addr_fixed);
if (ret < 0)
return ret;
if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_5))) {
EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5) - 1, _R1, 8));
EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5), _R1, 12));
}
ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
if (ret)
return ret;
EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0) - 1, _R3));
EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R4));
break;
/*
* Jumps and branches
*/
case BPF_JMP | BPF_JA:
PPC_JMP(addrs[i + 1 + off]);
break;
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP32 | BPF_JGT | BPF_K:
case BPF_JMP32 | BPF_JGT | BPF_X:
case BPF_JMP32 | BPF_JSGT | BPF_K:
case BPF_JMP32 | BPF_JSGT | BPF_X:
true_cond = COND_GT;
goto cond_branch;
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP32 | BPF_JLT | BPF_K:
case BPF_JMP32 | BPF_JLT | BPF_X:
case BPF_JMP32 | BPF_JSLT | BPF_K:
case BPF_JMP32 | BPF_JSLT | BPF_X:
true_cond = COND_LT;
goto cond_branch;
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP32 | BPF_JGE | BPF_K:
case BPF_JMP32 | BPF_JGE | BPF_X:
case BPF_JMP32 | BPF_JSGE | BPF_K:
case BPF_JMP32 | BPF_JSGE | BPF_X:
true_cond = COND_GE;
goto cond_branch;
case BPF_JMP | BPF_JLE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_X:
case BPF_JMP32 | BPF_JLE | BPF_K:
case BPF_JMP32 | BPF_JLE | BPF_X:
case BPF_JMP32 | BPF_JSLE | BPF_K:
case BPF_JMP32 | BPF_JSLE | BPF_X:
true_cond = COND_LE;
goto cond_branch;
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP32 | BPF_JEQ | BPF_K:
case BPF_JMP32 | BPF_JEQ | BPF_X:
true_cond = COND_EQ;
goto cond_branch;
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JNE | BPF_X:
case BPF_JMP32 | BPF_JNE | BPF_K:
case BPF_JMP32 | BPF_JNE | BPF_X:
true_cond = COND_NE;
goto cond_branch;
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP | BPF_JSET | BPF_X:
case BPF_JMP32 | BPF_JSET | BPF_K:
case BPF_JMP32 | BPF_JSET | BPF_X:
true_cond = COND_NE;
/* fallthrough; */
cond_branch:
switch (code) {
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JLT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JLE | BPF_X:
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JNE | BPF_X:
/* unsigned comparison */
EMIT(PPC_RAW_CMPLW(dst_reg_h, src_reg_h));
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
break;
case BPF_JMP32 | BPF_JGT | BPF_X:
case BPF_JMP32 | BPF_JLT | BPF_X:
case BPF_JMP32 | BPF_JGE | BPF_X:
case BPF_JMP32 | BPF_JLE | BPF_X:
case BPF_JMP32 | BPF_JEQ | BPF_X:
case BPF_JMP32 | BPF_JNE | BPF_X:
/* unsigned comparison */
EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
break;
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
case BPF_JMP | BPF_JSGE | BPF_X:
case BPF_JMP | BPF_JSLE | BPF_X:
/* signed comparison */
EMIT(PPC_RAW_CMPW(dst_reg_h, src_reg_h));
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
break;
case BPF_JMP32 | BPF_JSGT | BPF_X:
case BPF_JMP32 | BPF_JSLT | BPF_X:
case BPF_JMP32 | BPF_JSGE | BPF_X:
case BPF_JMP32 | BPF_JSLE | BPF_X:
/* signed comparison */
EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
break;
case BPF_JMP | BPF_JSET | BPF_X:
EMIT(PPC_RAW_AND_DOT(_R0, dst_reg_h, src_reg_h));
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
break;
case BPF_JMP32 | BPF_JSET | BPF_X: {
EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
break;
case BPF_JMP | BPF_JNE | BPF_K:
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JLT | BPF_K:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JLE | BPF_K:
/*
* Need sign-extended load, so only positive
* values can be used as imm in cmplwi
*/
if (imm >= 0 && imm < 32768) {
EMIT(PPC_RAW_CMPLWI(dst_reg_h, 0));
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
} else {
/* sign-extending load ... but unsigned comparison */
PPC_EX32(_R0, imm);
EMIT(PPC_RAW_CMPLW(dst_reg_h, _R0));
PPC_LI32(_R0, imm);
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
}
break;
case BPF_JMP32 | BPF_JNE | BPF_K:
case BPF_JMP32 | BPF_JEQ | BPF_K:
case BPF_JMP32 | BPF_JGT | BPF_K:
case BPF_JMP32 | BPF_JLT | BPF_K:
case BPF_JMP32 | BPF_JGE | BPF_K:
case BPF_JMP32 | BPF_JLE | BPF_K:
if (imm >= 0 && imm < 65536) {
EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
} else {
PPC_LI32(_R0, imm);
EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
}
break;
}
case BPF_JMP | BPF_JSGT | BPF_K:
case BPF_JMP | BPF_JSLT | BPF_K:
case BPF_JMP | BPF_JSGE | BPF_K:
case BPF_JMP | BPF_JSLE | BPF_K:
if (imm >= 0 && imm < 65536) {
EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
} else {
/* sign-extending load */
EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
PPC_LI32(_R0, imm);
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
}
break;
case BPF_JMP32 | BPF_JSGT | BPF_K:
case BPF_JMP32 | BPF_JSLT | BPF_K:
case BPF_JMP32 | BPF_JSGE | BPF_K:
case BPF_JMP32 | BPF_JSLE | BPF_K:
/*
* signed comparison, so any 16-bit value
* can be used in cmpwi
*/
if (imm >= -32768 && imm < 32768) {
EMIT(PPC_RAW_CMPWI(dst_reg, imm));
} else {
/* sign-extending load */
PPC_LI32(_R0, imm);
EMIT(PPC_RAW_CMPW(dst_reg, _R0));
}
break;
case BPF_JMP | BPF_JSET | BPF_K:
/* andi does not sign-extend the immediate */
if (imm >= 0 && imm < 32768) {
/* PPC_ANDI is _only/always_ dot-form */
EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
} else {
PPC_LI32(_R0, imm);
if (imm < 0) {
EMIT(PPC_RAW_CMPWI(dst_reg_h, 0));
PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
}
EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
}
break;
case BPF_JMP32 | BPF_JSET | BPF_K:
/* andi does not sign-extend the immediate */
if (imm >= 0 && imm < 32768) {
/* PPC_ANDI is _only/always_ dot-form */
EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
} else {
PPC_LI32(_R0, imm);
EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
}
break;
}
PPC_BCC(true_cond, addrs[i + 1 + off]);
break;
/*
* Tail call
*/
case BPF_JMP | BPF_TAIL_CALL:
ctx->seen |= SEEN_TAILCALL;
ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
if (ret < 0)
return ret;
break;
default:
/*
* The filter contains something cruel & unusual.
* We don't handle it, but also there shouldn't be
* anything missing from our list.
*/
pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n", code, i);
return -EOPNOTSUPP;
}
if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext &&
!insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64))
EMIT(PPC_RAW_LI(dst_reg_h, 0));
}
/* Set end-of-body-code address for exit. */
addrs[i] = ctx->idx * 4;
return 0;
}
| linux-master | arch/powerpc/net/bpf_jit_comp32.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* devtree.c - convenience functions for device tree manipulation
* Copyright 2007 David Gibson, IBM Corporation.
* Copyright (c) 2007 Freescale Semiconductor, Inc.
*
* Authors: David Gibson <[email protected]>
* Scott Wood <[email protected]>
*/
#include <stdarg.h>
#include <stddef.h>
#include "types.h"
#include "string.h"
#include "stdio.h"
#include "ops.h"
#include "of.h"
void dt_fixup_memory(u64 start, u64 size)
{
void *root, *memory;
int naddr, nsize, i;
u32 memreg[4];
root = finddevice("/");
if (getprop(root, "#address-cells", &naddr, sizeof(naddr)) < 0)
naddr = 2;
else
naddr = be32_to_cpu(naddr);
if (naddr < 1 || naddr > 2)
fatal("Can't cope with #address-cells == %d in /\n\r", naddr);
if (getprop(root, "#size-cells", &nsize, sizeof(nsize)) < 0)
nsize = 1;
else
nsize = be32_to_cpu(nsize);
if (nsize < 1 || nsize > 2)
fatal("Can't cope with #size-cells == %d in /\n\r", nsize);
i = 0;
if (naddr == 2)
memreg[i++] = cpu_to_be32(start >> 32);
memreg[i++] = cpu_to_be32(start & 0xffffffff);
if (nsize == 2)
memreg[i++] = cpu_to_be32(size >> 32);
memreg[i++] = cpu_to_be32(size & 0xffffffff);
memory = finddevice("/memory");
if (! memory) {
memory = create_node(NULL, "memory");
setprop_str(memory, "device_type", "memory");
}
printf("Memory <- <0x%x", be32_to_cpu(memreg[0]));
for (i = 1; i < (naddr + nsize); i++)
printf(" 0x%x", be32_to_cpu(memreg[i]));
printf("> (%ldMB)\n\r", (unsigned long)(size >> 20));
setprop(memory, "reg", memreg, (naddr + nsize)*sizeof(u32));
}
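/*
 * Editor's note (worked example, not in the original source): with
 * #address-cells = 2 and #size-cells = 1 in the root node, a call such
 * as dt_fixup_memory(0, 0x10000000) ends up writing
 *   reg = <0x0 0x0 0x10000000>;
 * into /memory, creating the node first if the device tree lacks one.
 */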
#define MHZ(x) ((x + 500000) / 1000000)
void dt_fixup_cpu_clocks(u32 cpu, u32 tb, u32 bus)
{
void *devp = NULL;
printf("CPU clock-frequency <- 0x%x (%dMHz)\n\r", cpu, MHZ(cpu));
printf("CPU timebase-frequency <- 0x%x (%dMHz)\n\r", tb, MHZ(tb));
if (bus > 0)
printf("CPU bus-frequency <- 0x%x (%dMHz)\n\r", bus, MHZ(bus));
while ((devp = find_node_by_devtype(devp, "cpu"))) {
setprop_val(devp, "clock-frequency", cpu_to_be32(cpu));
setprop_val(devp, "timebase-frequency", cpu_to_be32(tb));
if (bus > 0)
setprop_val(devp, "bus-frequency", cpu_to_be32(bus));
}
timebase_period_ns = 1000000000 / tb;
}
void dt_fixup_clock(const char *path, u32 freq)
{
void *devp = finddevice(path);
if (devp) {
printf("%s: clock-frequency <- %x (%dMHz)\n\r", path, freq, MHZ(freq));
setprop_val(devp, "clock-frequency", cpu_to_be32(freq));
}
}
void dt_fixup_mac_address_by_alias(const char *alias, const u8 *addr)
{
void *devp = find_node_by_alias(alias);
if (devp) {
printf("%s: local-mac-address <-"
" %02x:%02x:%02x:%02x:%02x:%02x\n\r", alias,
addr[0], addr[1], addr[2],
addr[3], addr[4], addr[5]);
setprop(devp, "local-mac-address", addr, 6);
}
}
void dt_fixup_mac_address(u32 index, const u8 *addr)
{
void *devp = find_node_by_prop_value(NULL, "linux,network-index",
(void*)&index, sizeof(index));
if (devp) {
printf("ENET%d: local-mac-address <-"
" %02x:%02x:%02x:%02x:%02x:%02x\n\r", index,
addr[0], addr[1], addr[2],
addr[3], addr[4], addr[5]);
setprop(devp, "local-mac-address", addr, 6);
}
}
void __dt_fixup_mac_addresses(u32 startindex, ...)
{
va_list ap;
u32 index = startindex;
const u8 *addr;
va_start(ap, startindex);
while ((addr = va_arg(ap, const u8 *)))
dt_fixup_mac_address(index++, addr);
va_end(ap);
}
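/*
 * Editor's note (illustrative usage, hedged): platform code normally does
 * not call __dt_fixup_mac_addresses() directly but goes through a variadic
 * wrapper macro (assumed here to be dt_fixup_mac_addresses() in ops.h) that
 * supplies the starting index and the NULL terminator, e.g.:
 *   dt_fixup_mac_addresses(bd.bi_enetaddr, bd.bi_enet1addr);
 * which assigns the two addresses to the nodes whose "linux,network-index"
 * properties are 0 and 1 respectively. The bd_t field names are shown only
 * as an example and are not taken from this file.
 */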
#define MAX_ADDR_CELLS 4
void dt_get_reg_format(void *node, u32 *naddr, u32 *nsize)
{
if (getprop(node, "#address-cells", naddr, 4) != 4)
*naddr = 2;
else
*naddr = be32_to_cpu(*naddr);
if (getprop(node, "#size-cells", nsize, 4) != 4)
*nsize = 1;
else
*nsize = be32_to_cpu(*nsize);
}
static void copy_val(u32 *dest, u32 *src, int naddr)
{
int pad = MAX_ADDR_CELLS - naddr;
memset(dest, 0, pad * 4);
memcpy(dest + pad, src, naddr * 4);
}
static int sub_reg(u32 *reg, u32 *sub)
{
int i, borrow = 0;
for (i = MAX_ADDR_CELLS - 1; i >= 0; i--) {
int prev_borrow = borrow;
borrow = reg[i] < sub[i] + prev_borrow;
reg[i] -= sub[i] + prev_borrow;
}
return !borrow;
}
static int add_reg(u32 *reg, u32 *add, int naddr)
{
int i, carry = 0;
for (i = MAX_ADDR_CELLS - 1; i >= MAX_ADDR_CELLS - naddr; i--) {
u64 tmp = (u64)be32_to_cpu(reg[i]) + be32_to_cpu(add[i]) + carry;
carry = tmp >> 32;
reg[i] = cpu_to_be32((u32)tmp);
}
return !carry;
}
/* It is assumed that if the first byte of reg fits in a
* range, then the whole reg block fits.
*/
static int compare_reg(u32 *reg, u32 *range, u32 *rangesize)
{
int i;
u32 end;
for (i = 0; i < MAX_ADDR_CELLS; i++) {
if (be32_to_cpu(reg[i]) < be32_to_cpu(range[i]))
return 0;
if (be32_to_cpu(reg[i]) > be32_to_cpu(range[i]))
break;
}
for (i = 0; i < MAX_ADDR_CELLS; i++) {
end = be32_to_cpu(range[i]) + be32_to_cpu(rangesize[i]);
if (be32_to_cpu(reg[i]) < end)
break;
if (be32_to_cpu(reg[i]) > end)
return 0;
}
return reg[i] != end;
}
/* reg must be MAX_ADDR_CELLS */
static int find_range(u32 *reg, u32 *ranges, int nregaddr,
int naddr, int nsize, int buflen)
{
int nrange = nregaddr + naddr + nsize;
int i;
for (i = 0; i + nrange <= buflen; i += nrange) {
u32 range_addr[MAX_ADDR_CELLS];
u32 range_size[MAX_ADDR_CELLS];
copy_val(range_addr, ranges + i, nregaddr);
copy_val(range_size, ranges + i + nregaddr + naddr, nsize);
if (compare_reg(reg, range_addr, range_size))
return i;
}
return -1;
}
/* Currently only generic buses without special encodings are supported.
* In particular, PCI is not supported. Also, only the beginning of the
* reg block is tracked; size is ignored except in ranges.
*/
static u32 prop_buf[MAX_PROP_LEN / 4];
static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
unsigned long *size)
{
u32 last_addr[MAX_ADDR_CELLS];
u32 this_addr[MAX_ADDR_CELLS];
void *parent;
u64 ret_addr, ret_size;
u32 naddr, nsize, prev_naddr, prev_nsize;
int buflen, offset;
parent = get_parent(node);
if (!parent)
return 0;
dt_get_reg_format(parent, &naddr, &nsize);
if (nsize > 2)
return 0;
offset = (naddr + nsize) * res;
if (reglen < offset + naddr + nsize ||
MAX_PROP_LEN < (offset + naddr + nsize) * 4)
return 0;
copy_val(last_addr, prop_buf + offset, naddr);
ret_size = be32_to_cpu(prop_buf[offset + naddr]);
if (nsize == 2) {
ret_size <<= 32;
ret_size |= be32_to_cpu(prop_buf[offset + naddr + 1]);
}
for (;;) {
prev_naddr = naddr;
prev_nsize = nsize;
node = parent;
parent = get_parent(node);
if (!parent)
break;
dt_get_reg_format(parent, &naddr, &nsize);
buflen = getprop(node, "ranges", prop_buf,
sizeof(prop_buf));
if (buflen == 0)
continue;
if (buflen < 0 || buflen > sizeof(prop_buf))
return 0;
offset = find_range(last_addr, prop_buf, prev_naddr,
naddr, prev_nsize, buflen / 4);
if (offset < 0)
return 0;
copy_val(this_addr, prop_buf + offset, prev_naddr);
if (!sub_reg(last_addr, this_addr))
return 0;
copy_val(this_addr, prop_buf + offset + prev_naddr, naddr);
if (!add_reg(last_addr, this_addr, naddr))
return 0;
}
if (naddr > 2)
return 0;
ret_addr = ((u64)be32_to_cpu(last_addr[2]) << 32) | be32_to_cpu(last_addr[3]);
if (sizeof(void *) == 4 &&
(ret_addr >= 0x100000000ULL || ret_size > 0x100000000ULL ||
ret_addr + ret_size > 0x100000000ULL))
return 0;
*addr = ret_addr;
if (size)
*size = ret_size;
return 1;
}
int dt_xlate_reg(void *node, int res, unsigned long *addr, unsigned long *size)
{
int reglen;
reglen = getprop(node, "reg", prop_buf, sizeof(prop_buf)) / 4;
return dt_xlate(node, res, reglen, addr, size);
}
int dt_xlate_addr(void *node, u32 *buf, int buflen, unsigned long *xlated_addr)
{
if (buflen > sizeof(prop_buf))
return 0;
memcpy(prop_buf, buf, buflen);
return dt_xlate(node, 0, buflen / 4, xlated_addr, NULL);
}
int dt_is_compatible(void *node, const char *compat)
{
char *buf = (char *)prop_buf;
int len, pos;
len = getprop(node, "compatible", buf, MAX_PROP_LEN);
if (len < 0)
return 0;
for (pos = 0; pos < len; pos++) {
if (!strcmp(buf + pos, compat))
return 1;
pos += strnlen(&buf[pos], len - pos);
}
return 0;
}
int dt_get_virtual_reg(void *node, void **addr, int nres)
{
unsigned long xaddr;
int n, i;
n = getprop(node, "virtual-reg", addr, nres * 4);
if (n > 0) {
for (i = 0; i < n/4; i ++)
((u32 *)addr)[i] = be32_to_cpu(((u32 *)addr)[i]);
return n / 4;
}
for (n = 0; n < nres; n++) {
if (!dt_xlate_reg(node, n, &xaddr, NULL))
break;
addr[n] = (void *)xaddr;
}
return n;
}
| linux-master | arch/powerpc/boot/devtree.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Motorola/Emerson MVME5100 with PPCBug firmware.
*
* Author: Stephen Chivers <[email protected]>
*
* Copyright 2013 CSC Australia Pty. Ltd.
*/
#include "types.h"
#include "ops.h"
#include "io.h"
BSS_STACK(4096);
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5)
{
u32 heapsize;
heapsize = 0x8000000 - (u32)_end; /* 128M */
simple_alloc_init(_end, heapsize, 32, 64);
fdt_init(_dtb_start);
serial_console_init();
}
| linux-master | arch/powerpc/boot/mvme5100.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Compatibility for old (not device tree aware) U-Boot versions
*
* Author: Scott Wood <[email protected]>
* Consolidated using macros by David Gibson <[email protected]>
*
* Copyright 2007 David Gibson, IBM Corporation.
* Copyright (c) 2007 Freescale Semiconductor, Inc.
*/
#include "ops.h"
#include "stdio.h"
#include "ppcboot.h"
void cuboot_init(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
unsigned long end_of_ram)
{
unsigned long avail_ram = end_of_ram - (unsigned long)_end;
loader_info.initrd_addr = r4;
loader_info.initrd_size = r4 ? r5 - r4 : 0;
loader_info.cmdline = (char *)r6;
loader_info.cmdline_len = r7 - r6;
simple_alloc_init(_end, avail_ram - 1024*1024, 32, 64);
}
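/*
 * Editor's sketch (hedged, not part of this file): a typical old-U-Boot
 * platform file copies the board-info block passed in r3 and then hands
 * the remaining registers to cuboot_init(). The snippet below is only an
 * illustration of that pattern; the bd_t fields used by a real board file
 * depend on the platform.
 *
 *   static bd_t bd;
 *
 *   void platform_init(unsigned long r3, unsigned long r4,
 *                      unsigned long r5, unsigned long r6,
 *                      unsigned long r7)
 *   {
 *           memcpy(&bd, (bd_t *)r3, sizeof(bd));
 *           cuboot_init(r4, r5, r6, r7,
 *                       bd.bi_memstart + bd.bi_memsize);
 *           fdt_init(_dtb_start);
 *           serial_console_init();
 *   }
 */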
| linux-master | arch/powerpc/boot/cuboot.c |
// SPDX-License-Identifier: GPL-2.0
/*
* MPC5200 PSC serial console support.
*
* Author: Grant Likely <[email protected]>
*
* Copyright (c) 2007 Secret Lab Technologies Ltd.
* Copyright (c) 2007 Freescale Semiconductor, Inc.
*
* It is assumed that the firmware (or the platform file) has already set
* up the port.
*/
#include "types.h"
#include "io.h"
#include "ops.h"
/* Programmable Serial Controller (PSC) status register bits */
#define MPC52xx_PSC_SR 0x04
#define MPC52xx_PSC_SR_RXRDY 0x0100
#define MPC52xx_PSC_SR_RXFULL 0x0200
#define MPC52xx_PSC_SR_TXRDY 0x0400
#define MPC52xx_PSC_SR_TXEMP 0x0800
#define MPC52xx_PSC_BUFFER 0x0C
static void *psc;
static int psc_open(void)
{
/* Assume the firmware has already configured the PSC into
* uart mode */
return 0;
}
static void psc_putc(unsigned char c)
{
while (!(in_be16(psc + MPC52xx_PSC_SR) & MPC52xx_PSC_SR_TXRDY)) ;
out_8(psc + MPC52xx_PSC_BUFFER, c);
}
static unsigned char psc_tstc(void)
{
return (in_be16(psc + MPC52xx_PSC_SR) & MPC52xx_PSC_SR_RXRDY) != 0;
}
static unsigned char psc_getc(void)
{
while (!(in_be16(psc + MPC52xx_PSC_SR) & MPC52xx_PSC_SR_RXRDY)) ;
return in_8(psc + MPC52xx_PSC_BUFFER);
}
int mpc5200_psc_console_init(void *devp, struct serial_console_data *scdp)
{
/* Get the base address of the psc registers */
if (dt_get_virtual_reg(devp, &psc, 1) < 1)
return -1;
scdp->open = psc_open;
scdp->putc = psc_putc;
scdp->getc = psc_getc;
scdp->tstc = psc_tstc;
return 0;
}
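/*
 * Editor's note (hedged): the generic boot wrapper serial code is expected
 * to dispatch to this driver from serial_console_init() based on the PSC
 * node's compatible string, roughly along these lines:
 *   if (dt_is_compatible(devp, "fsl,mpc5200-psc-uart"))
 *           rc = mpc5200_psc_console_init(devp, &serial_cd);
 * The exact compatible value and the serial_cd variable name are
 * assumptions for illustration, not taken from this file.
 */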
| linux-master | arch/powerpc/boot/mpc52xx-psc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright 2007 David Gibson, IBM Corporation.
*
* Based on earlier code:
* Copyright (C) Paul Mackerras 1997.
*
* Matt Porter <[email protected]>
* Copyright 2002-2005 MontaVista Software Inc.
*
* Eugene Surovegin <[email protected]> or <[email protected]>
* Copyright (c) 2003, 2004 Zultys Technologies
*/
#include <stdarg.h>
#include <stddef.h>
#include "types.h"
#include "elf.h"
#include "string.h"
#include "stdio.h"
#include "page.h"
#include "ops.h"
#include "reg.h"
#include "io.h"
#include "dcr.h"
#include "4xx.h"
#include "44x.h"
static u8 *ebony_mac0, *ebony_mac1;
#define EBONY_FPGA_PATH "/plb/opb/ebc/fpga"
#define EBONY_FPGA_FLASH_SEL 0x01
#define EBONY_SMALL_FLASH_PATH "/plb/opb/ebc/small-flash"
static void ebony_flashsel_fixup(void)
{
void *devp;
u32 reg[3] = {0x0, 0x0, 0x80000};
u8 *fpga;
u8 fpga_reg0 = 0x0;
devp = finddevice(EBONY_FPGA_PATH);
if (!devp)
fatal("Couldn't locate FPGA node %s\n\r", EBONY_FPGA_PATH);
if (getprop(devp, "virtual-reg", &fpga, sizeof(fpga)) != sizeof(fpga))
fatal("%s has missing or invalid virtual-reg property\n\r",
EBONY_FPGA_PATH);
fpga_reg0 = in_8(fpga);
devp = finddevice(EBONY_SMALL_FLASH_PATH);
if (!devp)
fatal("Couldn't locate small flash node %s\n\r",
EBONY_SMALL_FLASH_PATH);
if (getprop(devp, "reg", reg, sizeof(reg)) != sizeof(reg))
fatal("%s has reg property of unexpected size\n\r",
EBONY_SMALL_FLASH_PATH);
/* Invert address bit 14 (IBM-endian) if FLASH_SEL fpga bit is set */
if (fpga_reg0 & EBONY_FPGA_FLASH_SEL)
reg[1] ^= 0x80000;
setprop(devp, "reg", reg, sizeof(reg));
}
static void ebony_fixups(void)
{
// FIXME: sysclk should be derived by reading the FPGA registers
unsigned long sysclk = 33000000;
ibm440gp_fixup_clocks(sysclk, 6 * 1843200);
ibm4xx_sdram_fixup_memsize();
dt_fixup_mac_address_by_alias("ethernet0", ebony_mac0);
dt_fixup_mac_address_by_alias("ethernet1", ebony_mac1);
ibm4xx_fixup_ebc_ranges("/plb/opb/ebc");
ebony_flashsel_fixup();
}
void ebony_init(void *mac0, void *mac1)
{
platform_ops.fixups = ebony_fixups;
platform_ops.exit = ibm44x_dbcr_reset;
ebony_mac0 = mac0;
ebony_mac1 = mac1;
fdt_init(_dtb_start);
serial_console_init();
}
| linux-master | arch/powerpc/boot/ebony.c |